-rw-r--r--.mailmap7
-rw-r--r--CREDITS23
-rw-r--r--Documentation/ABI/testing/sysfs-bus-event_source-devices-caps6
-rw-r--r--Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor3
-rw-r--r--Documentation/ABI/testing/sysfs-class-devfreq3
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-mm-damon33
-rw-r--r--Documentation/RAS/ras.rst26
-rw-r--r--Documentation/admin-guide/blockdev/zram.rst2
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst48
-rw-r--r--Documentation/admin-guide/kdump/vmcoreinfo.rst8
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt31
-rw-r--r--Documentation/admin-guide/mm/damon/usage.rst147
-rw-r--r--Documentation/admin-guide/mm/ksm.rst55
-rw-r--r--Documentation/admin-guide/mm/pagemap.rst1
-rw-r--r--Documentation/admin-guide/mm/transhuge.rst97
-rw-r--r--Documentation/admin-guide/mm/userfaultfd.rst3
-rw-r--r--Documentation/admin-guide/mm/zswap.rst20
-rw-r--r--Documentation/admin-guide/perf/dwc_pcie_pmu.rst94
-rw-r--r--Documentation/admin-guide/perf/imx-ddr.rst45
-rw-r--r--Documentation/admin-guide/perf/index.rst1
-rw-r--r--Documentation/arch/arm64/arm-acpi.rst2
-rw-r--r--Documentation/arch/arm64/perf.rst72
-rw-r--r--Documentation/arch/x86/cpuinfo.rst89
-rw-r--r--Documentation/arch/x86/pti.rst10
-rw-r--r--Documentation/core-api/maple_tree.rst4
-rw-r--r--Documentation/core-api/mm-api.rst2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml54
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml5
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/u-boot.yaml2
-rw-r--r--Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml3
-rw-r--r--Documentation/devicetree/bindings/regulator/fixed-regulator.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml14
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml4
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.yaml19
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml5
-rw-r--r--Documentation/devicetree/bindings/regulator/regulator.yaml13
-rw-r--r--Documentation/devicetree/bindings/spi/adi,axi-spi-engine.txt31
-rw-r--r--Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml66
-rw-r--r--Documentation/devicetree/bindings/spi/renesas,rspi.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/st,stm32-spi.yaml2
-rw-r--r--Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml7
-rw-r--r--Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml10
-rw-r--r--Documentation/devicetree/bindings/thermal/mediatek,thermal.yaml99
-rw-r--r--Documentation/devicetree/bindings/thermal/mediatek-thermal.txt52
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm-hc.yaml8
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm5.yaml16
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-tsens.yaml1
-rw-r--r--Documentation/devicetree/bindings/thermal/thermal-zones.yaml16
-rw-r--r--Documentation/driver-api/mtd/spi-nor.rst262
-rw-r--r--Documentation/driver-api/surface_aggregator/ssh.rst2
-rw-r--r--Documentation/filesystems/locking.rst4
-rw-r--r--Documentation/filesystems/porting.rst12
-rw-r--r--Documentation/filesystems/proc.rst6
-rw-r--r--Documentation/filesystems/squashfs.rst60
-rw-r--r--Documentation/filesystems/vfs.rst6
-rw-r--r--Documentation/i2c/i2c-address-translators.rst2
-rw-r--r--Documentation/index.rst1
-rw-r--r--Documentation/locking/mutex-design.rst18
-rw-r--r--Documentation/mm/arch_pgtable_helpers.rst2
-rw-r--r--Documentation/mm/damon/design.rst37
-rw-r--r--Documentation/mm/transhuge.rst4
-rw-r--r--Documentation/mm/unevictable-lru.rst4
-rw-r--r--Documentation/networking/ip-sysctl.rst2
-rw-r--r--Documentation/networking/packet_mmap.rst14
-rw-r--r--Documentation/power/freezing-of-tasks.rst85
-rw-r--r--Documentation/scheduler/sched-design-CFS.rst8
-rw-r--r--Documentation/scheduler/schedutil.rst7
-rw-r--r--Documentation/spi/pxa2xx.rst59
-rw-r--r--Documentation/translations/ja_JP/SubmitChecklist4
-rw-r--r--Documentation/translations/zh_CN/process/submit-checklist.rst3
-rw-r--r--Documentation/translations/zh_CN/scheduler/sched-design-CFS.rst8
-rw-r--r--Documentation/translations/zh_CN/scheduler/schedutil.rst7
-rw-r--r--Documentation/translations/zh_TW/process/submit-checklist.rst3
-rw-r--r--Documentation/userspace-api/index.rst1
-rw-r--r--Documentation/userspace-api/ioctl/ioctl-number.rst4
-rw-r--r--Documentation/userspace-api/lsm.rst73
-rw-r--r--MAINTAINERS127
-rw-r--r--Makefile8
-rw-r--r--arch/Kconfig21
-rw-r--r--arch/alpha/kernel/syscalls/syscall.tbl5
-rw-r--r--arch/alpha/lib/Makefile1
-rw-r--r--arch/alpha/mm/Makefile2
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/include/asm/topology.h1
-rw-r--r--arch/arm/kernel/perf_event_v6.c28
-rw-r--r--arch/arm/kernel/perf_event_v7.c50
-rw-r--r--arch/arm/kernel/perf_event_xscale.c44
-rw-r--r--arch/arm/mach-sunxi/mc_smp.c8
-rw-r--r--arch/arm/tools/syscall.tbl5
-rw-r--r--arch/arm64/Kconfig25
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/Makefile2
-rwxr-xr-xarch/arm64/boot/install.sh3
-rw-r--r--arch/arm64/include/asm/assembler.h2
-rw-r--r--arch/arm64/include/asm/cache.h6
-rw-r--r--arch/arm64/include/asm/cpufeature.h6
-rw-r--r--arch/arm64/include/asm/fpsimdmacros.h8
-rw-r--r--arch/arm64/include/asm/kasan.h22
-rw-r--r--arch/arm64/include/asm/kernel-pgtable.h27
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h7
-rw-r--r--arch/arm64/include/asm/kvm_pgtable.h2
-rw-r--r--arch/arm64/include/asm/memory.h45
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h2
-rw-r--r--arch/arm64/include/asm/processor.h3
-rw-r--r--arch/arm64/include/asm/simd.h11
-rw-r--r--arch/arm64/include/asm/sparsemem.h2
-rw-r--r--arch/arm64/include/asm/stacktrace/common.h19
-rw-r--r--arch/arm64/include/asm/stacktrace/nvhe.h2
-rw-r--r--arch/arm64/include/asm/sysreg.h25
-rw-r--r--arch/arm64/include/asm/thread_info.h1
-rw-r--r--arch/arm64/include/asm/tlb.h15
-rw-r--r--arch/arm64/include/asm/tlbflush.h100
-rw-r--r--arch/arm64/include/asm/topology.h1
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h10
-rw-r--r--arch/arm64/kernel/cpufeature.c160
-rw-r--r--arch/arm64/kernel/cpuinfo.c5
-rw-r--r--arch/arm64/kernel/fpsimd.c169
-rw-r--r--arch/arm64/kernel/head.S2
-rw-r--r--arch/arm64/kernel/idreg-override.c153
-rw-r--r--arch/arm64/kernel/irq.c7
-rw-r--r--arch/arm64/kernel/kaslr.c7
-rw-r--r--arch/arm64/kernel/kexec_image.c6
-rw-r--r--arch/arm64/kernel/machine_kexec.c26
-rw-r--r--arch/arm64/kernel/machine_kexec_file.c12
-rw-r--r--arch/arm64/kernel/pi/Makefile1
-rw-r--r--arch/arm64/kernel/smp.c12
-rw-r--r--arch/arm64/kernel/stacktrace.c146
-rw-r--r--arch/arm64/kernel/topology.c26
-rw-r--r--arch/arm64/kernel/vdso32/Makefile8
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/gfp.h2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/page_alloc.c3
-rw-r--r--arch/arm64/kvm/hyp/nvhe/pkvm.c2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/tlb.c61
-rw-r--r--arch/arm64/kvm/hyp/vhe/tlb.c13
-rw-r--r--arch/arm64/kvm/pmu-emul.c8
-rw-r--r--arch/arm64/kvm/sys_regs.c4
-rw-r--r--arch/arm64/lib/copy_page.S11
-rw-r--r--arch/arm64/mm/fault.c2
-rw-r--r--arch/arm64/mm/hugetlbpage.c2
-rw-r--r--arch/arm64/mm/kasan_init.c5
-rw-r--r--arch/arm64/mm/mmu.c6
-rw-r--r--arch/arm64/tools/cpucaps2
-rw-r--r--arch/arm64/tools/sysreg325
-rw-r--r--arch/hexagon/include/asm/irq.h3
-rw-r--r--arch/hexagon/kernel/process.c2
-rw-r--r--arch/hexagon/kernel/reset.c1
-rw-r--r--arch/hexagon/kernel/signal.c2
-rw-r--r--arch/hexagon/kernel/smp.c4
-rw-r--r--arch/hexagon/kernel/time.c4
-rw-r--r--arch/hexagon/kernel/traps.c11
-rw-r--r--arch/hexagon/kernel/vdso.c1
-rw-r--r--arch/hexagon/kernel/vm_events.c7
-rw-r--r--arch/hexagon/mm/init.c3
-rw-r--r--arch/hexagon/mm/uaccess.c8
-rw-r--r--arch/hexagon/mm/vm_fault.c3
-rw-r--r--arch/hexagon/mm/vm_tlb.c1
-rw-r--r--arch/loongarch/include/asm/pgtable.h1
-rw-r--r--arch/loongarch/kernel/numa.c28
-rw-r--r--arch/m68k/Kconfig.cpu2
-rw-r--r--arch/m68k/configs/amiga_defconfig2
-rw-r--r--arch/m68k/configs/apollo_defconfig2
-rw-r--r--arch/m68k/configs/atari_defconfig2
-rw-r--r--arch/m68k/configs/bvme6000_defconfig2
-rw-r--r--arch/m68k/configs/hp300_defconfig2
-rw-r--r--arch/m68k/configs/mac_defconfig5
-rw-r--r--arch/m68k/configs/multi_defconfig5
-rw-r--r--arch/m68k/configs/mvme147_defconfig2
-rw-r--r--arch/m68k/configs/mvme16x_defconfig2
-rw-r--r--arch/m68k/configs/q40_defconfig2
-rw-r--r--arch/m68k/configs/sun3_defconfig2
-rw-r--r--arch/m68k/configs/sun3x_defconfig2
-rw-r--r--arch/m68k/kernel/syscalls/syscall.tbl5
-rw-r--r--arch/microblaze/kernel/syscalls/syscall.tbl5
-rw-r--r--arch/mips/Kbuild6
-rw-r--r--arch/mips/boot/compressed/dbg.c2
-rw-r--r--arch/mips/boot/compressed/decompress.c16
-rw-r--r--arch/mips/boot/compressed/decompress.h24
-rw-r--r--arch/mips/boot/compressed/string.c1
-rw-r--r--arch/mips/include/asm/cache.h6
-rw-r--r--arch/mips/include/asm/jump_label.h3
-rw-r--r--arch/mips/include/asm/mach-loongson64/mmzone.h1
-rw-r--r--arch/mips/include/asm/mmzone.h2
-rw-r--r--arch/mips/include/asm/pgtable.h1
-rw-r--r--arch/mips/include/asm/processor.h2
-rw-r--r--arch/mips/include/asm/r4kcache.h4
-rw-r--r--arch/mips/include/asm/setup.h1
-rw-r--r--arch/mips/include/asm/signal.h1
-rw-r--r--arch/mips/include/asm/smp.h2
-rw-r--r--arch/mips/include/asm/spram.h2
-rw-r--r--arch/mips/include/asm/syscalls.h33
-rw-r--r--arch/mips/include/asm/tlbex.h1
-rw-r--r--arch/mips/include/asm/traps.h24
-rw-r--r--arch/mips/include/asm/uasm.h2
-rw-r--r--arch/mips/kernel/cpu-probe.c1
-rw-r--r--arch/mips/kernel/cpu-r3k-probe.c1
-rw-r--r--arch/mips/kernel/linux32.c1
-rw-r--r--arch/mips/kernel/machine_kexec.c1
-rw-r--r--arch/mips/kernel/mips-cm.c2
-rw-r--r--arch/mips/kernel/mips-mt-fpaff.c1
-rw-r--r--arch/mips/kernel/mips-mt.c1
-rw-r--r--arch/mips/kernel/module.c3
-rw-r--r--arch/mips/kernel/r4k-bugs64.c1
-rw-r--r--arch/mips/kernel/signal-common.h3
-rw-r--r--arch/mips/kernel/signal.c1
-rw-r--r--arch/mips/kernel/signal32.c1
-rw-r--r--arch/mips/kernel/signal_n32.c4
-rw-r--r--arch/mips/kernel/signal_o32.c1
-rw-r--r--arch/mips/kernel/smp.c3
-rw-r--r--arch/mips/kernel/spram.c1
-rw-r--r--arch/mips/kernel/syscall.c1
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl5
-rw-r--r--arch/mips/kernel/syscalls/syscall_n64.tbl5
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl5
-rw-r--r--arch/mips/kernel/traps.c2
-rw-r--r--arch/mips/kernel/unaligned.c1
-rw-r--r--arch/mips/mm/c-r4k.c6
-rw-r--r--arch/mips/mm/cache.c15
-rw-r--r--arch/mips/mm/fault.c1
-rw-r--r--arch/mips/mm/init.c1
-rw-r--r--arch/mips/mm/pgtable-64.c2
-rw-r--r--arch/mips/mm/tlb-r3k.c4
-rw-r--r--arch/mips/mm/tlb-r4k.c6
-rw-r--r--arch/mips/power/cpu.c1
-rw-r--r--arch/mips/power/hibernate.c1
-rw-r--r--arch/nios2/Kconfig2
-rw-r--r--arch/parisc/kernel/kexec_file.c8
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl5
-rw-r--r--arch/powerpc/Kconfig3
-rw-r--r--arch/powerpc/Kconfig.debug1
-rw-r--r--arch/powerpc/Makefile25
-rw-r--r--arch/powerpc/boot/dts/fsl/t1023si-post.dtsi79
-rw-r--r--arch/powerpc/boot/dts/fsl/t1040si-post.dtsi71
-rw-r--r--arch/powerpc/configs/ppc64_defconfig1
-rw-r--r--arch/powerpc/configs/ps3_defconfig1
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h10
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush.h9
-rw-r--r--arch/powerpc/include/asm/ftrace.h2
-rw-r--r--arch/powerpc/include/asm/hvcall.h20
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h10
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h1
-rw-r--r--arch/powerpc/include/asm/linkage.h3
-rw-r--r--arch/powerpc/include/asm/mmu.h4
-rw-r--r--arch/powerpc/include/asm/mmzone.h8
-rw-r--r--arch/powerpc/include/asm/papr-sysparm.h17
-rw-r--r--arch/powerpc/include/asm/paravirt.h33
-rw-r--r--arch/powerpc/include/asm/ppc-pci.h5
-rw-r--r--arch/powerpc/include/asm/ps3.h6
-rw-r--r--arch/powerpc/include/asm/reg.h1
-rw-r--r--arch/powerpc/include/asm/reg_a2.h154
-rw-r--r--arch/powerpc/include/asm/rtas.h91
-rw-r--r--arch/powerpc/include/uapi/asm/papr-miscdev.h9
-rw-r--r--arch/powerpc/include/uapi/asm/papr-sysparm.h58
-rw-r--r--arch/powerpc/include/uapi/asm/papr-vpd.h22
-rw-r--r--arch/powerpc/kernel/cpu_specs_book3s_64.h15
-rw-r--r--arch/powerpc/kernel/cputable.c4
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S1
-rw-r--r--arch/powerpc/kernel/rtas.c207
-rw-r--r--arch/powerpc/kernel/rtas_pci.c8
-rw-r--r--arch/powerpc/kernel/smp.c124
-rw-r--r--arch/powerpc/kernel/swsusp_64.c2
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl5
-rw-r--r--arch/powerpc/kernel/trace/ftrace_entry.S2
-rw-r--r--arch/powerpc/kernel/traps.c2
-rw-r--r--arch/powerpc/kernel/udbg_16550.c1
-rw-r--r--arch/powerpc/kernel/vdso/Makefile2
-rw-r--r--arch/powerpc/kexec/core.c1
-rw-r--r--arch/powerpc/kexec/core_64.c3
-rw-r--r--arch/powerpc/kexec/elf_64.c8
-rw-r--r--arch/powerpc/kexec/file_load_64.c18
-rw-r--r--arch/powerpc/kvm/book3s.c4
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c7
-rw-r--r--arch/powerpc/kvm/book3s_hv.c72
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_nestedv2.c29
-rw-r--r--arch/powerpc/kvm/book3s_pr.c1
-rw-r--r--arch/powerpc/kvm/emulate_loadstore.c21
-rw-r--r--arch/powerpc/lib/Makefile2
-rw-r--r--arch/powerpc/lib/sstep.c14
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c7
-rw-r--r--arch/powerpc/mm/book3s64/iommu_api.c2
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c2
-rw-r--r--arch/powerpc/mm/book3s64/pkeys.c3
-rw-r--r--arch/powerpc/mm/fault.c2
-rw-r--r--arch/powerpc/mm/hugetlbpage.c2
-rw-r--r--arch/powerpc/mm/init-common.c5
-rw-r--r--arch/powerpc/mm/mmu_decl.h5
-rw-r--r--arch/powerpc/perf/core-book3s.c2
-rw-r--r--arch/powerpc/perf/hv-gpci.c3
-rw-r--r--arch/powerpc/perf/imc-pmu.c6
-rw-r--r--arch/powerpc/platforms/44x/Kconfig1
-rw-r--r--arch/powerpc/platforms/44x/idle.c2
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads_cpld.c2
-rw-r--r--arch/powerpc/platforms/512x/pdm360ng.c2
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c5
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_rdb.c2
-rw-r--r--arch/powerpc/platforms/86xx/Kconfig7
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c2
-rw-r--r--arch/powerpc/platforms/powermac/smp.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-irqchip.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-powercap.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal-prd.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-xscom.c5
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c2
-rw-r--r--arch/powerpc/platforms/powernv/subcore.c3
-rw-r--r--arch/powerpc/platforms/ps3/Kconfig12
-rw-r--r--arch/powerpc/platforms/ps3/Makefile2
-rw-r--r--arch/powerpc/platforms/ps3/device-init.c1
-rw-r--r--arch/powerpc/platforms/ps3/gelic_udbg.c1
-rw-r--r--arch/powerpc/platforms/pseries/Makefile1
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c18
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c16
-rw-r--r--arch/powerpc/platforms/pseries/papr-sysparm.c205
-rw-r--r--arch/powerpc/platforms/pseries/papr-vpd.c541
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h1
-rw-r--r--arch/powerpc/platforms/pseries/suspend.c1
-rw-r--r--arch/powerpc/sysdev/grackle.c19
-rw-r--r--arch/powerpc/sysdev/xics/icp-native.c2
-rw-r--r--arch/riscv/include/asm/pgtable.h1
-rw-r--r--arch/riscv/include/asm/topology.h1
-rw-r--r--arch/riscv/kernel/Makefile2
-rw-r--r--arch/riscv/kernel/elf_kexec.c11
-rw-r--r--arch/riscv/kernel/machine_kexec.c26
-rw-r--r--arch/riscv/mm/fault.c2
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/include/asm/pgtable.h1
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl5
-rw-r--r--arch/s390/kernel/traps.c2
-rw-r--r--arch/s390/mm/fault.c3
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl5
-rw-r--r--arch/sh/mm/Kconfig2
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/sparc/include/asm/pgtable_64.h1
-rw-r--r--arch/sparc/kernel/Makefile1
-rw-r--r--arch/sparc/kernel/pci_sun4v.c2
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl5
-rw-r--r--arch/sparc/kernel/traps_64.c2
-rw-r--r--arch/sparc/lib/Makefile1
-rw-r--r--arch/sparc/mm/Makefile1
-rw-r--r--arch/sparc/mm/tsb.c4
-rw-r--r--arch/sparc/prom/Makefile1
-rw-r--r--arch/um/kernel/um_arch.c4
-rw-r--r--arch/x86/Kconfig11
-rw-r--r--arch/x86/Kconfig.cpu6
-rw-r--r--arch/x86/boot/compressed/Makefile2
-rw-r--r--arch/x86/boot/compressed/ident_map_64.c5
-rw-r--r--arch/x86/boot/compressed/idt_64.c1
-rw-r--r--arch/x86/boot/compressed/idt_handlers_64.S1
-rw-r--r--arch/x86/boot/compressed/mem.c2
-rw-r--r--arch/x86/boot/compressed/misc.h1
-rw-r--r--arch/x86/boot/pm.c7
-rw-r--r--arch/x86/boot/string.c2
-rw-r--r--arch/x86/coco/tdx/tdx.c2
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S2
-rw-r--r--arch/x86/crypto/aesni-intel_avx-x86_64.S2
-rw-r--r--arch/x86/crypto/crc32c-pcl-intel-asm_64.S2
-rw-r--r--arch/x86/crypto/sha512-avx-asm.S2
-rw-r--r--arch/x86/crypto/sha512-ssse3-asm.S2
-rw-r--r--arch/x86/entry/calling.h12
-rw-r--r--arch/x86/entry/entry_64.S33
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl5
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl5
-rw-r--r--arch/x86/events/amd/brs.c2
-rw-r--r--arch/x86/events/amd/core.c4
-rw-r--r--arch/x86/events/amd/ibs.c3
-rw-r--r--arch/x86/events/core.c4
-rw-r--r--arch/x86/events/intel/core.c154
-rw-r--r--arch/x86/events/intel/cstate.c158
-rw-r--r--arch/x86/events/intel/ds.c4
-rw-r--r--arch/x86/events/intel/lbr.c85
-rw-r--r--arch/x86/events/intel/uncore.c12
-rw-r--r--arch/x86/events/intel/uncore.h10
-rw-r--r--arch/x86/events/intel/uncore_discovery.c5
-rw-r--r--arch/x86/events/intel/uncore_discovery.h2
-rw-r--r--arch/x86/events/intel/uncore_nhmex.c2
-rw-r--r--arch/x86/events/intel/uncore_snbep.c208
-rw-r--r--arch/x86/events/perf_event.h12
-rw-r--r--arch/x86/events/perf_event_flags.h2
-rw-r--r--arch/x86/hyperv/hv_apic.c2
-rw-r--r--arch/x86/hyperv/irqdomain.c2
-rw-r--r--arch/x86/hyperv/ivm.c2
-rw-r--r--arch/x86/include/asm/alternative.h30
-rw-r--r--arch/x86/include/asm/amd_nb.h2
-rw-r--r--arch/x86/include/asm/apic.h2
-rw-r--r--arch/x86/include/asm/apicdef.h276
-rw-r--r--arch/x86/include/asm/barrier.h18
-rw-r--r--arch/x86/include/asm/cpufeatures.h8
-rw-r--r--arch/x86/include/asm/desc_defs.h78
-rw-r--r--arch/x86/include/asm/elf.h2
-rw-r--r--arch/x86/include/asm/extable_fixup_types.h2
-rw-r--r--arch/x86/include/asm/fpu/types.h2
-rw-r--r--arch/x86/include/asm/ia32.h11
-rw-r--r--arch/x86/include/asm/io.h8
-rw-r--r--arch/x86/include/asm/iosf_mbi.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/mce.h4
-rw-r--r--arch/x86/include/asm/msr-index.h5
-rw-r--r--arch/x86/include/asm/mwait.h20
-rw-r--r--arch/x86/include/asm/nospec-branch.h4
-rw-r--r--arch/x86/include/asm/paravirt.h79
-rw-r--r--arch/x86/include/asm/paravirt_types.h87
-rw-r--r--arch/x86/include/asm/perf_event.h4
-rw-r--r--arch/x86/include/asm/pgtable.h7
-rw-r--r--arch/x86/include/asm/pgtable_64.h2
-rw-r--r--arch/x86/include/asm/processor.h18
-rw-r--r--arch/x86/include/asm/qspinlock_paravirt.h4
-rw-r--r--arch/x86/include/asm/setup.h2
-rw-r--r--arch/x86/include/asm/text-patching.h12
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h2
-rw-r--r--arch/x86/include/asm/vdso/gettimeofday.h2
-rw-r--r--arch/x86/include/asm/xen/interface_64.h2
-rw-r--r--arch/x86/include/uapi/asm/amd_hsmp.h2
-rw-r--r--arch/x86/kernel/alternative.c136
-rw-r--r--arch/x86/kernel/amd_gart_64.c2
-rw-r--r--arch/x86/kernel/apic/Makefile2
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c2
-rw-r--r--arch/x86/kernel/apic/apic_noop.c1
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c2
-rw-r--r--arch/x86/kernel/apic/bigsmp_32.c1
-rw-r--r--arch/x86/kernel/apic/io_apic.c2
-rw-r--r--arch/x86/kernel/apic/probe_32.c1
-rw-r--r--arch/x86/kernel/apic/vector.c4
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c1
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c1
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c1
-rw-r--r--arch/x86/kernel/apm_32.c2
-rw-r--r--arch/x86/kernel/callthunks.c17
-rw-r--r--arch/x86/kernel/cpu/amd.c268
-rw-r--r--arch/x86/kernel/cpu/common.c57
-rw-r--r--arch/x86/kernel/cpu/hygon.c3
-rw-r--r--arch/x86/kernel/cpu/intel_epb.c2
-rw-r--r--arch/x86/kernel/cpu/mce/amd.c80
-rw-r--r--arch/x86/kernel/cpu/mce/core.c72
-rw-r--r--arch/x86/kernel/cpu/mce/inject.c1
-rw-r--r--arch/x86/kernel/cpu/mce/intel.c304
-rw-r--r--arch/x86/kernel/cpu/mce/internal.h66
-rw-r--r--arch/x86/kernel/cpu/mce/threshold.c115
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c20
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c14
-rw-r--r--arch/x86/kernel/cpu/sgx/ioctl.c2
-rw-r--r--arch/x86/kernel/crash.c16
-rw-r--r--arch/x86/kernel/fpu/core.c2
-rw-r--r--arch/x86/kernel/head64.c6
-rw-r--r--arch/x86/kernel/head_64.S37
-rw-r--r--arch/x86/kernel/hpet.c4
-rw-r--r--arch/x86/kernel/kexec-bzimage64.c23
-rw-r--r--arch/x86/kernel/kprobes/core.c3
-rw-r--r--arch/x86/kernel/kvm.c6
-rw-r--r--arch/x86/kernel/kvmclock.c2
-rw-r--r--arch/x86/kernel/ldt.c6
-rw-r--r--arch/x86/kernel/machine_kexec_64.c7
-rw-r--r--arch/x86/kernel/module.c20
-rw-r--r--arch/x86/kernel/paravirt.c54
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/kernel/setup.c4
-rw-r--r--arch/x86/kernel/setup_percpu.c4
-rw-r--r--arch/x86/kernel/sev-shared.c2
-rw-r--r--arch/x86/kernel/smpboot.c1
-rw-r--r--arch/x86/kernel/vmlinux.lds.S13
-rw-r--r--arch/x86/kvm/cpuid.c2
-rw-r--r--arch/x86/kvm/hyperv.c2
-rw-r--r--arch/x86/kvm/mmu/mmu.c4
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.c2
-rw-r--r--arch/x86/kvm/svm/svm.c2
-rw-r--r--arch/x86/kvm/vmx/nested.c2
-rw-r--r--arch/x86/kvm/vmx/vmx.c2
-rw-r--r--arch/x86/kvm/x86.c6
-rw-r--r--arch/x86/kvm/xen.c2
-rw-r--r--arch/x86/lib/csum-partial_64.c105
-rw-r--r--arch/x86/lib/delay.c2
-rw-r--r--arch/x86/lib/misc.c2
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/x86/mm/init_64.c6
-rw-r--r--arch/x86/mm/numa.c34
-rw-r--r--arch/x86/mm/pat/memtype.c2
-rw-r--r--arch/x86/mm/pat/set_memory.c4
-rw-r--r--arch/x86/mm/pti.c2
-rw-r--r--arch/x86/mm/tlb.c2
-rw-r--r--arch/x86/net/bpf_jit_comp.c2
-rw-r--r--arch/x86/net/bpf_jit_comp32.c2
-rw-r--r--arch/x86/pci/sta2x11-fixup.c1
-rw-r--r--arch/x86/platform/intel-quark/imr_selftest.c2
-rw-r--r--arch/x86/platform/pvh/head.S9
-rw-r--r--arch/x86/platform/uv/uv_irq.c2
-rw-r--r--arch/x86/platform/uv/uv_nmi.c2
-rw-r--r--arch/x86/platform/uv/uv_time.c2
-rw-r--r--arch/x86/realmode/init.c2
-rw-r--r--arch/x86/realmode/rm/reboot.S3
-rw-r--r--arch/x86/tools/Makefile2
-rw-r--r--arch/x86/tools/chkobjdump.awk34
-rw-r--r--arch/x86/tools/objdump_reformat.awk6
-rw-r--r--arch/x86/tools/relocs.c2
-rw-r--r--arch/x86/xen/irq.c2
-rw-r--r--arch/x86/xen/mmu_pv.c2
-rw-r--r--arch/xtensa/Kconfig2
-rw-r--r--arch/xtensa/include/asm/kasan.h2
-rw-r--r--arch/xtensa/kernel/syscalls/syscall.tbl5
-rw-r--r--block/Kconfig20
-rw-r--r--block/bdev.c258
-rw-r--r--block/fops.c23
-rw-r--r--drivers/accel/habanalabs/common/device.c2
-rw-r--r--drivers/accel/qaic/mhi_controller.c15
-rw-r--r--drivers/accel/qaic/qaic_data.c8
-rw-r--r--drivers/acpi/Kconfig5
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/acpi_extlog.c12
-rw-r--r--drivers/acpi/acpi_lpit.c2
-rw-r--r--drivers/acpi/acpi_lpss.c51
-rw-r--r--drivers/acpi/acpi_video.c77
-rw-r--r--drivers/acpi/acpi_watchdog.c2
-rw-r--r--drivers/acpi/apei/einj.c71
-rw-r--r--drivers/acpi/apei/ghes.c29
-rw-r--r--drivers/acpi/arm64/Makefile1
-rw-r--r--drivers/acpi/arm64/thermal_cpufreq.c22
-rw-r--r--drivers/acpi/bus.c32
-rw-r--r--drivers/acpi/button.c10
-rw-r--r--drivers/acpi/cppc_acpi.c104
-rw-r--r--drivers/acpi/ec.c116
-rw-r--r--drivers/acpi/internal.h28
-rw-r--r--drivers/acpi/mipi-disco-img.c725
-rw-r--r--drivers/acpi/numa/srat.c10
-rw-r--r--drivers/acpi/osl.c77
-rw-r--r--drivers/acpi/processor_thermal.c49
-rw-r--r--drivers/acpi/property.c102
-rw-r--r--drivers/acpi/resource.c40
-rw-r--r--drivers/acpi/scan.c61
-rw-r--r--drivers/acpi/thermal.c80
-rw-r--r--drivers/acpi/thermal_lib.c (renamed from drivers/thermal/thermal_acpi.c)80
-rw-r--r--drivers/acpi/utils.c164
-rw-r--r--drivers/android/binder.c2
-rw-r--r--drivers/android/binder_alloc.c7
-rw-r--r--drivers/base/arch_numa.c2
-rw-r--r--drivers/base/arch_topology.c56
-rw-r--r--drivers/base/power/main.c148
-rw-r--r--drivers/base/regmap/internal.h1
-rw-r--r--drivers/base/regmap/regmap-debugfs.c8
-rw-r--r--drivers/base/regmap/regmap-kunit.c60
-rw-r--r--drivers/base/regmap/regmap-ram.c4
-rw-r--r--drivers/base/regmap/regmap-raw-ram.c31
-rw-r--r--drivers/base/regmap/regmap.c2
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/ublk_drv.c9
-rw-r--r--drivers/block/zram/Kconfig15
-rw-r--r--drivers/block/zram/zram_drv.c57
-rw-r--r--drivers/block/zram/zram_drv.h2
-rw-r--r--drivers/char/random.c6
-rw-r--r--drivers/connector/cn_proc.c5
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c4
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c139
-rw-r--r--drivers/cpufreq/cpufreq.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c15
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c7
-rw-r--r--drivers/cpuidle/cpuidle-haltpoll.c9
-rw-r--r--drivers/crypto/ccp/sev-dev.c2
-rw-r--r--drivers/crypto/hisilicon/sgl.c6
-rw-r--r--drivers/dax/bus.c3
-rw-r--r--drivers/dax/bus.h1
-rw-r--r--drivers/dax/cxl.c1
-rw-r--r--drivers/dax/dax-private.h1
-rw-r--r--drivers/dax/hmem/hmem.c1
-rw-r--r--drivers/dax/kmem.c8
-rw-r--r--drivers/dax/pmem.c1
-rw-r--r--drivers/devfreq/devfreq.c80
-rw-r--r--drivers/edac/altera_edac.c21
-rw-r--r--drivers/edac/amd64_edac.c66
-rw-r--r--drivers/edac/amd64_edac.h1
-rw-r--r--drivers/edac/armada_xp_edac.c16
-rw-r--r--drivers/edac/aspeed_edac.c6
-rw-r--r--drivers/edac/bluefield_edac.c6
-rw-r--r--drivers/edac/cell_edac.c5
-rw-r--r--drivers/edac/cpc925_edac.c6
-rw-r--r--drivers/edac/dmc520_edac.c6
-rw-r--r--drivers/edac/edac_mc.c1
-rw-r--r--drivers/edac/edac_pci_sysfs.c4
-rw-r--r--drivers/edac/fsl_ddr_edac.c3
-rw-r--r--drivers/edac/fsl_ddr_edac.h2
-rw-r--r--drivers/edac/highbank_l2_edac.c5
-rw-r--r--drivers/edac/highbank_mc_edac.c5
-rw-r--r--drivers/edac/i7core_edac.c4
-rw-r--r--drivers/edac/igen6_edac.c194
-rw-r--r--drivers/edac/layerscape_edac.c2
-rw-r--r--drivers/edac/mce_amd.c526
-rw-r--r--drivers/edac/mpc85xx_edac.c13
-rw-r--r--drivers/edac/npcm_edac.c6
-rw-r--r--drivers/edac/octeon_edac-l2c.c6
-rw-r--r--drivers/edac/octeon_edac-lmc.c5
-rw-r--r--drivers/edac/octeon_edac-pc.c5
-rw-r--r--drivers/edac/octeon_edac-pci.c6
-rw-r--r--drivers/edac/pnd2_edac.c55
-rw-r--r--drivers/edac/ppc4xx_edac.c7
-rw-r--r--drivers/edac/qcom_edac.c6
-rw-r--r--drivers/edac/sb_edac.c10
-rw-r--r--drivers/edac/skx_common.c4
-rw-r--r--drivers/edac/synopsys_edac.c6
-rw-r--r--drivers/edac/thunderx_edac.c10
-rw-r--r--drivers/edac/ti_edac.c6
-rw-r--r--drivers/edac/xgene_edac.c6
-rw-r--r--drivers/edac/zynqmp_edac.c6
-rw-r--r--drivers/firewire/ohci.c51
-rw-r--r--drivers/firmware/efi/dev-path-parser.c7
-rw-r--r--drivers/firmware/efi/libstub/Makefile.zboot4
-rw-r--r--drivers/firmware/efi/libstub/x86-5lvl.c4
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c2
-rw-r--r--drivers/fpga/dfl.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c11
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c110
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c13
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h80
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h100
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c153
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c3
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c7
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c4
-rw-r--r--drivers/gpu/drm/drm_syncobj.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c13
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c39
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200er.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ev.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200se.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c28
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c171
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c86
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_device_test.c2
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_pool_test.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c22
-rw-r--r--drivers/i2c/i2c-core.h4
-rw-r--r--drivers/idle/intel_idle.c133
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h2
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c2
-rw-r--r--drivers/input/rmi4/rmi_spi.c2
-rw-r--r--drivers/iommu/amd/iommu.c4
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h2
-rw-r--r--drivers/iommu/dma-iommu.c2
-rw-r--r--drivers/iommu/intel/irq_remapping.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c4
-rw-r--r--drivers/irqchip/irq-qcom-mpm.c26
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c110
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c31
-rw-r--r--drivers/md/dm-bufio.c2
-rw-r--r--drivers/md/dm-crypt.c2
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/rave-sp.c4
-rw-r--r--drivers/mfd/tps6594-spi.c2
-rw-r--r--drivers/misc/cxl/cxl.h3
-rw-r--r--drivers/misc/genwqe/card_dev.c2
-rw-r--r--drivers/misc/genwqe/card_utils.c4
-rw-r--r--drivers/misc/ocxl/afu_irq.c2
-rw-r--r--drivers/misc/ocxl/context.c2
-rw-r--r--drivers/misc/ocxl/file.c2
-rw-r--r--drivers/misc/ocxl/link.c14
-rw-r--r--drivers/misc/ocxl/main.c2
-rw-r--r--drivers/mmc/core/block.c7
-rw-r--r--drivers/mmc/core/host.c1
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-mmc.c26
-rw-r--r--drivers/mmc/host/mmc_spi.c2
-rw-r--r--drivers/mmc/host/sdhci-sprd.c10
-rw-r--r--drivers/mtd/maps/vmu-flash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c4
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcma_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c408
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.h2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/iproc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c10
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c8
-rw-r--r--drivers/mtd/nand/raw/nand_base.c97
-rw-r--r--drivers/mtd/nand/raw/pl35x-nand-controller.c2
-rw-r--r--drivers/mtd/nand/raw/rockchip-nand-controller.c7
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c2
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c13
-rw-r--r--drivers/mtd/nand/spi/core.c2
-rw-r--r--drivers/mtd/spi-nor/atmel.c16
-rw-r--r--drivers/mtd/spi-nor/core.c177
-rw-r--r--drivers/mtd/spi-nor/core.h24
-rw-r--r--drivers/mtd/spi-nor/debugfs.c2
-rw-r--r--drivers/mtd/spi-nor/micron-st.c59
-rw-r--r--drivers/mtd/spi-nor/sfdp.c29
-rw-r--r--drivers/mtd/spi-nor/sfdp.h7
-rw-r--r--drivers/mtd/spi-nor/spansion.c4
-rw-r--r--drivers/mtd/spi-nor/sst.c6
-rw-r--r--drivers/mtd/spi-nor/swp.c25
-rw-r--r--drivers/mtd/spi-nor/sysfs.c2
-rw-r--r--drivers/mtd/ssfdc.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c12
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h6
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c42
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c9
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c4
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c2
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c65
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c4
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h6
-rw-r--r--drivers/net/usb/ax88172a.c4
-rw-r--r--drivers/net/virtio_net.c60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c17
-rw-r--r--drivers/nubus/bus.c3
-rw-r--r--drivers/opp/core.c294
-rw-r--r--drivers/opp/of.c57
-rw-r--r--drivers/opp/opp.h24
-rw-r--r--drivers/opp/ti-opp-supply.c13
-rw-r--r--drivers/pci/access.c12
-rw-r--r--drivers/pci/controller/pci-hyperv.c7
-rw-r--r--drivers/pci/pci.c6
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aspm.c84
-rw-r--r--drivers/perf/Kconfig7
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c6
-rw-r--r--drivers/perf/arm-cmn.c2
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.c4
-rw-r--r--drivers/perf/arm_dsu_pmu.c6
-rw-r--r--drivers/perf/arm_pmu.c12
-rw-r--r--drivers/perf/arm_pmuv3.c242
-rw-r--r--drivers/perf/arm_spe_pmu.c22
-rw-r--r--drivers/perf/dwc_pcie_pmu.c792
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c45
-rw-r--r--drivers/perf/fsl_imx9_ddr_perf.c6
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_uc_pmu.c4
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c2
-rw-r--r--drivers/platform/mips/rs780e-acpi.c12
-rw-r--r--drivers/platform/surface/aggregator/Kconfig2
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c30
-rw-r--r--drivers/platform/x86/p2sb.c172
-rw-r--r--drivers/pnp/driver.c2
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c12
-rw-r--r--drivers/pnp/pnpbios/bioscalls.c2
-rw-r--r--drivers/ptp/ptp_ocp.c2
-rw-r--r--drivers/rapidio/devices/tsi721.c67
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c4
-rw-r--r--drivers/regulator/Kconfig10
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/arizona-ldo1.c8
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c5
-rw-r--r--drivers/regulator/core.c130
-rw-r--r--drivers/regulator/db8500-prcmu.c6
-rw-r--r--drivers/regulator/event.c91
-rw-r--r--drivers/regulator/of_regulator.c9
-rw-r--r--drivers/regulator/palmas-regulator.c2
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c177
-rw-r--r--drivers/regulator/qcom_smd-regulator.c35
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c34
-rw-r--r--drivers/regulator/regnl.h13
-rw-r--r--drivers/regulator/stm32-vrefbuf.c6
-rw-r--r--drivers/regulator/stpmic1_regulator.c2
-rw-r--r--drivers/regulator/uniphier-regulator.c6
-rw-r--r--drivers/regulator/userspace-consumer.c6
-rw-r--r--drivers/regulator/virtual.c6
-rw-r--r--drivers/regulator/wm8350-regulator.c6
-rw-r--r--drivers/s390/block/dasd.c6
-rw-r--r--drivers/s390/cio/vfio_ccw_chp.c2
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c4
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c6
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c2
-rw-r--r--drivers/spi/Kconfig3
-rw-r--r--drivers/spi/atmel-quadspi.c2
-rw-r--r--drivers/spi/spi-ath79.c2
-rw-r--r--drivers/spi/spi-axi-spi-engine.c519
-rw-r--r--drivers/spi/spi-bcm-qspi.c2
-rw-r--r--drivers/spi/spi-cadence-quadspi.c4
-rw-r--r--drivers/spi/spi-cadence-xspi.c1
-rw-r--r--drivers/spi/spi-cs42l43.c2
-rw-r--r--drivers/spi/spi-dw-mmio.c1
-rw-r--r--drivers/spi/spi-geni-qcom.c96
-rw-r--r--drivers/spi/spi-ingenic.c15
-rw-r--r--drivers/spi/spi-intel.c10
-rw-r--r--drivers/spi/spi-ljca.c2
-rw-r--r--drivers/spi/spi-mem.c6
-rw-r--r--drivers/spi/spi-mpc52xx.c1
-rw-r--r--drivers/spi/spi-npcm-fiu.c2
-rw-r--r--drivers/spi/spi-pl022.c382
-rw-r--r--drivers/spi/spi-sh-msiof.c17
-rw-r--r--drivers/spi/spi-sprd-adi.c32
-rw-r--r--drivers/spi/spi-sprd.c4
-rw-r--r--drivers/spi/spi-st-ssc4.c70
-rw-r--r--drivers/spi/spi-stm32-qspi.c18
-rw-r--r--drivers/spi/spi-stm32.c638
-rw-r--r--drivers/spi/spi-sun4i.c72
-rw-r--r--drivers/spi/spi-sun6i.c148
-rw-r--r--drivers/spi/spi-sunplus-sp7021.c88
-rw-r--r--drivers/spi/spi-synquacer.c82
-rw-r--r--drivers/spi/spi-tegra114.c118
-rw-r--r--drivers/spi/spi-tegra20-sflash.c76
-rw-r--r--drivers/spi/spi-tegra20-slink.c98
-rw-r--r--drivers/spi/spi-tegra210-quad.c80
-rw-r--r--drivers/spi/spi-ti-qspi.c103
-rw-r--r--drivers/spi/spi-topcliff-pch.c226
-rw-r--r--drivers/spi/spi-uniphier.c194
-rw-r--r--drivers/spi/spi-wpcm-fiu.c4
-rw-r--r--drivers/spi/spi-xcomm.c32
-rw-r--r--drivers/spi/spi-xilinx.c58
-rw-r--r--drivers/spi/spi-xlp.c40
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c30
-rw-r--r--drivers/spi/spi-zynq-qspi.c28
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c50
-rw-r--r--drivers/spi/spi.c262
-rw-r--r--drivers/thermal/Kconfig4
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/amlogic_thermal.c19
-rw-r--r--drivers/thermal/cpuidle_cooling.c4
-rw-r--r--drivers/thermal/gov_power_allocator.c364
-rw-r--r--drivers/thermal/intel/Kconfig2
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c8
-rw-r--r--drivers/thermal/intel/intel_hfi.c91
-rw-r--r--drivers/thermal/loongson2_thermal.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c529
-rw-r--r--drivers/thermal/sun8i_thermal.c13
-rw-r--r--drivers/thermal/thermal_core.c193
-rw-r--r--drivers/thermal/thermal_core.h9
-rw-r--r--drivers/thermal/thermal_helpers.c17
-rw-r--r--drivers/thermal/thermal_hwmon.c5
-rw-r--r--drivers/thermal/thermal_netlink.c44
-rw-r--r--drivers/thermal/thermal_of.c6
-rw-r--r--drivers/thermal/thermal_sysfs.c121
-rw-r--r--drivers/thermal/thermal_trace_ipa.h50
-rw-r--r--drivers/thermal/thermal_trip.c80
-rw-r--r--drivers/usb/gadget/function/f_fs.c4
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c2
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c2
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c8
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c2
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c6
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c12
-rw-r--r--drivers/vfio/platform/vfio_platform_irq.c4
-rw-r--r--drivers/vhost/vdpa.c4
-rw-r--r--drivers/vhost/vhost.c10
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/fbdev/hyperv_fb.c6
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c2
-rw-r--r--drivers/virt/acrn/ioeventfd.c2
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c6
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_mem.c8
-rw-r--r--drivers/xen/privcmd.c2
-rw-r--r--fs/Kconfig26
-rw-r--r--fs/Makefile1
-rw-r--r--fs/adfs/inode.c11
-rw-r--r--fs/afs/write.c8
-rw-r--r--fs/aio.c87
-rw-r--r--fs/attr.c2
-rw-r--r--fs/backing-file.c336
-rw-r--r--fs/bcachefs/Makefile2
-rw-r--r--fs/bcachefs/acl.c3
-rw-r--r--fs/bcachefs/bcachefs.h1
-rw-r--r--fs/bcachefs/bcachefs_format.h51
-rw-r--r--fs/bcachefs/btree_iter.c35
-rw-r--r--fs/bcachefs/darray.c24
-rw-r--r--fs/bcachefs/darray.h48
-rw-r--r--fs/bcachefs/errcode.h3
-rw-r--r--fs/bcachefs/error.c3
-rw-r--r--fs/bcachefs/fs-io-direct.c13
-rw-r--r--fs/bcachefs/fs-ioctl.c16
-rw-r--r--fs/bcachefs/fs.c40
-rw-r--r--fs/bcachefs/io_write.c82
-rw-r--r--fs/bcachefs/printbuf.c22
-rw-r--r--fs/bcachefs/printbuf.h2
-rw-r--r--fs/bcachefs/recovery.c137
-rw-r--r--fs/bcachefs/recovery.h3
-rw-r--r--fs/bcachefs/recovery_types.h86
-rw-r--r--fs/bcachefs/sb-clean.c2
-rw-r--r--fs/bcachefs/sb-downgrade.c188
-rw-r--r--fs/bcachefs/sb-downgrade.h10
-rw-r--r--fs/bcachefs/sb-errors.c6
-rw-r--r--fs/bcachefs/sb-errors.h253
-rw-r--r--fs/bcachefs/sb-errors_types.h253
-rw-r--r--fs/bcachefs/subvolume.c18
-rw-r--r--fs/bcachefs/subvolume.h3
-rw-r--r--fs/bcachefs/super-io.c105
-rw-r--r--fs/bcachefs/super-io.h12
-rw-r--r--fs/bcachefs/super_types.h1
-rw-r--r--fs/bcachefs/util.h1
-rw-r--r--fs/bcachefs/xattr.c3
-rw-r--r--fs/bfs/file.c9
-rw-r--r--fs/btrfs/extent_io.c52
-rw-r--r--fs/btrfs/inode.c2
-rw-r--r--fs/btrfs/ioctl.c12
-rw-r--r--fs/btrfs/subpage.c4
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/buffer.c283
-rw-r--r--fs/cachefiles/daemon.c15
-rw-r--r--fs/cachefiles/interface.c7
-rw-r--r--fs/cachefiles/internal.h59
-rw-r--r--fs/cachefiles/io.c5
-rw-r--r--fs/cachefiles/ondemand.c166
-rw-r--r--fs/ceph/addr.c4
-rw-r--r--fs/ceph/file.c13
-rw-r--r--fs/coda/file.c2
-rw-r--r--fs/dax.c2
-rw-r--r--fs/dcache.c8
-rw-r--r--fs/debugfs/file.c28
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/eventfd.c46
-rw-r--r--fs/exec.c3
-rw-r--r--fs/ext2/inode.c2
-rw-r--r--fs/ext4/inline.c3
-rw-r--r--fs/ext4/inode.c10
-rw-r--r--fs/ext4/ioctl.c4
-rw-r--r--fs/ext4/page-io.c2
-rw-r--r--fs/ext4/super.c8
-rw-r--r--fs/f2fs/compress.c2
-rw-r--r--fs/f2fs/file.c4
-rw-r--r--fs/f2fs/inode.c2
-rw-r--r--fs/file.c97
-rw-r--r--fs/file_table.c22
-rw-r--r--fs/freevxfs/vxfs_bmap.c8
-rw-r--r--fs/freevxfs/vxfs_immed.c2
-rw-r--r--fs/freevxfs/vxfs_lookup.c3
-rw-r--r--fs/fuse/file.c5
-rw-r--r--fs/gfs2/aops.c47
-rw-r--r--fs/gfs2/glock.c2
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/gfs2/quota.c6
-rw-r--r--fs/hfs/inode.c8
-rw-r--r--fs/hfsplus/inode.c8
-rw-r--r--fs/hfsplus/wrapper.c5
-rw-r--r--fs/hugetlbfs/inode.c10
-rw-r--r--fs/inode.c26
-rw-r--r--fs/internal.h13
-rw-r--r--fs/ioctl.c3
-rw-r--r--fs/iomap/buffered-io.c14
-rw-r--r--fs/jffs2/debug.c2
-rw-r--r--fs/minix/inode.c9
-rw-r--r--fs/mnt_idmapping.c159
-rw-r--r--fs/mount.h27
-rw-r--r--fs/mpage.c62
-rw-r--r--fs/namei.c31
-rw-r--r--fs/namespace.c630
-rw-r--r--fs/nfs/file.c2
-rw-r--r--fs/nfs/nfs42xattr.c8
-rw-r--r--fs/nfs/nfs4file.c5
-rw-r--r--fs/nfs/write.c12
-rw-r--r--fs/nfsd/filecache.c4
-rw-r--r--fs/nfsd/nfsctl.c31
-rw-r--r--fs/nfsd/nfsd.h7
-rw-r--r--fs/nfsd/vfs.c7
-rw-r--r--fs/nilfs2/btnode.c62
-rw-r--r--fs/nilfs2/cpfile.c28
-rw-r--r--fs/nilfs2/dir.c244
-rw-r--r--fs/nilfs2/file.c28
-rw-r--r--fs/nilfs2/gcinode.c4
-rw-r--r--fs/nilfs2/inode.c15
-rw-r--r--fs/nilfs2/ioctl.c10
-rw-r--r--fs/nilfs2/mdt.c23
-rw-r--r--fs/nilfs2/namei.c38
-rw-r--r--fs/nilfs2/nilfs.h20
-rw-r--r--fs/nilfs2/page.c93
-rw-r--r--fs/nilfs2/page.h12
-rw-r--r--fs/nilfs2/segment.c158
-rw-r--r--fs/nilfs2/sufile.c9
-rw-r--r--fs/nilfs2/super.c8
-rw-r--r--fs/ntfs/aops.c20
-rw-r--r--fs/ntfs/dir.c3
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/aops.c17
-rw-r--r--fs/ocfs2/file.c2
-rw-r--r--fs/ocfs2/ocfs2_trace.h2
-rw-r--r--fs/open.c47
-rw-r--r--fs/overlayfs/Kconfig1
-rw-r--r--fs/overlayfs/copy_up.c30
-rw-r--r--fs/overlayfs/file.c247
-rw-r--r--fs/overlayfs/overlayfs.h8
-rw-r--r--fs/overlayfs/super.c12
-rw-r--r--fs/pipe.c24
-rw-r--r--fs/pnode.c2
-rw-r--r--fs/posix_acl.c4
-rw-r--r--fs/proc/base.c29
-rw-r--r--fs/proc/internal.h2
-rw-r--r--fs/proc/task_mmu.c23
-rw-r--r--fs/proc_namespace.c13
-rw-r--r--fs/ramfs/file-nommu.c2
-rw-r--r--fs/read_write.c235
-rw-r--r--fs/readdir.c4
-rw-r--r--fs/reiserfs/stree.c2
-rw-r--r--fs/remap_range.c45
-rw-r--r--fs/smb/client/cifsfs.c5
-rw-r--r--fs/smb/client/cifsglob.h1
-rw-r--r--fs/smb/client/connect.c27
-rw-r--r--fs/smb/client/file.c6
-rw-r--r--fs/smb/client/smb2ops.c34
-rw-r--r--fs/splice.c243
-rw-r--r--fs/squashfs/file.c3
-rw-r--r--fs/squashfs/file_direct.c6
-rw-r--r--fs/stat.c11
-rw-r--r--fs/super.c500
-rw-r--r--fs/sysv/itree.c9
-rw-r--r--fs/tracefs/inode.c4
-rw-r--r--fs/tracefs/internal.h2
-rw-r--r--fs/ufs/inode.c11
-rw-r--r--fs/userfaultfd.c72
-rw-r--r--fs/xfs/xfs_aops.c2
-rw-r--r--fs/xfs/xfs_buf.c6
-rw-r--r--fs/xfs/xfs_dquot.c2
-rw-r--r--fs/xfs/xfs_fsops.c4
-rw-r--r--fs/xfs/xfs_qm.c2
-rw-r--r--fs/xfs/xfs_super.c24
-rw-r--r--fs/zonefs/file.c2
-rw-r--r--include/acpi/acpi_bus.h169
-rw-r--r--include/acpi/cppc_acpi.h2
-rw-r--r--include/acpi/video.h9
-rw-r--r--include/asm-generic/numa.h2
-rw-r--r--include/asm-generic/unaligned.h24
-rw-r--r--include/drm/ttm/ttm_pool.h2
-rw-r--r--include/linux/acpi.h22
-rw-r--r--include/linux/arch_topology.h8
-rw-r--r--include/linux/async.h2
-rw-r--r--include/linux/backing-file.h42
-rw-r--r--include/linux/blk_types.h8
-rw-r--r--include/linux/blkdev.h29
-rw-r--r--include/linux/buffer_head.h9
-rw-r--r--include/linux/cgroup-defs.h21
-rw-r--r--include/linux/cgroup.h4
-rw-r--r--include/linux/cleanup.h52
-rw-r--r--include/linux/cpufreq.h1
-rw-r--r--include/linux/cpuhotplug.h18
-rw-r--r--include/linux/cpuset.h6
-rw-r--r--include/linux/crash_core.h6
-rw-r--r--include/linux/crc-ccitt.h7
-rw-r--r--include/linux/damon.h22
-rw-r--r--include/linux/edac.h3
-rw-r--r--include/linux/energy_model.h7
-rw-r--r--include/linux/entry-common.h95
-rw-r--r--include/linux/eventfd.h17
-rw-r--r--include/linux/evm.h6
-rw-r--r--include/linux/fdtable.h19
-rw-r--r--include/linux/file.h12
-rw-r--r--include/linux/fs.h127
-rw-r--r--include/linux/fsnotify.h50
-rw-r--r--include/linux/gfp_types.h17
-rw-r--r--include/linux/highmem.h76
-rw-r--r--include/linux/huge_mm.h184
-rw-r--r--include/linux/hugetlb.h2
-rw-r--r--include/linux/init_task.h7
-rw-r--r--include/linux/ioport.h3
-rw-r--r--include/linux/kasan.h162
-rw-r--r--include/linux/kexec.h9
-rw-r--r--include/linux/ksm.h10
-rw-r--r--include/linux/list_lru.h88
-rw-r--r--include/linux/lockdep_types.h2
-rw-r--r--include/linux/lsm_hook_defs.h6
-rw-r--r--include/linux/lsm_hooks.h17
-rw-r--r--include/linux/maple_tree.h349
-rw-r--r--include/linux/memblock.h1
-rw-r--r--include/linux/memcontrol.h37
-rw-r--r--include/linux/mempool.h1
-rw-r--r--include/linux/memremap.h12
-rw-r--r--include/linux/mm.h50
-rw-r--r--include/linux/mm_types.h30
-rw-r--r--include/linux/mmzone.h66
-rw-r--r--include/linux/mnt_idmapping.h3
-rw-r--r--include/linux/mount.h5
-rw-r--r--include/linux/mtd/rawnand.h15
-rw-r--r--include/linux/mutex.h3
-rw-r--r--include/linux/nubus.h2
-rw-r--r--include/linux/page-flags.h9
-rw-r--r--include/linux/pageblock-flags.h4
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/perf/arm_pmu.h28
-rw-r--r--include/linux/perf/arm_pmuv3.h34
-rw-r--r--include/linux/perf_event.h22
-rw-r--r--include/linux/pgtable.h9
-rw-r--r--include/linux/phy.h1
-rw-r--r--include/linux/pm_opp.h28
-rw-r--r--include/linux/pnp.h2
-rw-r--r--include/linux/property.h7
-rw-r--r--include/linux/reboot.h12
-rw-r--r--include/linux/regulator/consumer.h47
-rw-r--r--include/linux/regulator/driver.h7
-rw-r--r--include/linux/regulator/machine.h18
-rw-r--r--include/linux/rmap.h411
-rw-r--r--include/linux/rwsem.h8
-rw-r--r--include/linux/sched.h77
-rw-r--r--include/linux/sched/isolation.h4
-rw-r--r--include/linux/sched/signal.h4
-rw-r--r--include/linux/sched/task.h2
-rw-r--r--include/linux/sched/topology.h8
-rw-r--r--include/linux/security.h56
-rw-r--r--include/linux/slab.h24
-rw-r--r--include/linux/slab_def.h124
-rw-r--r--include/linux/slub_def.h204
-rw-r--r--include/linux/spi/spi-mem.h2
-rw-r--r--include/linux/spi/spi.h65
-rw-r--r--include/linux/spinlock.h41
-rw-r--r--include/linux/splice.h51
-rw-r--r--include/linux/stackdepot.h61
-rw-r--r--include/linux/surface_aggregator/serial_hub.h4
-rw-r--r--include/linux/swap.h8
-rw-r--r--include/linux/syscalls.h14
-rw-r--r--include/linux/thermal.h26
-rw-r--r--include/linux/uidgid.h13
-rw-r--r--include/linux/uio.h2
-rw-r--r--include/linux/userfaultfd_k.h11
-rw-r--r--include/linux/vm_event_item.h4
-rw-r--r--include/linux/vmstat.h60
-rw-r--r--include/linux/workqueue.h2
-rw-r--r--include/linux/writeback.h1
-rw-r--r--include/linux/zswap.h32
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h2
-rw-r--r--include/net/scm.h9
-rw-r--r--include/net/tcp.h2
-rw-r--r--include/net/tcp_ao.h26
-rw-r--r--include/trace/events/ksm.h33
-rw-r--r--include/trace/events/sched.h15
-rw-r--r--include/trace/events/timer.h40
-rw-r--r--include/uapi/asm-generic/unistd.h15
-rw-r--r--include/uapi/linux/fs.h1
-rw-r--r--include/uapi/linux/kexec.h1
-rw-r--r--include/uapi/linux/lsm.h90
-rw-r--r--include/uapi/linux/mount.h70
-rw-r--r--include/uapi/linux/perf_event.h13
-rw-r--r--include/uapi/linux/stat.h1
-rw-r--r--include/uapi/linux/userfaultfd.h29
-rw-r--r--include/uapi/regulator/regulator.h90
-rw-r--r--init/Kconfig102
-rw-r--r--init/init_task.c10
-rw-r--r--io_uring/alloc_cache.h5
-rw-r--r--io_uring/io_uring.c4
-rw-r--r--io_uring/openclose.c2
-rw-r--r--io_uring/splice.c4
-rw-r--r--kernel/async.c85
-rw-r--r--kernel/audit.c31
-rw-r--r--kernel/cgroup/cgroup-internal.h4
-rw-r--r--kernel/cgroup/cgroup-v1.c34
-rw-r--r--kernel/cgroup/cgroup.c45
-rw-r--r--kernel/cgroup/cpuset.c297
-rw-r--r--kernel/cgroup/rstat.c150
-rw-r--r--kernel/cpu.c5
-rw-r--r--kernel/crash_core.c91
-rw-r--r--kernel/dma/pool.c6
-rw-r--r--kernel/dma/swiotlb.c4
-rw-r--r--kernel/entry/common.c108
-rw-r--r--kernel/events/core.c46
-rw-r--r--kernel/events/ring_buffer.c10
-rw-r--r--kernel/events/uprobes.c4
-rw-r--r--kernel/fork.c70
-rw-r--r--kernel/freezer.c1
-rw-r--r--kernel/kexec_core.c21
-rw-r--r--kernel/kexec_file.c20
-rw-r--r--kernel/locking/mutex.c5
-rw-r--r--kernel/pid.c2
-rw-r--r--kernel/power/hibernate.c10
-rw-r--r--kernel/power/main.c16
-rw-r--r--kernel/power/power.h2
-rw-r--r--kernel/power/snapshot.c16
-rw-r--r--kernel/power/swap.c41
-rw-r--r--kernel/ptrace.c141
-rw-r--r--kernel/reboot.c51
-rw-r--r--kernel/relay.c162
-rw-r--r--kernel/resource.c57
-rw-r--r--kernel/sched/core.c140
-rw-r--r--kernel/sched/cpufreq_schedutil.c90
-rw-r--r--kernel/sched/deadline.c479
-rw-r--r--kernel/sched/debug.c18
-rw-r--r--kernel/sched/fair.c462
-rw-r--r--kernel/sched/features.h1
-rw-r--r--kernel/sched/idle.c30
-rw-r--r--kernel/sched/pelt.h4
-rw-r--r--kernel/sched/rt.c15
-rw-r--r--kernel/sched/sched.h146
-rw-r--r--kernel/sched/stop_task.c13
-rw-r--r--kernel/seccomp.c2
-rw-r--r--kernel/signal.c28
-rw-r--r--kernel/stacktrace.c2
-rw-r--r--kernel/sys_ni.c3
-rw-r--r--kernel/time/tick-internal.h3
-rw-r--r--kernel/time/tick-sched.c25
-rw-r--r--kernel/time/timer.c110
-rw-r--r--kernel/trace/trace_events_user.c4
-rw-r--r--kernel/user_namespace.c20
-rw-r--r--kernel/watch_queue.c2
-rw-r--r--kernel/watchdog.c40
-rw-r--r--kernel/workqueue.c167
-rw-r--r--lib/Kconfig10
-rw-r--r--lib/Kconfig.debug7
-rw-r--r--lib/Kconfig.kasan34
-rw-r--r--lib/Kconfig.kfence2
-rw-r--r--lib/Kconfig.kmsan2
-rw-r--r--lib/crc-ccitt.c55
-rw-r--r--lib/debugobjects.c200
-rw-r--r--lib/fw_table.c30
-rw-r--r--lib/iov_iter.c13
-rw-r--r--lib/maple_tree.c1088
-rw-r--r--lib/stackdepot.c461
-rw-r--r--lib/test_ida.c2
-rw-r--r--lib/test_maple_tree.c331
-rw-r--r--lib/test_meminit.c2
-rw-r--r--lib/trace_readwrite.c2
-rw-r--r--lib/ubsan.c7
-rw-r--r--mm/Kconfig100
-rw-r--r--mm/Kconfig.debug16
-rw-r--r--mm/Makefile6
-rw-r--r--mm/cma.c2
-rw-r--r--mm/compaction.c9
-rw-r--r--mm/damon/core-test.h60
-rw-r--r--mm/damon/core.c70
-rw-r--r--mm/damon/dbgfs-test.h2
-rw-r--r--mm/damon/dbgfs.c2
-rw-r--r--mm/damon/modules-common.c2
-rw-r--r--mm/damon/sysfs-common.h3
-rw-r--r--mm/damon/sysfs-schemes.c272
-rw-r--r--mm/damon/sysfs.c27
-rw-r--r--mm/damon/vaddr-test.h2
-rw-r--r--mm/damon/vaddr.c4
-rw-r--r--mm/debug_page_alloc.c2
-rw-r--r--mm/debug_vm_pgtable.c4
-rw-r--r--mm/dmapool.c2
-rw-r--r--mm/filemap.c14
-rw-r--r--mm/folio-compat.c20
-rw-r--r--mm/gup.c4
-rw-r--r--mm/highmem.c2
-rw-r--r--mm/huge_memory.c458
-rw-r--r--mm/hugetlb.c27
-rw-r--r--mm/hugetlb_vmemmap.c276
-rw-r--r--mm/internal.h41
-rw-r--r--mm/kasan/common.c293
-rw-r--r--mm/kasan/generic.c175
-rw-r--r--mm/kasan/hw_tags.c8
-rw-r--r--mm/kasan/kasan.h96
-rw-r--r--mm/kasan/kasan_test.c793
-rw-r--r--mm/kasan/quarantine.c17
-rw-r--r--mm/kasan/report.c47
-rw-r--r--mm/kasan/report_generic.c6
-rw-r--r--mm/kasan/report_tags.c27
-rw-r--r--mm/kasan/shadow.c18
-rw-r--r--mm/kasan/tags.c24
-rw-r--r--mm/kfence/core.c4
-rw-r--r--mm/khugepaged.c73
-rw-r--r--mm/kmemleak.c186
-rw-r--r--mm/kmsan/core.c7
-rw-r--r--mm/kmsan/init.c8
-rw-r--r--mm/ksm.c388
-rw-r--r--mm/list_lru.c79
-rw-r--r--mm/madvise.c22
-rw-r--r--mm/memblock.c41
-rw-r--r--mm/memcontrol.c346
-rw-r--r--mm/memory-failure.c119
-rw-r--r--mm/memory.c296
-rw-r--r--mm/memory_hotplug.c219
-rw-r--r--mm/mempool.c81
-rw-r--r--mm/memremap.c32
-rw-r--r--mm/migrate.c45
-rw-r--r--mm/migrate_device.c64
-rw-r--r--mm/mm_init.c71
-rw-r--r--mm/mmap.c49
-rw-r--r--mm/mmu_gather.c2
-rw-r--r--mm/mmzone.c1
-rw-r--r--mm/oom_kill.c7
-rw-r--r--mm/page-writeback.c58
-rw-r--r--mm/page_alloc.c84
-rw-r--r--mm/page_io.c84
-rw-r--r--mm/page_isolation.c17
-rw-r--r--mm/page_owner.c16
-rw-r--r--mm/page_poison.c8
-rw-r--r--mm/page_reporting.c6
-rw-r--r--mm/page_vma_mapped.c3
-rw-r--r--mm/pagewalk.c29
-rw-r--r--mm/process_vm_access.c15
-rw-r--r--mm/readahead.c14
-rw-r--r--mm/rmap.c499
-rw-r--r--mm/shmem.c17
-rw-r--r--mm/show_mem.c8
-rw-r--r--mm/shrinker.c2
-rw-r--r--mm/shuffle.h2
-rw-r--r--mm/slab.c4026
-rw-r--r--mm/slab.h551
-rw-r--r--mm/slab_common.c231
-rw-r--r--mm/slub.c1192
-rw-r--r--mm/sparse.c17
-rw-r--r--mm/swap.h28
-rw-r--r--mm/swap_state.c121
-rw-r--r--mm/swapfile.c105
-rw-r--r--mm/truncate.c51
-rw-r--r--mm/userfaultfd.c627
-rw-r--r--mm/util.c8
-rw-r--r--mm/vmpressure.c2
-rw-r--r--mm/vmscan.c293
-rw-r--r--mm/vmstat.c21
-rw-r--r--mm/workingset.c46
-rw-r--r--mm/zsmalloc.c5
-rw-r--r--mm/zswap.c746
-rw-r--r--net/compat.c2
-rw-r--r--net/core/scm.c2
-rw-r--r--net/core/skbuff.c10
-rw-r--r--net/core/sock.c12
-rw-r--r--net/ipv4/tcp.c10
-rw-r--r--net/ipv4/tcp_sigpool.c5
-rw-r--r--net/ipv6/addrconf.c18
-rw-r--r--net/mac80211/debugfs_netdev.c9
-rw-r--r--net/mac80211/driver-ops.c14
-rw-r--r--net/mptcp/subflow.c13
-rw-r--r--net/netfilter/nf_nat_ovs.c3
-rw-r--r--net/netfilter/nf_tables_api.c2
-rw-r--r--net/netfilter/nf_tables_core.c2
-rw-r--r--net/netfilter/nft_immediate.c2
-rw-r--r--net/netlabel/netlabel_calipso.c49
-rw-r--r--net/nfc/llcp_core.c39
-rw-r--r--net/nfc/llcp_sock.c5
-rw-r--r--net/qrtr/ns.c4
-rw-r--r--net/sched/em_text.c4
-rw-r--r--net/smc/smc_diag.c3
-rw-r--r--net/smc/smc_ib.c2
-rw-r--r--net/socket.c8
-rw-r--r--samples/Kconfig6
-rw-r--r--samples/Makefile1
-rw-r--r--samples/cgroup/Makefile5
-rw-r--r--samples/cgroup/cgroup_event_listener.c (renamed from tools/cgroup/cgroup_event_listener.c)0
-rw-r--r--samples/cgroup/memcg_event_listener.c330
-rw-r--r--samples/vfio-mdev/mtty.c4
-rw-r--r--scripts/Makefile.extrawarn4
-rwxr-xr-xscripts/checkpatch.pl2
-rwxr-xr-xscripts/checkstack.pl45
-rwxr-xr-xscripts/decode_stacktrace.sh3
-rwxr-xr-xscripts/decodecode5
-rw-r--r--scripts/gdb/linux/page_owner.py58
-rw-r--r--scripts/gdb/linux/slab.py3
-rw-r--r--scripts/gdb/linux/stackdepot.py6
-rw-r--r--scripts/spelling.txt14
-rw-r--r--security/Makefile1
-rw-r--r--security/apparmor/apparmorfs.c1
-rw-r--r--security/apparmor/include/procattr.h2
-rw-r--r--security/apparmor/lsm.c90
-rw-r--r--security/apparmor/mount.c4
-rw-r--r--security/apparmor/procattr.c10
-rw-r--r--security/bpf/hooks.c9
-rw-r--r--security/commoncap.c8
-rw-r--r--security/integrity/evm/evm_main.c42
-rw-r--r--security/integrity/ima/Kconfig10
-rw-r--r--security/integrity/ima/ima_crypto.c2
-rw-r--r--security/integrity/ima/ima_kexec.c4
-rw-r--r--security/keys/encrypted-keys/encrypted.c4
-rw-r--r--security/keys/keyctl.c5
-rw-r--r--security/landlock/cred.c2
-rw-r--r--security/landlock/fs.c28
-rw-r--r--security/landlock/net.c2
-rw-r--r--security/landlock/ptrace.c2
-rw-r--r--security/landlock/ruleset.c7
-rw-r--r--security/landlock/setup.c6
-rw-r--r--security/landlock/setup.h1
-rw-r--r--security/loadpin/loadpin.c9
-rw-r--r--security/lockdown/lockdown.c8
-rw-r--r--security/lsm_syscalls.c120
-rw-r--r--security/safesetid/lsm.c9
-rw-r--r--security/security.c285
-rw-r--r--security/selinux/hooks.c209
-rw-r--r--security/selinux/include/audit.h1
-rw-r--r--security/selinux/include/avc.h41
-rw-r--r--security/selinux/include/avc_ss.h2
-rw-r--r--security/selinux/include/classmap.h342
-rw-r--r--security/selinux/include/conditional.h4
-rw-r--r--security/selinux/include/ima.h2
-rw-r--r--security/selinux/include/initial_sid_to_string.h57
-rw-r--r--security/selinux/include/netif.h4
-rw-r--r--security/selinux/include/netlabel.h53
-rw-r--r--security/selinux/include/objsec.h129
-rw-r--r--security/selinux/include/policycap.h2
-rw-r--r--security/selinux/include/policycap_names.h4
-rw-r--r--security/selinux/include/security.h161
-rw-r--r--security/selinux/include/xfrm.h4
-rw-r--r--security/selinux/selinuxfs.c144
-rw-r--r--security/selinux/ss/avtab.c101
-rw-r--r--security/selinux/ss/policydb.c38
-rw-r--r--security/selinux/ss/services.c13
-rw-r--r--security/smack/smack_lsm.c93
-rw-r--r--security/tomoyo/tomoyo.c10
-rw-r--r--security/yama/yama_lsm.c8
-rw-r--r--sound/pci/hda/cs35l41_hda_property.c4
-rw-r--r--sound/pci/hda/cs35l56_hda_spi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c3
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c244
-rw-r--r--sound/soc/codecs/tas2781-comlib.c4
-rw-r--r--sound/soc/codecs/tas2781-i2c.c2
-rw-r--r--sound/soc/fsl/fsl_rpmsg.c10
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-dai-adda.c2
-rw-r--r--sound/soc/meson/g12a-toacodec.c5
-rw-r--r--sound/soc/meson/g12a-tohdmitx.c8
-rw-r--r--sound/soc/sof/intel/hda-codec.c18
-rw-r--r--sound/usb/mixer_scarlett2.c4
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/cgroup/Makefile11
-rw-r--r--tools/include/linux/rwsem.h4
-rw-r--r--tools/include/linux/spinlock.h1
-rw-r--r--tools/include/perf/arm_pmuv3.h43
-rw-r--r--tools/include/uapi/linux/fs.h1
-rw-r--r--tools/objtool/check.c2
-rw-r--r--tools/perf/Documentation/perf-intel-pt.txt2
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl5
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl5
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl5
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl5
-rw-r--r--tools/testing/memblock/linux/mmzone.h6
-rw-r--r--tools/testing/radix-tree/linux.c45
-rw-r--r--tools/testing/radix-tree/linux/maple_tree.h2
-rw-r--r--tools/testing/radix-tree/maple.c396
-rw-r--r--tools/testing/selftests/Makefile3
-rw-r--r--tools/testing/selftests/arm64/abi/tpidr2.c18
-rw-r--r--tools/testing/selftests/arm64/fp/sve-test.S10
-rw-r--r--tools/testing/selftests/arm64/fp/vec-syscfg.c14
-rw-r--r--tools/testing/selftests/arm64/fp/za-test.S6
-rw-r--r--tools/testing/selftests/arm64/fp/zt-test.S5
-rwxr-xr-xtools/testing/selftests/cgroup/test_cpuset_prs.sh222
-rw-r--r--tools/testing/selftests/cgroup/test_freezer.c2
-rw-r--r--tools/testing/selftests/cgroup/test_zswap.c74
-rw-r--r--tools/testing/selftests/damon/Makefile3
-rw-r--r--tools/testing/selftests/damon/_damon_sysfs.py322
-rw-r--r--tools/testing/selftests/damon/access_memory.c41
-rwxr-xr-xtools/testing/selftests/damon/sysfs.sh27
-rw-r--r--tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_hang.py33
-rw-r--r--tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_wss_estimation.py55
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh6
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/.gitignore2
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/Makefile7
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c182
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/log.h26
-rw-r--r--tools/testing/selftests/filesystems/statmount/.gitignore2
-rw-r--r--tools/testing/selftests/filesystems/statmount/Makefile6
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount_test.c612
-rw-r--r--tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c5
-rw-r--r--tools/testing/selftests/landlock/fs_test.c80
-rw-r--r--tools/testing/selftests/landlock/net_test.c59
-rw-r--r--tools/testing/selftests/lsm/.gitignore1
-rw-r--r--tools/testing/selftests/lsm/Makefile17
-rw-r--r--tools/testing/selftests/lsm/common.c89
-rw-r--r--tools/testing/selftests/lsm/common.h33
-rw-r--r--tools/testing/selftests/lsm/config3
-rw-r--r--tools/testing/selftests/lsm/lsm_get_self_attr_test.c275
-rw-r--r--tools/testing/selftests/lsm/lsm_list_modules_test.c137
-rw-r--r--tools/testing/selftests/lsm/lsm_set_self_attr_test.c74
-rw-r--r--tools/testing/selftests/mm/Makefile4
-rw-r--r--tools/testing/selftests/mm/compaction_test.c91
-rw-r--r--tools/testing/selftests/mm/cow.c183
-rw-r--r--tools/testing/selftests/mm/gup_test.c65
-rw-r--r--tools/testing/selftests/mm/hugepage-mmap.c23
-rw-r--r--tools/testing/selftests/mm/hugepage-mremap.c87
-rw-r--r--tools/testing/selftests/mm/khugepaged.c410
-rwxr-xr-xtools/testing/selftests/mm/run_vmtests.sh55
-rw-r--r--tools/testing/selftests/mm/thp_settings.c349
-rw-r--r--tools/testing/selftests/mm/thp_settings.h80
-rw-r--r--tools/testing/selftests/mm/thuge-gen.c3
-rw-r--r--tools/testing/selftests/mm/uffd-common.c39
-rw-r--r--tools/testing/selftests/mm/uffd-common.h9
-rw-r--r--tools/testing/selftests/mm/uffd-stress.c5
-rw-r--r--tools/testing/selftests/mm/uffd-unit-tests.c209
-rw-r--r--tools/testing/selftests/mm/vm_util.c80
-rw-r--r--tools/testing/selftests/powerpc/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/math/fpu.h25
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_asm.S48
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_preempt.c30
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_syscall.c8
-rw-r--r--tools/testing/selftests/powerpc/math/vmx_preempt.c10
-rw-r--r--tools/testing/selftests/powerpc/papr_sysparm/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/papr_sysparm/Makefile12
-rw-r--r--tools/testing/selftests/powerpc/papr_sysparm/papr_sysparm.c196
-rw-r--r--tools/testing/selftests/powerpc/papr_vpd/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/papr_vpd/Makefile12
-rw-r--r--tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c352
-rw-r--r--tools/thermal/tmon/tui.c2
-rw-r--r--usr/Kconfig6
-rw-r--r--virt/kvm/eventfd.c4
1497 files changed, 38359 insertions, 24334 deletions
diff --git a/.mailmap b/.mailmap
index 9a9069235f0d..03587c433097 100644
--- a/.mailmap
+++ b/.mailmap
@@ -377,7 +377,7 @@ Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com> <martyna.szapar-mudlaw@intel.com>
-Mathieu Othacehe <m.othacehe@gmail.com>
+Mathieu Othacehe <m.othacehe@gmail.com> <othacehe@gnu.org>
Mat Martineau <martineau@kernel.org> <mathew.j.martineau@linux.intel.com>
Mat Martineau <martineau@kernel.org> <mathewm@codeaurora.org>
Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
@@ -478,6 +478,8 @@ Paul E. McKenney <paulmck@kernel.org> <paulmck@linux.vnet.ibm.com>
Paul E. McKenney <paulmck@kernel.org> <paulmck@us.ibm.com>
Paul Mackerras <paulus@ozlabs.org> <paulus@samba.org>
Paul Mackerras <paulus@ozlabs.org> <paulus@au1.ibm.com>
+Paul Moore <paul@paul-moore.com> <paul.moore@hp.com>
+Paul Moore <paul@paul-moore.com> <pmoore@redhat.com>
Pavankumar Kondeti <quic_pkondeti@quicinc.com> <pkondeti@codeaurora.org>
Peter A Jonsson <pj@ludd.ltu.se>
Peter Oruba <peter.oruba@amd.com>
@@ -542,6 +544,8 @@ Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
Sebastian Reichel <sre@kernel.org> <sre@debian.org>
Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
Senthilkumar N L <quic_snlakshm@quicinc.com> <snlakshm@codeaurora.org>
+Serge Hallyn <sergeh@kernel.org> <serge.hallyn@canonical.com>
+Serge Hallyn <sergeh@kernel.org> <serue@us.ibm.com>
Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
Shannon Nelson <shannon.nelson@amd.com> <snelson@pensando.io>
Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@intel.com>
@@ -638,4 +642,5 @@ Wolfram Sang <wsa@kernel.org> <w.sang@pengutronix.de>
Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
Yusuke Goda <goda.yusuke@renesas.com>
+Zack Rusin <zack.rusin@broadcom.com> <zackr@vmware.com>
Zhu Yanjun <zyjzyj2000@gmail.com> <yanjunz@nvidia.com>
diff --git a/CREDITS b/CREDITS
index 81845c39e3cf..d61ba9b5117f 100644
--- a/CREDITS
+++ b/CREDITS
@@ -9,10 +9,6 @@
Linus
----------
-N: Matt Mackal
-E: mpm@selenic.com
-D: SLOB slab allocator
-
N: Matti Aarnio
E: mea@nic.funet.fi
D: Alpha systems hacking, IPv6 and other network related stuff
@@ -323,6 +319,9 @@ N: Ohad Ben Cohen
E: ohad@wizery.com
D: Remote Processor (remoteproc) subsystem
D: Remote Processor Messaging (rpmsg) subsystem
+D: Hardware spinlock (hwspinlock) subsystem
+D: OMAP hwspinlock driver
+D: OMAP remoteproc driver
N: Krzysztof Benedyczak
E: golbi@mat.uni.torun.pl
@@ -1572,6 +1571,10 @@ S: Ampferstr. 50 / 4
S: 6020 Innsbruck
S: Austria
+N: Mark Hemment
+E: markhe@nextd.demon.co.uk
+D: SLAB allocator implementation
+
N: Richard Henderson
E: rth@twiddle.net
E: rth@cygnus.com
@@ -1855,6 +1858,10 @@ D: Fedora kernel maintenance (2003-2014).
D: 'Trinity' and similar fuzz testing work.
D: Misc/Other.
+N: Tom Joseph
+E: tjoseph@cadence.com
+D: Cadence PCIe driver
+
N: Martin Josfsson
E: gandalf@wlug.westbo.se
P: 1024D/F6B6D3B1 7610 7CED 5C34 4AA6 DBA2 8BE1 5A6D AF95 F6B6 D3B1
@@ -2126,6 +2133,10 @@ S: 2213 La Terrace Circle
S: San Jose, CA 95123
S: USA
+N: Mike Kravetz
+E: mike.kravetz@oracle.com
+D: Maintenance and development of the hugetlb subsystem
+
N: Andreas S. Krebs
E: akrebs@altavista.net
D: CYPRESS CY82C693 chipset IDE, Digital's PC-Alpha 164SX boards
@@ -2437,6 +2448,10 @@ D: work on suspend-to-ram/disk, killing duplicates from ioctl32,
D: Altera SoCFPGA and Nokia N900 support.
S: Czech Republic
+N: Olivia Mackall
+E: olivia@selenic.com
+D: SLOB slab allocator
+
N: Paul Mackerras
E: paulus@samba.org
D: PPP driver
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps b/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps
index 8757dcf41c08..a5f506f7d481 100644
--- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps
@@ -16,3 +16,9 @@ Description:
Example output in powerpc:
grep . /sys/bus/event_source/devices/cpu/caps/*
/sys/bus/event_source/devices/cpu/caps/pmu_name:POWER9
+
+		On supported platforms, the "branch_counter_nr" attribute exposes the
+ maximum number of counters which can be shown in the u64 counters
+ of PERF_SAMPLE_BRANCH_COUNTERS, while the "branch_counter_width"
+ exposes the width of each counter. Both of them can be used by
+ the perf tool to parse the logged counters in each branch.
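+
+		For example, when present, the attributes can be inspected with
+		the following command (output values vary by platform):
+		grep . /sys/bus/event_source/devices/cpu/caps/branch_counter_*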
diff --git a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor
index c800621eff95..9ed5582ddea2 100644
--- a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor
+++ b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor
@@ -25,6 +25,9 @@ KernelVersion: 5.14
Contact: linux-mtd@lists.infradead.org
Description: (RO) Part name of the SPI NOR flash.
+ The attribute is optional. User space should not rely on
+ it to be present or even correct. Instead, user space
+ should read the jedec_id attribute.
What: /sys/bus/spi/devices/.../spi-nor/sfdp
Date: April 2021
diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq
index 5e6b74f30406..1e7e0bb4c14e 100644
--- a/Documentation/ABI/testing/sysfs-class-devfreq
+++ b/Documentation/ABI/testing/sysfs-class-devfreq
@@ -52,6 +52,9 @@ Description:
echo 0 > /sys/class/devfreq/.../trans_stat
+ If the transition table is bigger than PAGE_SIZE, reading
+ this will return an -EFBIG error.
+
What: /sys/class/devfreq/.../available_frequencies
Date: October 2012
Contact: Nishanth Menon <nm@ti.com>
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-damon b/Documentation/ABI/testing/sysfs-kernel-mm-damon
index b35649a46a2f..bfa5b8288d8d 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-damon
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-damon
@@ -25,12 +25,14 @@ Description: Writing 'on' or 'off' to this file makes the kdamond starts or
stops, respectively. Reading the file returns the keywords
based on the current status. Writing 'commit' to this file
makes the kdamond reads the user inputs in the sysfs files
- except 'state' again. Writing 'update_schemes_stats' to the
- file updates contents of schemes stats files of the kdamond.
- Writing 'update_schemes_tried_regions' to the file updates
- contents of 'tried_regions' directory of every scheme directory
- of this kdamond. Writing 'update_schemes_tried_bytes' to the
- file updates only '.../tried_regions/total_bytes' files of this
+ except 'state' again. Writing 'commit_schemes_quota_goals' to
+		this file makes the kdamond read the quota goal files again.
+ Writing 'update_schemes_stats' to the file updates contents of
+ schemes stats files of the kdamond. Writing
+ 'update_schemes_tried_regions' to the file updates contents of
+ 'tried_regions' directory of every scheme directory of this
+ kdamond. Writing 'update_schemes_tried_bytes' to the file
+ updates only '.../tried_regions/total_bytes' files of this
kdamond. Writing 'clear_schemes_tried_regions' to the file
removes contents of the 'tried_regions' directory.
@@ -212,6 +214,25 @@ Contact: SeongJae Park <sj@kernel.org>
Description: Writing to and reading from this file sets and gets the quotas
charge reset interval of the scheme in milliseconds.
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/quotas/goals/nr_goals
+Date: Nov 2023
+Contact: SeongJae Park <sj@kernel.org>
+Description:	Writing a number 'N' to this file creates directories named
+		'0' to 'N-1' under the goals/ directory, for setting automatic
+		tuning of the scheme's aggressiveness.
+
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/quotas/goals/<G>/target_value
+Date: Nov 2023
+Contact: SeongJae Park <sj@kernel.org>
+Description: Writing to and reading from this file sets and gets the target
+ value of the goal metric.
+
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/quotas/goals/<G>/current_value
+Date: Nov 2023
+Contact: SeongJae Park <sj@kernel.org>
+Description: Writing to and reading from this file sets and gets the current
+ value of the goal metric.
+
What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/quotas/weights/sz_permil
Date: Mar 2022
Contact: SeongJae Park <sj@kernel.org>
diff --git a/Documentation/RAS/ras.rst b/Documentation/RAS/ras.rst
new file mode 100644
index 000000000000..2556b397cd27
--- /dev/null
+++ b/Documentation/RAS/ras.rst
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Reliability, Availability and Serviceability features
+=====================================================
+
+This documents different aspects of the RAS functionality present in the
+kernel.
+
+Error decoding
+---------------
+
+* x86
+
+Error decoding on AMD systems should be done using the rasdaemon tool:
+https://github.com/mchehab/rasdaemon/
+
+While the daemon is running, it will automatically log and decode
+errors. If not, one can still decode such errors by supplying the
+hardware information from the error::
+
+ $ rasdaemon -p --status <STATUS> --ipid <IPID> --smca
+
+Also, the user can pass a particular family and model to decode the error
+string::
+
+ $ rasdaemon -p --status <STATUS> --ipid <IPID> --smca --family <CPU Family> --model <CPU Model> --bank <BANK_NUM>
diff --git a/Documentation/admin-guide/blockdev/zram.rst b/Documentation/admin-guide/blockdev/zram.rst
index e4551579cb12..ee2b0030d416 100644
--- a/Documentation/admin-guide/blockdev/zram.rst
+++ b/Documentation/admin-guide/blockdev/zram.rst
@@ -328,7 +328,7 @@ as idle::
From now on, any pages on zram are idle pages. The idle mark
will be removed until someone requests access of the block.
IOW, unless there is access request, those pages are still idle pages.
-Additionally, when CONFIG_ZRAM_MEMORY_TRACKING is enabled pages can be
+Additionally, when CONFIG_ZRAM_TRACK_ENTRY_ACTIME is enabled pages can be
marked as idle based on how long (in seconds) it's been since they were
last accessed::
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 3f85254f3cef..17e6e9565156 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1093,7 +1093,11 @@ All time durations are in microseconds.
A read-write single value file which exists on non-root
cgroups. The default is "100".
- The weight in the range [1, 10000].
+	For non-idle groups (cpu.idle = 0), the weight is in the
+ range [1, 10000].
+
+ If the cgroup has been configured to be SCHED_IDLE (cpu.idle = 1),
+	then the weight will show as 0.
cpu.weight.nice
A read-write single value file which exists on non-root
@@ -1157,6 +1161,16 @@ All time durations are in microseconds.
values similar to the sched_setattr(2). This maximum utilization
value is used to clamp the task specific maximum utilization clamp.
+ cpu.idle
+ A read-write single value file which exists on non-root cgroups.
+ The default is 0.
+
+ This is the cgroup analog of the per-task SCHED_IDLE sched policy.
+	Setting this value to 1 will make the scheduling policy of the
+ cgroup SCHED_IDLE. The threads inside the cgroup will retain their
+ own relative priorities, but the cgroup itself will be treated as
+ very low priority relative to its peers.
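+
+	A minimal usage sketch (the cgroup path "background" is only an
+	example)::
+
+	  echo 1 > /sys/fs/cgroup/background/cpu.idle
+	  cat /sys/fs/cgroup/background/cpu.weight   # reads back as 0 while idle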
+
Memory
@@ -1679,6 +1693,21 @@ PAGE_SIZE multiple when read back.
limit, it will refuse to take any more stores before existing
entries fault back in or are written out to disk.
+ memory.zswap.writeback
+ A read-write single value file. The default value is "1". The
+ initial value of the root cgroup is 1, and when a new cgroup is
+ created, it inherits the current value of its parent.
+
+ When this is set to 0, all swapping attempts to swapping devices
+	are disabled. This includes both zswap writebacks, and swapping due
+	to zswap store failures. If the zswap store failures are recurring
+	(e.g. if the pages are incompressible), users can observe
+ reclaim inefficiency after disabling writeback (because the same
+ pages might be rejected again and again).
+
+ Note that this is subtly different from setting memory.swap.max to
+ 0, as it still allows for pages to be written to the zswap pool.
+
memory.pressure
A read-only nested-keyed file.
@@ -2316,6 +2345,13 @@ Cpuset Interface Files
treated to have an implicit value of "cpuset.cpus" in the
formation of local partition.
+ cpuset.cpus.isolated
+ A read-only and root cgroup only multiple values file.
+
+ This file shows the set of all isolated CPUs used in existing
+ isolated partitions. It will be empty if no isolated partition
+ is created.
+
cpuset.cpus.partition
A read-write single value file which exists on non-root
cpuset-enabled cgroups. This flag is owned by the parent cgroup
@@ -2358,11 +2394,11 @@ Cpuset Interface Files
partition or scheduling domain. The set of exclusive CPUs is
determined by the value of its "cpuset.cpus.exclusive.effective".
- When set to "isolated", the CPUs in that partition will
- be in an isolated state without any load balancing from the
- scheduler. Tasks placed in such a partition with multiple
- CPUs should be carefully distributed and bound to each of the
- individual CPUs for optimal performance.
+ When set to "isolated", the CPUs in that partition will be in
+ an isolated state without any load balancing from the scheduler
+ and excluded from the unbound workqueues. Tasks placed in such
+ a partition with multiple CPUs should be carefully distributed
+ and bound to each of the individual CPUs for optimal performance.
A partition root ("root" or "isolated") can be in one of the
two possible states - valid or invalid. An invalid partition
diff --git a/Documentation/admin-guide/kdump/vmcoreinfo.rst b/Documentation/admin-guide/kdump/vmcoreinfo.rst
index 78e4d2e7ba14..bced9e4b6e08 100644
--- a/Documentation/admin-guide/kdump/vmcoreinfo.rst
+++ b/Documentation/admin-guide/kdump/vmcoreinfo.rst
@@ -172,7 +172,7 @@ variables.
Offset of the free_list's member. This value is used to compute the number
of free pages.
-Each zone has a free_area structure array called free_area[MAX_ORDER + 1].
+Each zone has a free_area structure array called free_area[NR_PAGE_ORDERS].
The free_list represents a linked list of free page blocks.
(list_head, next|prev)
@@ -189,11 +189,11 @@ Offsets of the vmap_area's members. They carry vmalloc-specific
information. Makedumpfile gets the start address of the vmalloc region
from this.
-(zone.free_area, MAX_ORDER + 1)
--------------------------------
+(zone.free_area, NR_PAGE_ORDERS)
+--------------------------------
Free areas descriptor. User-space tools use this value to iterate the
-free_area ranges. MAX_ORDER is used by the zone buddy allocator.
+free_area ranges. NR_PAGE_ORDERS is used by the zone buddy allocator.
prb
---
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 65731b060e3f..e0891ac76ab3 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -970,17 +970,17 @@
buddy allocator. Bigger value increase the probability
of catching random memory corruption, but reduce the
amount of memory for normal system use. The maximum
- possible value is MAX_ORDER/2. Setting this parameter
- to 1 or 2 should be enough to identify most random
- memory corruption problems caused by bugs in kernel or
- driver code when a CPU writes to (or reads from) a
- random memory location. Note that there exists a class
- of memory corruptions problems caused by buggy H/W or
- F/W or by drivers badly programming DMA (basically when
- memory is written at bus level and the CPU MMU is
- bypassed) which are not detectable by
- CONFIG_DEBUG_PAGEALLOC, hence this option will not help
- tracking down these problems.
+ possible value is MAX_PAGE_ORDER/2. Setting this
+ parameter to 1 or 2 should be enough to identify most
+ random memory corruption problems caused by bugs in
+ kernel or driver code when a CPU writes to (or reads
+ from) a random memory location. Note that there exists
+ a class of memory corruptions problems caused by buggy
+ H/W or F/W or by drivers badly programming DMA
+ (basically when memory is written at bus level and the
+ CPU MMU is bypassed) which are not detectable by
+ CONFIG_DEBUG_PAGEALLOC, hence this option will not
+ help tracking down these problems.
debug_pagealloc=
[KNL] When CONFIG_DEBUG_PAGEALLOC is set, this parameter
@@ -4136,7 +4136,7 @@
[KNL] Minimal page reporting order
Format: <integer>
Adjust the minimal page reporting order. The page
- reporting is disabled when it exceeds MAX_ORDER.
+ reporting is disabled when it exceeds MAX_PAGE_ORDER.
panic= [KNL] Kernel behaviour on panic: delay <timeout>
timeout > 0: seconds before rebooting
@@ -5544,6 +5544,13 @@
print every Nth verbose statement, where N is the value
specified.
+ regulator_ignore_unused
+ [REGULATOR]
+			Prevents the regulator framework from disabling regulators
+			that are unused due to no driver claiming them. This may
+			be useful for debugging and development, but should not be
+ needed on a platform with proper driver support.
+
relax_domain_level=
[KNL, SMP] Set scheduler's default relax_domain_level.
See Documentation/admin-guide/cgroup-v1/cpusets.rst.
diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
index da94feb97ed1..9d23144bf985 100644
--- a/Documentation/admin-guide/mm/damon/usage.rst
+++ b/Documentation/admin-guide/mm/damon/usage.rst
@@ -59,41 +59,47 @@ Files Hierarchy
The files hierarchy of DAMON sysfs interface is shown below. In the below
figure, parents-children relations are represented with indentations, each
directory is having ``/`` suffix, and files in each directory are separated by
-comma (","). ::
-
- /sys/kernel/mm/damon/admin
- │ kdamonds/nr_kdamonds
- │ │ 0/state,pid
- │ │ │ contexts/nr_contexts
- │ │ │ │ 0/avail_operations,operations
- │ │ │ │ │ monitoring_attrs/
+comma (",").
+
+.. parsed-literal::
+
+ :ref:`/sys/kernel/mm/damon <sysfs_root>`/admin
+ │ :ref:`kdamonds <sysfs_kdamonds>`/nr_kdamonds
+ │ │ :ref:`0 <sysfs_kdamond>`/state,pid
+ │ │ │ :ref:`contexts <sysfs_contexts>`/nr_contexts
+ │ │ │ │ :ref:`0 <sysfs_context>`/avail_operations,operations
+ │ │ │ │ │ :ref:`monitoring_attrs <sysfs_monitoring_attrs>`/
│ │ │ │ │ │ intervals/sample_us,aggr_us,update_us
│ │ │ │ │ │ nr_regions/min,max
- │ │ │ │ │ targets/nr_targets
- │ │ │ │ │ │ 0/pid_target
- │ │ │ │ │ │ │ regions/nr_regions
- │ │ │ │ │ │ │ │ 0/start,end
+ │ │ │ │ │ :ref:`targets <sysfs_targets>`/nr_targets
+ │ │ │ │ │ │ :ref:`0 <sysfs_target>`/pid_target
+ │ │ │ │ │ │ │ :ref:`regions <sysfs_regions>`/nr_regions
+ │ │ │ │ │ │ │ │ :ref:`0 <sysfs_region>`/start,end
│ │ │ │ │ │ │ │ ...
│ │ │ │ │ │ ...
- │ │ │ │ │ schemes/nr_schemes
- │ │ │ │ │ │ 0/action,apply_interval_us
- │ │ │ │ │ │ │ access_pattern/
+ │ │ │ │ │ :ref:`schemes <sysfs_schemes>`/nr_schemes
+ │ │ │ │ │ │ :ref:`0 <sysfs_scheme>`/action,apply_interval_us
+ │ │ │ │ │ │ │ :ref:`access_pattern <sysfs_access_pattern>`/
│ │ │ │ │ │ │ │ sz/min,max
│ │ │ │ │ │ │ │ nr_accesses/min,max
│ │ │ │ │ │ │ │ age/min,max
- │ │ │ │ │ │ │ quotas/ms,bytes,reset_interval_ms
+ │ │ │ │ │ │ │ :ref:`quotas <sysfs_quotas>`/ms,bytes,reset_interval_ms
│ │ │ │ │ │ │ │ weights/sz_permil,nr_accesses_permil,age_permil
- │ │ │ │ │ │ │ watermarks/metric,interval_us,high,mid,low
- │ │ │ │ │ │ │ filters/nr_filters
+ │ │ │ │ │ │ │ │ :ref:`goals <sysfs_schemes_quota_goals>`/nr_goals
+ │ │ │ │ │ │ │ │ │ 0/target_value,current_value
+ │ │ │ │ │ │ │ :ref:`watermarks <sysfs_watermarks>`/metric,interval_us,high,mid,low
+ │ │ │ │ │ │ │ :ref:`filters <sysfs_filters>`/nr_filters
│ │ │ │ │ │ │ │ 0/type,matching,memcg_id
- │ │ │ │ │ │ │ stats/nr_tried,sz_tried,nr_applied,sz_applied,qt_exceeds
- │ │ │ │ │ │ │ tried_regions/total_bytes
+ │ │ │ │ │ │ │ :ref:`stats <sysfs_schemes_stats>`/nr_tried,sz_tried,nr_applied,sz_applied,qt_exceeds
+ │ │ │ │ │ │ │ :ref:`tried_regions <sysfs_schemes_tried_regions>`/total_bytes
│ │ │ │ │ │ │ │ 0/start,end,nr_accesses,age
│ │ │ │ │ │ │ │ ...
│ │ │ │ │ │ ...
│ │ │ │ ...
│ │ ...
+.. _sysfs_root:
+
Root
----
@@ -102,6 +108,8 @@ has one directory named ``admin``. The directory contains the files for
privileged user space programs' control of DAMON. User space tools or daemons
having the root permission could use this directory.
+.. _sysfs_kdamonds:
+
kdamonds/
---------
@@ -113,6 +121,8 @@ details) exists. In the beginning, this directory has only one file,
child directories named ``0`` to ``N-1``. Each directory represents each
kdamond.
+.. _sysfs_kdamond:
+
kdamonds/<N>/
-------------
@@ -120,29 +130,37 @@ In each kdamond directory, two files (``state`` and ``pid``) and one directory
(``contexts``) exist.
Reading ``state`` returns ``on`` if the kdamond is currently running, or
-``off`` if it is not running. Writing ``on`` or ``off`` makes the kdamond be
-in the state. Writing ``commit`` to the ``state`` file makes kdamond reads the
-user inputs in the sysfs files except ``state`` file again. Writing
-``update_schemes_stats`` to ``state`` file updates the contents of stats files
-for each DAMON-based operation scheme of the kdamond. For details of the
-stats, please refer to :ref:`stats section <sysfs_schemes_stats>`.
-
-Writing ``update_schemes_tried_regions`` to ``state`` file updates the
-DAMON-based operation scheme action tried regions directory for each
-DAMON-based operation scheme of the kdamond. Writing
-``update_schemes_tried_bytes`` to ``state`` file updates only
-``.../tried_regions/total_bytes`` files. Writing
-``clear_schemes_tried_regions`` to ``state`` file clears the DAMON-based
-operating scheme action tried regions directory for each DAMON-based operation
-scheme of the kdamond. For details of the DAMON-based operation scheme action
-tried regions directory, please refer to :ref:`tried_regions section
-<sysfs_schemes_tried_regions>`.
+``off`` if it is not running.
+
+Users can write the following commands for the kdamond to the ``state`` file
+(a short usage sketch follows the list).
+
+- ``on``: Start running.
+- ``off``: Stop running.
+- ``commit``: Read the user inputs in the sysfs files except ``state`` file
+ again.
+- ``commit_schemes_quota_goals``: Read the DAMON-based operation schemes'
+ :ref:`quota goals <sysfs_schemes_quota_goals>`.
+- ``update_schemes_stats``: Update the contents of stats files for each
+ DAMON-based operation scheme of the kdamond. For details of the stats,
+ please refer to :ref:`stats section <sysfs_schemes_stats>`.
+- ``update_schemes_tried_regions``: Update the DAMON-based operation scheme
+ action tried regions directory for each DAMON-based operation scheme of the
+ kdamond. For details of the DAMON-based operation scheme action tried
+ regions directory, please refer to
+ :ref:`tried_regions section <sysfs_schemes_tried_regions>`.
+- ``update_schemes_tried_bytes``: Update only ``.../tried_regions/total_bytes``
+ files.
+- ``clear_schemes_tried_regions``: Clear the DAMON-based operating scheme
+ action tried regions directory for each DAMON-based operation scheme of the
+ kdamond.
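+
+For example, assuming an already configured kdamond directory ``0`` (a usage
+sketch only; paths and ordering depend on the setup)::
+
+    # start monitoring with the current sysfs inputs
+    echo on > /sys/kernel/mm/damon/admin/kdamonds/0/state
+    # after editing non-'state' files, ask the kdamond to re-read them
+    echo commit > /sys/kernel/mm/damon/admin/kdamonds/0/state
+    # refresh the per-scheme stats files before reading them
+    echo update_schemes_stats > /sys/kernel/mm/damon/admin/kdamonds/0/state
+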
If the state is ``on``, reading ``pid`` shows the pid of the kdamond thread.
``contexts`` directory contains files for controlling the monitoring contexts
that this kdamond will execute.
+.. _sysfs_contexts:
+
kdamonds/<N>/contexts/
----------------------
@@ -153,7 +171,7 @@ number (``N``) to the file creates the number of child directories named as
details). At the moment, only one context per kdamond is supported, so only
``0`` or ``1`` can be written to the file.
-.. _sysfs_contexts:
+.. _sysfs_context:
contexts/<N>/
-------------
@@ -203,6 +221,8 @@ writing to and rading from the files.
For more details about the intervals and monitoring regions range, please refer
to the Design document (:doc:`/mm/damon/design`).
+.. _sysfs_targets:
+
contexts/<N>/targets/
---------------------
@@ -210,6 +230,8 @@ In the beginning, this directory has only one file, ``nr_targets``. Writing a
number (``N``) to the file creates the number of child directories named ``0``
to ``N-1``. Each directory represents each monitoring target.
+.. _sysfs_target:
+
targets/<N>/
------------
@@ -244,6 +266,8 @@ In the beginning, this directory has only one file, ``nr_regions``. Writing a
number (``N``) to the file creates the number of child directories named ``0``
to ``N-1``. Each directory represents each initial monitoring target region.
+.. _sysfs_region:
+
regions/<N>/
------------
@@ -254,6 +278,8 @@ region by writing to and reading from the files, respectively.
Each region should not overlap with others. ``end`` of directory ``N`` should
be equal or smaller than ``start`` of directory ``N+1``.
+.. _sysfs_schemes:
+
contexts/<N>/schemes/
---------------------
@@ -265,6 +291,8 @@ In the beginning, this directory has only one file, ``nr_schemes``. Writing a
number (``N``) to the file creates the number of child directories named ``0``
to ``N-1``. Each directory represents each DAMON-based operation scheme.
+.. _sysfs_scheme:
+
schemes/<N>/
------------
@@ -277,7 +305,7 @@ The ``action`` file is for setting and getting the scheme's :ref:`action
from the file and their meaning are as below.
Note that support of each action depends on the running DAMON operations set
-:ref:`implementation <sysfs_contexts>`.
+:ref:`implementation <sysfs_context>`.
- ``willneed``: Call ``madvise()`` for the region with ``MADV_WILLNEED``.
Supported by ``vaddr`` and ``fvaddr`` operations set.
@@ -299,6 +327,8 @@ Note that support of each action depends on the running DAMON operations set
The ``apply_interval_us`` file is for setting and getting the scheme's
:ref:`apply_interval <damon_design_damos>` in microseconds.
+.. _sysfs_access_pattern:
+
schemes/<N>/access_pattern/
---------------------------
@@ -312,6 +342,8 @@ to and reading from the ``min`` and ``max`` files under ``sz``,
``nr_accesses``, and ``age`` directories, respectively. Note that the ``min``
and the ``max`` form a closed interval.
+.. _sysfs_quotas:
+
schemes/<N>/quotas/
-------------------
@@ -319,8 +351,7 @@ The directory for the :ref:`quotas <damon_design_damos_quotas>` of the given
DAMON-based operation scheme.
Under ``quotas`` directory, three files (``ms``, ``bytes``,
-``reset_interval_ms``) and one directory (``weights``) having three files
-(``sz_permil``, ``nr_accesses_permil``, and ``age_permil``) in it exist.
+``reset_interval_ms``) and two directories (``weights`` and ``goals``) exist.
You can set the ``time quota`` in milliseconds, ``size quota`` in bytes, and
``reset interval`` in milliseconds by writing the values to the three files,
@@ -330,11 +361,37 @@ apply the action to only up to ``bytes`` bytes of memory regions within the
``reset_interval_ms``. Setting both ``ms`` and ``bytes`` zero disables the
quota limits.
-You can also set the :ref:`prioritization weights
+Under ``weights`` directory, three files (``sz_permil``,
+``nr_accesses_permil``, and ``age_permil``) exist.
+You can set the :ref:`prioritization weights
<damon_design_damos_quotas_prioritization>` for size, access frequency, and age
in per-thousand unit by writing the values to the three files under the
``weights`` directory.
+.. _sysfs_schemes_quota_goals:
+
+schemes/<N>/quotas/goals/
+-------------------------
+
+The directory for the :ref:`automatic quota tuning goals
+<damon_design_damos_quotas_auto_tuning>` of the given DAMON-based operation
+scheme.
+
+In the beginning, this directory has only one file, ``nr_goals``. Writing a
+number (``N``) to the file creates the number of child directories named ``0``
+to ``N-1``. Each directory represents each goal and current achievement.
+When multiple goals are set, the best feedback among them is used.
+
+Each goal directory contains two files, namely ``target_value`` and
+``current_value``. Users can write to and read from these files to set and get
+the feedback. The main user space workload's latency or throughput, or system
+metrics such as free memory ratio or memory pressure stall information (PSI),
+could be example metrics for the values. Note that users should write
+``commit_schemes_quota_goals`` to the ``state`` file of the :ref:`kdamond
+directory <sysfs_kdamond>` to pass the feedback to DAMON.
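+
+As a sketch only (the scheme index and the example values are assumptions for
+illustration), a goal could be wired up like this::
+
+    cd /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/quotas/goals
+    echo 1 > nr_goals
+    echo 10000 > 0/target_value
+    echo 4000 > 0/current_value
+    # make the running kdamond pick up the new goal
+    echo commit_schemes_quota_goals > /sys/kernel/mm/damon/admin/kdamonds/0/state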
+
+.. _sysfs_watermarks:
+
schemes/<N>/watermarks/
-----------------------
@@ -354,6 +411,8 @@ as below.
The ``interval`` should written in microseconds unit.
+.. _sysfs_filters:
+
schemes/<N>/filters/
--------------------
@@ -394,7 +453,7 @@ pages of all memory cgroups except ``/having_care_already``.::
echo N > 1/matching
Note that ``anon`` and ``memcg`` filters are currently supported only when
-``paddr`` :ref:`implementation <sysfs_contexts>` is being used.
+``paddr`` :ref:`implementation <sysfs_context>` is being used.
Also, memory regions that are filtered out by ``addr`` or ``target`` filters
are not counted as the scheme has tried to those, while regions that filtered
@@ -449,6 +508,8 @@ and query-like efficient data access monitoring results retrievals. For the
latter use case, in particular, users can set the ``action`` as ``stat`` and
set the ``access pattern`` as their interested pattern that they want to query.
+.. _sysfs_schemes_tried_region:
+
tried_regions/<N>/
------------------
diff --git a/Documentation/admin-guide/mm/ksm.rst b/Documentation/admin-guide/mm/ksm.rst
index e59231ac6bb7..a639cac12477 100644
--- a/Documentation/admin-guide/mm/ksm.rst
+++ b/Documentation/admin-guide/mm/ksm.rst
@@ -80,6 +80,9 @@ pages_to_scan
how many pages to scan before ksmd goes to sleep
e.g. ``echo 100 > /sys/kernel/mm/ksm/pages_to_scan``.
+ The pages_to_scan value cannot be changed if ``advisor_mode`` has
+ been set to scan-time.
+
Default: 100 (chosen for demonstration purposes)
sleep_millisecs
@@ -164,6 +167,29 @@ smart_scan
optimization is enabled. The ``pages_skipped`` metric shows how
effective the setting is.
+advisor_mode
+ The ``advisor_mode`` selects the current advisor. Two modes are
+ supported: none and scan-time. The default is none. By setting
+ ``advisor_mode`` to scan-time, the scan time advisor is enabled.
+ The section about ``advisor`` explains in detail how the scan time
+ advisor works.
+
+advisor_max_cpu
+ specifies the upper limit of the cpu percent usage of the ksmd
+ background thread. The default is 70.
+
+advisor_target_scan_time
+ specifies the target scan time in seconds to scan all the candidate
+ pages. The default value is 200 seconds.
+
+advisor_min_pages_to_scan
+ specifies the lower limit of the ``pages_to_scan`` parameter of the
+ scan time advisor. The default is 500.
+
+advisor_max_pages_to_scan
+ specifies the upper limit of the ``pages_to_scan`` parameter of the
+ scan time advisor. The default is 30000.
+
The effectiveness of KSM and MADV_MERGEABLE is shown in ``/sys/kernel/mm/ksm/``:
general_profit
@@ -263,6 +289,35 @@ ksm_swpin_copy
note that KSM page might be copied when swapping in because do_swap_page()
cannot do all the locking needed to reconstitute a cross-anon_vma KSM page.
+Advisor
+=======
+
+The number of candidate pages for KSM is dynamic. It can often be observed
+that during the startup of an application more candidate pages need to be
+processed. Without an advisor the ``pages_to_scan`` parameter needs to be
+sized for the maximum number of candidate pages. The scan time advisor
+changes the ``pages_to_scan`` parameter based on demand.
+
+The advisor can be enabled, so KSM can automatically adapt to changes in the
+number of candidate pages to scan. Two advisors are implemented: none and
+scan-time. With none, no advisor is enabled. The default is none.
+
+The scan time advisor changes the ``pages_to_scan`` parameter based on the
+observed scan times. The possible values for the ``pages_to_scan`` parameter are
+limited by the ``advisor_max_cpu`` parameter. In addition there is also the
+``advisor_target_scan_time`` parameter. This parameter sets the target time to
+scan all the KSM candidate pages. The parameter ``advisor_target_scan_time``
+decides how aggressively the scan time advisor scans candidate pages. Lower
+values make the scan time advisor scan more aggressively. This is the most
+important parameter for the configuration of the scan time advisor.
+
+The initial value and the maximum value can be changed with
+``advisor_min_pages_to_scan`` and ``advisor_max_pages_to_scan``. The default
+values are sufficient for most workloads and use cases.
+
+The ``pages_to_scan`` parameter is re-calculated after a scan has been completed.
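+
+A minimal enabling sketch (the 120 second target scan time is only an example
+value)::
+
+    echo scan-time > /sys/kernel/mm/ksm/advisor_mode
+    echo 120 > /sys/kernel/mm/ksm/advisor_target_scan_time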
+
+
--
Izik Eidus,
Hugh Dickins, 17 Nov 2009
diff --git a/Documentation/admin-guide/mm/pagemap.rst b/Documentation/admin-guide/mm/pagemap.rst
index fe17cf210426..f5f065c67615 100644
--- a/Documentation/admin-guide/mm/pagemap.rst
+++ b/Documentation/admin-guide/mm/pagemap.rst
@@ -253,6 +253,7 @@ Following flags about pages are currently supported:
- ``PAGE_IS_SWAPPED`` - Page is in swapped
- ``PAGE_IS_PFNZERO`` - Page has zero PFN
- ``PAGE_IS_HUGE`` - Page is THP or Hugetlb backed
+- ``PAGE_IS_SOFT_DIRTY`` - Page is soft-dirty
The ``struct pm_scan_arg`` is used as the argument of the IOCTL.
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index b0cc8243e093..04eb45a2f940 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -45,10 +45,25 @@ components:
the two is using hugepages just because of the fact the TLB miss is
going to run faster.
+Modern kernels support "multi-size THP" (mTHP), which introduces the
+ability to allocate memory in blocks that are bigger than a base page
+but smaller than traditional PMD-size (as described above), in
+increments of a power-of-2 number of pages. mTHP can back anonymous
+memory (for example 16K, 32K, 64K, etc). These THPs continue to be
+PTE-mapped, but in many cases can still provide similar benefits to
+those outlined above: Page faults are significantly reduced (by a
+factor of e.g. 4, 8, 16, etc), but latency spikes are much less
+prominent because the size of each page isn't as huge as the PMD-sized
+variant and there is less memory to clear in each page fault. Some
+architectures also employ TLB compression mechanisms to squeeze more
+entries in when a set of PTEs are virtually and physically contiguous
+and appropriately aligned. In this case, TLB misses will occur less
+often.
+
THP can be enabled system wide or restricted to certain tasks or even
memory ranges inside task's address space. Unless THP is completely
disabled, there is ``khugepaged`` daemon that scans memory and
-collapses sequences of basic pages into huge pages.
+collapses sequences of basic pages into PMD-sized huge pages.
The THP behaviour is controlled via :ref:`sysfs <thp_sysfs>`
interface and using madvise(2) and prctl(2) system calls.
@@ -95,12 +110,40 @@ Global THP controls
Transparent Hugepage Support for anonymous memory can be entirely disabled
(mostly for debugging purposes) or only enabled inside MADV_HUGEPAGE
regions (to avoid the risk of consuming more memory resources) or enabled
-system wide. This can be achieved with one of::
+system wide. This can be achieved per-supported-THP-size with one of::
+
+ echo always >/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/enabled
+ echo madvise >/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/enabled
+ echo never >/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/enabled
+
+where <size> is the hugepage size being addressed, the available sizes
+for which vary by system.
+
+For example::
+
+ echo always >/sys/kernel/mm/transparent_hugepage/hugepages-2048kB/enabled
+
+Alternatively it is possible to specify that a given hugepage size
+will inherit the top-level "enabled" value::
+
+ echo inherit >/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/enabled
+
+For example::
+
+ echo inherit >/sys/kernel/mm/transparent_hugepage/hugepages-2048kB/enabled
+
+The top-level setting (for use with "inherit") can be set by issuing
+one of the following commands::
echo always >/sys/kernel/mm/transparent_hugepage/enabled
echo madvise >/sys/kernel/mm/transparent_hugepage/enabled
echo never >/sys/kernel/mm/transparent_hugepage/enabled
+By default, PMD-sized hugepages have enabled="inherit" and all other
+hugepage sizes have enabled="never". If enabling multiple hugepage
+sizes, the kernel will select the most appropriate enabled size for a
+given allocation.
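+
+The hugepage sizes supported on a given system, and their current settings, can
+be listed with a command such as::
+
+ grep . /sys/kernel/mm/transparent_hugepage/hugepages-*kB/enabled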
+
It's also possible to limit defrag efforts in the VM to generate
anonymous hugepages in case they're not immediately free to madvise
regions or to never try to defrag memory and simply fallback to regular
@@ -146,25 +189,34 @@ madvise
never
should be self-explanatory.
-By default kernel tries to use huge zero page on read page fault to
-anonymous mapping. It's possible to disable huge zero page by writing 0
-or enable it back by writing 1::
+By default kernel tries to use huge, PMD-mappable zero page on read
+page fault to anonymous mapping. It's possible to disable huge zero
+page by writing 0 or enable it back by writing 1::
echo 0 >/sys/kernel/mm/transparent_hugepage/use_zero_page
echo 1 >/sys/kernel/mm/transparent_hugepage/use_zero_page
-Some userspace (such as a test program, or an optimized memory allocation
-library) may want to know the size (in bytes) of a transparent hugepage::
+Some userspace (such as a test program, or an optimized memory
+allocation library) may want to know the size (in bytes) of a
+PMD-mappable transparent hugepage::
cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
-khugepaged will be automatically started when
-transparent_hugepage/enabled is set to "always" or "madvise, and it'll
-be automatically shutdown if it's set to "never".
+khugepaged will be automatically started when one or more hugepage
+sizes are enabled (either by directly setting "always" or "madvise",
+or by setting "inherit" while the top-level enabled is set to "always"
+or "madvise"), and it'll be automatically shutdown when the last
+hugepage size is disabled (either by directly setting "never", or by
+setting "inherit" while the top-level enabled is set to "never").
Khugepaged controls
-------------------
+.. note::
+ khugepaged currently only searches for opportunities to collapse to
+ PMD-sized THP and no attempt is made to collapse to other THP
+ sizes.
+
khugepaged runs usually at low frequency so while one may not want to
invoke defrag algorithms synchronously during the page faults, it
should be worth invoking defrag at least in khugepaged. However it's
@@ -282,19 +334,26 @@ force
Need of application restart
===========================
-The transparent_hugepage/enabled values and tmpfs mount option only affect
-future behavior. So to make them effective you need to restart any
-application that could have been using hugepages. This also applies to the
-regions registered in khugepaged.
+The transparent_hugepage/enabled and
+transparent_hugepage/hugepages-<size>kB/enabled values and tmpfs mount
+option only affect future behavior. So to make them effective you need
+to restart any application that could have been using hugepages. This
+also applies to the regions registered in khugepaged.
Monitoring usage
================
-The number of anonymous transparent huge pages currently used by the
+.. note::
+ Currently the below counters only record events relating to
+ PMD-sized THP. Events relating to other THP sizes are not included.
+
+The number of PMD-sized anonymous transparent huge pages currently used by the
system is available by reading the AnonHugePages field in ``/proc/meminfo``.
-To identify what applications are using anonymous transparent huge pages,
-it is necessary to read ``/proc/PID/smaps`` and count the AnonHugePages fields
-for each mapping.
+To identify what applications are using PMD-sized anonymous transparent huge
+pages, it is necessary to read ``/proc/PID/smaps`` and count the AnonHugePages
+fields for each mapping. (Note that AnonHugePages only applies to traditional
+PMD-sized THP for historical reasons and should have been called
+AnonHugePmdMapped).
The number of file transparent huge pages mapped to userspace is available
by reading ShmemPmdMapped and ShmemHugePages fields in ``/proc/meminfo``.
@@ -413,7 +472,7 @@ for huge pages.
Optimizing the applications
===========================
-To be guaranteed that the kernel will map a 2M page immediately in any
+To be guaranteed that the kernel will map a THP immediately in any
memory region, the mmap region has to be hugepage naturally
aligned. posix_memalign() can provide that guarantee.
diff --git a/Documentation/admin-guide/mm/userfaultfd.rst b/Documentation/admin-guide/mm/userfaultfd.rst
index 203e26da5f92..e5cc8848dcb3 100644
--- a/Documentation/admin-guide/mm/userfaultfd.rst
+++ b/Documentation/admin-guide/mm/userfaultfd.rst
@@ -113,6 +113,9 @@ events, except page fault notifications, may be generated:
areas. ``UFFD_FEATURE_MINOR_SHMEM`` is the analogous feature indicating
support for shmem virtual memory areas.
+- ``UFFD_FEATURE_MOVE`` indicates that the kernel supports moving an
+ existing page contents from userspace.
+
The userland application should set the feature flags it intends to use
when invoking the ``UFFDIO_API`` ioctl, to request that those features be
enabled if supported.
diff --git a/Documentation/admin-guide/mm/zswap.rst b/Documentation/admin-guide/mm/zswap.rst
index 45b98390e938..b42132969e31 100644
--- a/Documentation/admin-guide/mm/zswap.rst
+++ b/Documentation/admin-guide/mm/zswap.rst
@@ -153,6 +153,26 @@ attribute, e. g.::
Setting this parameter to 100 will disable the hysteresis.
+Some users cannot tolerate the swapping that comes with zswap store failures
+and zswap writebacks. Swapping can be disabled entirely (without disabling
+zswap itself) on a per-cgroup basis as follows::
+
+ echo 0 > /sys/fs/cgroup/<cgroup-name>/memory.zswap.writeback
+
+Note that if the store failures are recurring (e.g. if the pages are
+incompressible), users can observe reclaim inefficiency after disabling
+writeback (because the same pages might be rejected again and again).
+
+When there is a sizable amount of cold memory residing in the zswap pool, it
+can be advantageous to proactively write these cold pages to swap and reclaim
+the memory for other use cases. By default, the zswap shrinker is disabled.
+Users can enable it as follows::
+
+ echo Y > /sys/module/zswap/parameters/shrinker_enabled
+
+This can be enabled at boot time if ``CONFIG_ZSWAP_SHRINKER_DEFAULT_ON`` is
+selected.
+
A debugfs interface is provided for various statistic about pool size, number
of pages stored, same-value filled pages and various counters for the reasons
pages are rejected.
diff --git a/Documentation/admin-guide/perf/dwc_pcie_pmu.rst b/Documentation/admin-guide/perf/dwc_pcie_pmu.rst
new file mode 100644
index 000000000000..d47cd229d710
--- /dev/null
+++ b/Documentation/admin-guide/perf/dwc_pcie_pmu.rst
@@ -0,0 +1,94 @@
+======================================================================
+Synopsys DesignWare Cores (DWC) PCIe Performance Monitoring Unit (PMU)
+======================================================================
+
+DesignWare Cores (DWC) PCIe PMU
+===============================
+
+The PMU is a PCIe configuration space register block provided by each PCIe Root
+Port in a Vendor-Specific Extended Capability named RAS D.E.S (Debug, Error
+injection, and Statistics).
+
+As the name indicates, the RAS DES capability supports system level
+debugging, AER error injection, and collection of statistics. To facilitate
+collection of statistics, Synopsys DesignWare Cores PCIe controller
+provides the following two features:
+
+- one 64-bit counter for Time Based Analysis (RX/TX data throughput and
+ time spent in each low-power LTSSM state) and
+- one 32-bit counter for Event Counting (error and non-error events for
+ a specified lane)
+
+Note: There is no interrupt for counter overflow.
+
+Time Based Analysis
+-------------------
+
+Using this feature you can obtain information regarding RX/TX data
+throughput and time spent in each low-power LTSSM state by the controller.
+The PMU measures data in two categories:
+
+- Group#0: Percentage of time the controller stays in LTSSM states.
+- Group#1: Amount of data processed (Units of 16 bytes).
+
+Lane Event counters
+-------------------
+
+Using this feature you can obtain Error and Non-Error information for a
+specific lane from the controller. The PMU event is selected by all of:
+
+- Group i
+- Event j within the Group i
+- Lane k
+
+Some of the events only exist for specific configurations.
+
+DesignWare Cores (DWC) PCIe PMU Driver
+=======================================
+
+This driver adds a PMU device for each PCIe Root Port, named after the BDF of
+the Root Port. For example,
+
+ 30:03.0 PCI bridge: Device 1ded:8000 (rev 01)
+
+the PMU device name for this Root Port is dwc_rootport_3018.
+
+The DWC PCIe PMU driver registers a perf PMU driver, which provides
+description of available events and configuration options in sysfs, see
+/sys/bus/event_source/devices/dwc_rootport_{bdf}.
+
+The "format" directory describes format of the config fields of the
+perf_event_attr structure. The "events" directory provides configuration
+templates for all documented events. For example,
+"Rx_PCIe_TLP_Data_Payload" is an equivalent of "eventid=0x22,type=0x1".
+
+The "perf list" command shall list the available events from sysfs, e.g.::
+
+ $# perf list | grep dwc_rootport
+ <...>
+ dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ [Kernel PMU event]
+ <...>
+ dwc_rootport_3018/rx_memory_read,lane=?/ [Kernel PMU event]
+
+Time Based Analysis Event Usage
+-------------------------------
+
+Example usage of counting PCIe RX TLP data payload (Units of bytes)::
+
+ $# perf stat -a -e dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/
+
+The average RX/TX bandwidth can be calculated using the following formulas::
+
+ PCIe RX Bandwidth = Rx_PCIe_TLP_Data_Payload / Measure_Time_Window
+ PCIe TX Bandwidth = Tx_PCIe_TLP_Data_Payload / Measure_Time_Window
+
+Lane Event Usage
+-------------------------------
+
+Each lane has the same event set, and to avoid generating a list of hundreds
+of events, the user needs to specify the lane ID explicitly, e.g.::
+
+ $# perf stat -a -e dwc_rootport_3018/rx_memory_read,lane=4/
+
+The driver does not support sampling, therefore "perf record" will not
+work. Per-task (without "-a") perf sessions are not supported.
diff --git a/Documentation/admin-guide/perf/imx-ddr.rst b/Documentation/admin-guide/perf/imx-ddr.rst
index 90926d0fb8ec..77418ae5a290 100644
--- a/Documentation/admin-guide/perf/imx-ddr.rst
+++ b/Documentation/admin-guide/perf/imx-ddr.rst
@@ -13,8 +13,8 @@ is one register for each counter. Counter 0 is special in that it always counts
interrupt is raised. If any other counter overflows, it continues counting, and
no interrupt is raised.
-The "format" directory describes format of the config (event ID) and config1
-(AXI filtering) fields of the perf_event_attr structure, see /sys/bus/event_source/
+The "format" directory describes format of the config (event ID) and config1/2
+(AXI filter setting) fields of the perf_event_attr structure, see /sys/bus/event_source/
devices/imx8_ddr0/format/. The "events" directory describes the events types
hardware supported that can be used with perf tool, see /sys/bus/event_source/
devices/imx8_ddr0/events/. The "caps" directory describes filter features implemented
@@ -28,12 +28,11 @@ in DDR PMU, see /sys/bus/events_source/devices/imx8_ddr0/caps/.
AXI filtering is only used by CSV modes 0x41 (axid-read) and 0x42 (axid-write)
to count reading or writing matches filter setting. Filter setting is various
from different DRAM controller implementations, which is distinguished by quirks
-in the driver. You also can dump info from userspace, filter in "caps" directory
-indicates whether PMU supports AXI ID filter or not; enhanced_filter indicates
-whether PMU supports enhanced AXI ID filter or not. Value 0 for un-supported, and
-value 1 for supported.
+in the driver. You can also dump info from userspace; the "caps" directory shows
+the type of AXI filter (filter, enhanced_filter and super_filter). Value 0 means
+unsupported, and value 1 means supported.
-* With DDR_CAP_AXI_ID_FILTER quirk(filter: 1, enhanced_filter: 0).
+* With DDR_CAP_AXI_ID_FILTER quirk(filter: 1, enhanced_filter: 0, super_filter: 0).
Filter is defined with two configuration parts:
--AXI_ID defines AxID matching value.
--AXI_MASKING defines which bits of AxID are meaningful for the matching.
@@ -65,7 +64,37 @@ value 1 for supported.
perf stat -a -e imx8_ddr0/axid-read,axi_id=0x12/ cmd, which will monitor ARID=0x12
-* With DDR_CAP_AXI_ID_FILTER_ENHANCED quirk(filter: 1, enhanced_filter: 1).
+* With DDR_CAP_AXI_ID_FILTER_ENHANCED quirk(filter: 1, enhanced_filter: 1, super_filter: 0).
This is an extension to the DDR_CAP_AXI_ID_FILTER quirk which permits
counting the number of bytes (as opposed to the number of bursts) from DDR
read and write transactions concurrently with another set of data counters.
+
+* With DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER quirk(filter: 0, enhanced_filter: 0, super_filter: 1).
+  There is a limitation in the previous AXI filter: it cannot filter different
+  IDs at the same time, as the filter is shared between counters. This quirk is
+  an extension of the AXI ID filter. One improvement is that counters 1-3 have
+  their own filters, which means various IDs can be filtered concurrently.
+  Another improvement is that counters 1-3 support AXI PORT and CHANNEL
+  selection, i.e. selecting either the address channel or the data channel.
+
+  The filter is defined with two configuration registers per counter (1-3).
+ --Counter N MASK COMP register - including AXI_ID and AXI_MASKING.
+ --Counter N MUX CNTL register - including AXI CHANNEL and AXI PORT.
+
+ - 0: address channel
+ - 1: data channel
+
+  In the DDR subsystem PMU, only a single port (port0) exists, so axi_port is
+  reserved and should be 0.
+
+ .. code-block:: bash
+
+ perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xMMMM,axi_id=0xDDDD,axi_channel=0xH/ cmd
+ perf stat -a -e imx8_ddr0/axid-write,axi_mask=0xMMMM,axi_id=0xDDDD,axi_channel=0xH/ cmd
+
+ .. note::
+
+    axi_channel is inverted in userspace and will be reverted by the driver
+    automatically, so users do not need to specify axi_channel if they want to
+    monitor the data channel of DDR transactions, since the data channel is
+    more meaningful.
diff --git a/Documentation/admin-guide/perf/index.rst b/Documentation/admin-guide/perf/index.rst
index a2e6f2c81146..f4a4513c526f 100644
--- a/Documentation/admin-guide/perf/index.rst
+++ b/Documentation/admin-guide/perf/index.rst
@@ -19,6 +19,7 @@ Performance monitor support
arm_dsu_pmu
thunderx2-pmu
alibaba_pmu
+ dwc_pcie_pmu
nvidia-pmu
meson-ddr-pmu
cxl
diff --git a/Documentation/arch/arm64/arm-acpi.rst b/Documentation/arch/arm64/arm-acpi.rst
index a46c34fa9604..e59e4505d0d9 100644
--- a/Documentation/arch/arm64/arm-acpi.rst
+++ b/Documentation/arch/arm64/arm-acpi.rst
@@ -130,7 +130,7 @@ When an Arm system boots, it can either have DT information, ACPI tables,
or in some very unusual cases, both. If no command line parameters are used,
the kernel will try to use DT for device enumeration; if there is no DT
present, the kernel will try to use ACPI tables, but only if they are present.
-In neither is available, the kernel will not boot. If acpi=force is used
+If neither is available, the kernel will not boot. If acpi=force is used
on the command line, the kernel will attempt to use ACPI tables first, but
fall back to DT if there are no ACPI tables present. The basic idea is that
the kernel will not fail to boot unless it absolutely has no other choice.
diff --git a/Documentation/arch/arm64/perf.rst b/Documentation/arch/arm64/perf.rst
index 1f87b57c2332..997fd716b82f 100644
--- a/Documentation/arch/arm64/perf.rst
+++ b/Documentation/arch/arm64/perf.rst
@@ -164,3 +164,75 @@ and should be used to mask the upper bits as needed.
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/perf/arch/arm64/tests/user-events.c
.. _tools/lib/perf/tests/test-evsel.c:
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/perf/tests/test-evsel.c
+
+Event Counting Threshold
+==========================================
+
+Overview
+--------
+
+FEAT_PMUv3_TH (Armv8.8) permits a PMU counter to increment only on
+events whose count meets a specified threshold condition. For example if
+threshold_compare is set to 2 ('Greater than or equal'), and the
+threshold is set to 2, then the PMU counter will now only increment
+when an event would have previously incremented the PMU counter by 2 or
+more on a single processor cycle.
+
+To increment by 1 after passing the threshold condition instead of the
+number of events on that cycle, add the 'threshold_count' option to the
+commandline.
+
+How-to
+------
+
+These are the parameters for controlling the feature:
+
+.. list-table::
+ :header-rows: 1
+
+ * - Parameter
+ - Description
+ * - threshold
+ - Value to threshold the event by. A value of 0 means that
+ thresholding is disabled and the other parameters have no effect.
+ * - threshold_compare
+ - | Comparison function to use, with the following values supported:
+ |
+ | 0: Not-equal
+ | 1: Equals
+ | 2: Greater-than-or-equal
+ | 3: Less-than
+ * - threshold_count
+ - If this is set, count by 1 after passing the threshold condition
+ instead of the value of the event on this cycle.
+
+The threshold, threshold_compare and threshold_count values can be
+provided per event, for example:
+
+.. code-block:: sh
+
+ perf stat -e stall_slot/threshold=2,threshold_compare=2/ \
+ -e dtlb_walk/threshold=10,threshold_compare=3,threshold_count/
+
+In this example the stall_slot event will count by 2 or more on every
+cycle where 2 or more stalls happen, and dtlb_walk will count by 1 on
+every cycle where the number of dtlb walks was less than 10.
+
+The maximum supported threshold value can be read from the caps of each
+PMU, for example:
+
+.. code-block:: sh
+
+ cat /sys/bus/event_source/devices/armv8_pmuv3/caps/threshold_max
+
+ 0x000000ff
+
+If a value higher than this is given, then opening the event will result
+in an error. The highest possible maximum is 4095, as the config field
+for threshold is limited to 12 bits, and the Perf tool will refuse to
+parse higher values.
+
+If the PMU doesn't support FEAT_PMUv3_TH, then threshold_max will read
+0, and attempting to set a threshold value will also result in an error.
+threshold_max will also read as 0 on aarch32 guests, even if the host
+is running on hardware with the feature.
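+
+A wrapper script might use this to fall back gracefully when thresholding
+is unavailable (a minimal sketch, reusing the event and sysfs path from the
+examples above):
+
+.. code-block:: sh
+
+  # Only pass threshold options if the PMU advertises support.
+  max=$(cat /sys/bus/event_source/devices/armv8_pmuv3/caps/threshold_max)
+  if [ "$((max))" -gt 0 ]; then
+      perf stat -e stall_slot/threshold=2,threshold_compare=2/ -- sleep 1
+  else
+      perf stat -e stall_slot -- sleep 1
+  fi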
diff --git a/Documentation/arch/x86/cpuinfo.rst b/Documentation/arch/x86/cpuinfo.rst
index 08246e8ac835..8895784d4784 100644
--- a/Documentation/arch/x86/cpuinfo.rst
+++ b/Documentation/arch/x86/cpuinfo.rst
@@ -7,27 +7,74 @@ x86 Feature Flags
Introduction
============
-On x86, flags appearing in /proc/cpuinfo have an X86_FEATURE definition
-in arch/x86/include/asm/cpufeatures.h. If the kernel cares about a feature
-or KVM want to expose the feature to a KVM guest, it can and should have
-an X86_FEATURE_* defined. These flags represent hardware features as
-well as software features.
-
-If users want to know if a feature is available on a given system, they
-try to find the flag in /proc/cpuinfo. If a given flag is present, it
-means that the kernel supports it and is currently making it available.
-If such flag represents a hardware feature, it also means that the
-hardware supports it.
-
-If the expected flag does not appear in /proc/cpuinfo, things are murkier.
-Users need to find out the reason why the flag is missing and find the way
-how to enable it, which is not always easy. There are several factors that
-can explain missing flags: the expected feature failed to enable, the feature
-is missing in hardware, platform firmware did not enable it, the feature is
-disabled at build or run time, an old kernel is in use, or the kernel does
-not support the feature and thus has not enabled it. In general, /proc/cpuinfo
-shows features which the kernel supports. For a full list of CPUID flags
-which the CPU supports, use tools/arch/x86/kcpuid.
+The list of feature flags in /proc/cpuinfo is not complete and
+represents an ill-fated attempt from long ago to put feature flags
+in an easy-to-find place for userspace.
+
+However, the number of feature flags grows with every CPU generation,
+leading to an unparseable and unwieldy /proc/cpuinfo.
+
+What is more, those feature flags do not even need to be in that file
+because userspace doesn't care about them - glibc et al already use
+CPUID to find out what the target machine supports and what not.
+
+And even if /proc/cpuinfo doesn't show a particular feature flag - although the CPU
+still does have support for the respective hardware functionality and
+said CPU supports CPUID faulting - userspace can simply probe for the
+feature and figure out if it is supported or not, regardless of whether
+it is being advertised somewhere.
+
+Furthermore, those flag strings become an ABI the moment they appear
+there and maintaining them forever when nothing even uses them is a lot
+of wasted effort.
+
+So, the current use of /proc/cpuinfo is to show features which the
+kernel has *enabled* and *supports*. As in: the CPUID feature flag is
+there, there's an additional setup which the kernel has done while
+booting and the functionality is ready to use. A perfect example for
+that is "user_shstk" where additional code enablement is present in the
+kernel to support shadow stack for user programs.
+
+So, if users want to know if a feature is available on a given system,
+they try to find the flag in /proc/cpuinfo. If a given flag is present,
+it means that
+
+* the kernel knows about the feature enough to have an X86_FEATURE bit
+
+* the kernel supports it and is currently making it available either to
+ userspace or some other part of the kernel
+
+* if the flag represents a hardware feature, the hardware supports it.
+
+The absence of a flag in /proc/cpuinfo by itself means almost nothing to
+an end user.
+
+On the one hand, a feature like "vaes" might be fully available to user
+applications on a kernel that has not defined X86_FEATURE_VAES and thus
+there is no "vaes" in /proc/cpuinfo.
+
+On the other hand, a new kernel running on non-VAES hardware would also
+have no "vaes" in /proc/cpuinfo. There's no way for an application or
+user to tell the difference.
+
+The end result is that the flags field in /proc/cpuinfo is marginally
+useful for kernel debugging, but not really for anything else.
+Applications should instead use things like the glibc facilities for
+querying CPU support. Users should rely on tools like
+tools/arch/x86/kcpuid and cpuid(1).
+
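+For instance, whether the CPU itself implements a feature can be checked
+from userspace with cpuid(1) (a sketch; the exact output format differs
+between cpuid versions, and "vaes" is just an example flag)::
+
+  cpuid -1 | grep -i vaes
+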
+Regarding implementation, flags appearing in /proc/cpuinfo have an
+X86_FEATURE definition in arch/x86/include/asm/cpufeatures.h. These flags
+represent hardware features as well as software features.
+
+If the kernel cares about a feature, or KVM wants to expose the feature to
+a KVM guest, the flag should only be exposed to the guest when the guest
+needs to parse /proc/cpuinfo. Which, as mentioned above, is highly
+unlikely. KVM can synthesize the CPUID bit and the KVM guest can simply
+query CPUID and figure out what the hypervisor supports and what not. As
+already stated, /proc/cpuinfo is not a dumping ground for useless
+feature flags.
+
How are feature flags created?
==============================
diff --git a/Documentation/arch/x86/pti.rst b/Documentation/arch/x86/pti.rst
index 4b858a9bad8d..e08d35177bc0 100644
--- a/Documentation/arch/x86/pti.rst
+++ b/Documentation/arch/x86/pti.rst
@@ -81,11 +81,9 @@ this protection comes at a cost:
and exit (it can be skipped when the kernel is interrupted,
though.) Moves to CR3 are on the order of a hundred
cycles, and are required at every entry and exit.
- b. A "trampoline" must be used for SYSCALL entry. This
- trampoline depends on a smaller set of resources than the
- non-PTI SYSCALL entry code, so requires mapping fewer
- things into the userspace page tables. The downside is
- that stacks must be switched at entry time.
+  b. The percpu TSS is mapped into the user page tables to allow the SYSCALL64
+     path to work under PTI. This doesn't have a direct runtime cost, but it
+     can be argued that it opens certain timing attack scenarios.
c. Global pages are disabled for all kernel structures not
mapped into both kernel and userspace page tables. This
feature of the MMU allows different processes to share TLB
@@ -167,7 +165,7 @@ that are worth noting here.
* Failures of the selftests/x86 code. Usually a bug in one of the
more obscure corners of entry_64.S
* Crashes in early boot, especially around CPU bringup. Bugs
- in the trampoline code or mappings cause these.
+ in the mappings cause these.
* Crashes at the first interrupt. Caused by bugs in entry_64.S,
like screwing up a page table switch. Also caused by
incorrectly mapping the IRQ handler entry code.
diff --git a/Documentation/core-api/maple_tree.rst b/Documentation/core-api/maple_tree.rst
index 96f3d5f076b5..ccdd1615cf97 100644
--- a/Documentation/core-api/maple_tree.rst
+++ b/Documentation/core-api/maple_tree.rst
@@ -81,6 +81,9 @@ section.
Sometimes it is necessary to ensure the next call to store to a maple tree does
not allocate memory, please see :ref:`maple-tree-advanced-api` for this use case.
+You can use mtree_dup() to duplicate an entire maple tree. It is more
+efficient than inserting all elements one by one into a new tree.
+
Finally, you can remove all entries from a maple tree by calling
mtree_destroy(). If the maple tree entries are pointers, you may wish to free
the entries first.
@@ -112,6 +115,7 @@ Takes ma_lock internally:
* mtree_insert()
* mtree_insert_range()
* mtree_erase()
+ * mtree_dup()
* mtree_destroy()
* mt_set_in_rcu()
* mt_clear_in_rcu()
diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst
index 2d091c873d1e..af8151db88b2 100644
--- a/Documentation/core-api/mm-api.rst
+++ b/Documentation/core-api/mm-api.rst
@@ -37,7 +37,7 @@ The Slab Cache
.. kernel-doc:: include/linux/slab.h
:internal:
-.. kernel-doc:: mm/slab.c
+.. kernel-doc:: mm/slub.c
:export:
.. kernel-doc:: mm/slab_common.c
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml b/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
index 6a206111d4e0..ebb40c48950a 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
@@ -29,6 +29,12 @@ properties:
maxItems: 1
description:
Specifies the base address and size of vMPM registers in RPM MSG RAM.
+ deprecated: true
+
+ qcom,rpm-msg-ram:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the APSS MPM slice of the RPM Message RAM
interrupts:
maxItems: 1
@@ -67,34 +73,46 @@ properties:
required:
- compatible
- - reg
- interrupts
- mboxes
- interrupt-controller
- '#interrupt-cells'
- qcom,mpm-pin-count
- qcom,mpm-pin-map
+ - qcom,rpm-msg-ram
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
- mpm: interrupt-controller@45f01b8 {
- compatible = "qcom,mpm";
- interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>;
- reg = <0x45f01b8 0x1000>;
- mboxes = <&apcs_glb 1>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupt-parent = <&intc>;
- qcom,mpm-pin-count = <96>;
- qcom,mpm-pin-map = <2 275>,
- <5 296>,
- <12 422>,
- <24 79>,
- <86 183>,
- <90 260>,
- <91 260>;
- #power-domain-cells = <0>;
+
+ remoteproc-rpm {
+ compatible = "qcom,msm8998-rpm-proc", "qcom,rpm-proc";
+
+ glink-edge {
+ compatible = "qcom,glink-rpm";
+
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+ mboxes = <&apcs_glb 0>;
+ };
+
+ mpm: interrupt-controller {
+ compatible = "qcom,mpm";
+ qcom,rpm-msg-ram = <&apss_mpm>;
+ interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&apcs_glb 1>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ qcom,mpm-pin-count = <96>;
+ qcom,mpm-pin-map = <2 275>,
+ <5 296>,
+ <12 422>,
+ <24 79>,
+ <86 183>,
+ <91 260>;
+ #power-domain-cells = <0>;
+ };
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml
index 2ef3081eaaf3..d3b5aec0a3f7 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml
@@ -26,6 +26,7 @@ properties:
- renesas,r9a07g043u-irqc # RZ/G2UL
- renesas,r9a07g044-irqc # RZ/G2{L,LC}
- renesas,r9a07g054-irqc # RZ/V2L
+ - renesas,r9a08g045-irqc # RZ/G3S
- const: renesas,rzg2l-irqc
'#interrupt-cells':
@@ -167,7 +168,9 @@ allOf:
properties:
compatible:
contains:
- const: renesas,r9a07g043u-irqc
+ enum:
+ - renesas,r9a07g043u-irqc
+ - renesas,r9a08g045-irqc
then:
properties:
interrupts:
diff --git a/Documentation/devicetree/bindings/mtd/partitions/u-boot.yaml b/Documentation/devicetree/bindings/mtd/partitions/u-boot.yaml
index 3c56efe48efd..327fa872c001 100644
--- a/Documentation/devicetree/bindings/mtd/partitions/u-boot.yaml
+++ b/Documentation/devicetree/bindings/mtd/partitions/u-boot.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: U-Boot bootloader partition
description: |
- U-Boot is a bootlodaer commonly used in embedded devices. It's almost always
+ U-Boot is a bootloader commonly used in embedded devices. It's almost always
located on some kind of flash device.
Device configuration is stored as a set of environment variables that are
diff --git a/Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml b/Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml
index e9fad4b3de68..6c96a4204e5d 100644
--- a/Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml
+++ b/Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml
@@ -27,6 +27,9 @@ properties:
- fsl,imx8mq-ddr-pmu
- fsl,imx8mp-ddr-pmu
- const: fsl,imx8m-ddr-pmu
+ - items:
+ - const: fsl,imx8dxl-ddr-pmu
+ - const: fsl,imx8-ddr-pmu
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
index ce7751b9129c..9ff9abf2691a 100644
--- a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
@@ -105,6 +105,8 @@ properties:
description:
Interrupt signaling a critical under-voltage event.
+ system-critical-regulator: true
+
required:
- compatible
- regulator-name
diff --git a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml
index acd37f28ef53..27c6d5152413 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml
@@ -42,6 +42,7 @@ description: |
For PM7325, smps1 - smps8, ldo1 - ldo19
For PM8005, smps1 - smps4
For PM8009, smps1 - smps2, ldo1 - ldo7
+ For PM8010, ldo1 - ldo7
For PM8150, smps1 - smps10, ldo1 - ldo18
For PM8150L, smps1 - smps8, ldo1 - ldo11, bob, flash, rgb
For PM8350, smps1 - smps12, ldo1 - ldo10
@@ -68,6 +69,7 @@ properties:
- qcom,pm8005-rpmh-regulators
- qcom,pm8009-rpmh-regulators
- qcom,pm8009-1-rpmh-regulators
+ - qcom,pm8010-rpmh-regulators
- qcom,pm8150-rpmh-regulators
- qcom,pm8150l-rpmh-regulators
- qcom,pm8350-rpmh-regulators
@@ -242,6 +244,18 @@ allOf:
properties:
compatible:
enum:
+ - qcom,pm8010-rpmh-regulators
+ then:
+ properties:
+ vdd-l1-l2-supply: true
+ vdd-l3-l4-supply: true
+ patternProperties:
+ "^vdd-l[5-7]-supply$": true
+
+ - if:
+ properties:
+ compatible:
+ enum:
- qcom,pm8150-rpmh-regulators
- qcom,pmc8180-rpmh-regulators
- qcom,pmm8155au-rpmh-regulators
diff --git a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
index 9ea8ac0786ac..f2fd2df68a9e 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
@@ -47,6 +47,9 @@ description:
For pm8916, s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11,
l12, l13, l14, l15, l16, l17, l18
+ For pm8937, s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10,
+ l11, l12, l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23
+
For pm8941, s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11,
l12, l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24, lvs1, lvs2,
lvs3, 5vs1, 5vs2
@@ -92,6 +95,7 @@ properties:
- qcom,rpm-pm8841-regulators
- qcom,rpm-pm8909-regulators
- qcom,rpm-pm8916-regulators
+ - qcom,rpm-pm8937-regulators
- qcom,rpm-pm8941-regulators
- qcom,rpm-pm8950-regulators
- qcom,rpm-pm8953-regulators
diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.yaml
index 7a1b7d2abbd4..aea849e8eadf 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.yaml
@@ -22,6 +22,7 @@ properties:
- qcom,pm8841-regulators
- qcom,pm8909-regulators
- qcom,pm8916-regulators
+ - qcom,pm8937-regulators
- qcom,pm8941-regulators
- qcom,pm8950-regulators
- qcom,pm8994-regulators
@@ -296,6 +297,24 @@ allOf:
compatible:
contains:
enum:
+ - qcom,pm8937-regulators
+ then:
+ properties:
+ vdd_l1_l19-supply: true
+ vdd_l20_l21-supply: true
+ vdd_l2_l23-supply: true
+ vdd_l3-supply: true
+ vdd_l4_l5_l6_l7_l16-supply: true
+ vdd_l8_l11_l12_l17_l22-supply: true
+ vdd_l9_l10_l13_l14_l15_l18-supply: true
+ patternProperties:
+ "^vdd_s[1-6]-supply$": true
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,pm8950-regulators
then:
properties:
diff --git a/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml
index 89c564dfa5db..534f87e98716 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml
@@ -36,10 +36,11 @@ unevaluatedProperties: false
examples:
- |
- pm8150b {
+ pmic {
#address-cells = <1>;
#size-cells = <0>;
- pm8150b_vbus: usb-vbus-regulator@1100 {
+
+ usb-vbus-regulator@1100 {
compatible = "qcom,pm8150b-vbus-reg";
reg = <0x1100>;
regulator-min-microamp = <500000>;
diff --git a/Documentation/devicetree/bindings/regulator/regulator.yaml b/Documentation/devicetree/bindings/regulator/regulator.yaml
index 9daf0fc2465f..1ef380d1515e 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/regulator.yaml
@@ -114,6 +114,11 @@ properties:
description: Enable pull down resistor when the regulator is disabled.
type: boolean
+ system-critical-regulator:
+ description: Set if the regulator is critical to system stability or
+ functionality.
+ type: boolean
+
regulator-over-current-protection:
description: Enable over current protection.
type: boolean
@@ -181,6 +186,14 @@ properties:
be enabled but limit setting can be omitted. Limit is given as microvolt
offset from voltage set to regulator.
+ regulator-uv-less-critical-window-ms:
+ description: Specifies the time window (in milliseconds) following a
+ critical under-voltage event during which the system can continue to
+ operate safely while performing less critical operations. This property
+ provides a defined duration before a more severe reaction to the
+ under-voltage event is needed, allowing for certain non-urgent actions to
+ be carried out in preparation for potential power loss.
+
regulator-temp-protection-kelvin:
description: Set over temperature protection limit. This is a limit where
hardware performs emergency shutdown. Zero can be passed to disable
diff --git a/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.txt b/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.txt
deleted file mode 100644
index 8a18d71e6879..000000000000
--- a/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Analog Devices AXI SPI Engine controller Device Tree Bindings
-
-Required properties:
-- compatible : Must be "adi,axi-spi-engine-1.00.a""
-- reg : Physical base address and size of the register map.
-- interrupts : Property with a value describing the interrupt
- number.
-- clock-names : List of input clock names - "s_axi_aclk", "spi_clk"
-- clocks : Clock phandles and specifiers (See clock bindings for
- details on clock-names and clocks).
-- #address-cells : Must be <1>
-- #size-cells : Must be <0>
-
-Optional subnodes:
- Subnodes are use to represent the SPI slave devices connected to the SPI
- master. They follow the generic SPI bindings as outlined in spi-bus.txt.
-
-Example:
-
- spi@@44a00000 {
- compatible = "adi,axi-spi-engine-1.00.a";
- reg = <0x44a00000 0x1000>;
- interrupts = <0 56 4>;
- clocks = <&clkc 15 &clkc 15>;
- clock-names = "s_axi_aclk", "spi_clk";
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* SPI devices */
- };
diff --git a/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml b/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml
new file mode 100644
index 000000000000..d48faa42d025
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/adi,axi-spi-engine.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AXI SPI Engine Controller
+
+description: |
+ The AXI SPI Engine controller is part of the SPI Engine framework[1] and
+ allows memory mapped access to the SPI Engine control bus. This allows it
+  to be used as a general purpose software driven SPI controller, as well as
+  providing some optional advanced acceleration and offloading capabilities.
+
+ [1] https://wiki.analog.com/resources/fpga/peripherals/spi_engine
+
+maintainers:
+ - Michael Hennerich <Michael.Hennerich@analog.com>
+ - Nuno Sá <nuno.sa@analog.com>
+
+allOf:
+ - $ref: /schemas/spi/spi-controller.yaml#
+
+properties:
+ compatible:
+ const: adi,axi-spi-engine-1.00.a
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: The AXI interconnect clock.
+ - description: The SPI controller clock.
+
+ clock-names:
+ items:
+ - const: s_axi_aclk
+ - const: spi_clk
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi@44a00000 {
+ compatible = "adi,axi-spi-engine-1.00.a";
+ reg = <0x44a00000 0x1000>;
+ interrupts = <0 56 4>;
+ clocks = <&clkc 15>, <&clkc 15>;
+ clock-names = "s_axi_aclk", "spi_clk";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* SPI devices */
+ };
diff --git a/Documentation/devicetree/bindings/spi/renesas,rspi.yaml b/Documentation/devicetree/bindings/spi/renesas,rspi.yaml
index 4d8ec69214c9..0ef3f8421986 100644
--- a/Documentation/devicetree/bindings/spi/renesas,rspi.yaml
+++ b/Documentation/devicetree/bindings/spi/renesas,rspi.yaml
@@ -21,7 +21,7 @@ properties:
- enum:
- renesas,rspi-r7s72100 # RZ/A1H
- renesas,rspi-r7s9210 # RZ/A2
- - renesas,r9a07g043-rspi # RZ/G2UL
+ - renesas,r9a07g043-rspi # RZ/G2UL and RZ/Five
- renesas,r9a07g044-rspi # RZ/G2{L,LC}
- renesas,r9a07g054-rspi # RZ/V2L
- const: renesas,rspi-rz
diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
index 6348a387a21c..fde3776a558b 100644
--- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
+++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
@@ -72,8 +72,6 @@ properties:
- const: snps,dw-apb-ssi
- description: Intel Keem Bay SPI Controller
const: intel,keembay-ssi
- - description: Intel Thunder Bay SPI Controller
- const: intel,thunderbay-ssi
- description: Intel Mount Evans Integrated Management Complex SPI Controller
const: intel,mountevans-imc-ssi
- description: AMD Pensando Elba SoC SPI Controller
diff --git a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
index ae0f082bd377..4bd9aeb81208 100644
--- a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
@@ -23,7 +23,9 @@ properties:
compatible:
enum:
- st,stm32f4-spi
+ - st,stm32f7-spi
- st,stm32h7-spi
+ - st,stm32mp25-spi
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml b/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
index fbd4212285e2..9b2272a9ec15 100644
--- a/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
+++ b/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
@@ -16,6 +16,7 @@ properties:
- allwinner,sun8i-a83t-ths
- allwinner,sun8i-h3-ths
- allwinner,sun8i-r40-ths
+ - allwinner,sun20i-d1-ths
- allwinner,sun50i-a64-ths
- allwinner,sun50i-a100-ths
- allwinner,sun50i-h5-ths
@@ -61,6 +62,7 @@ allOf:
compatible:
contains:
enum:
+ - allwinner,sun20i-d1-ths
- allwinner,sun50i-a100-ths
- allwinner,sun50i-h6-ths
@@ -84,7 +86,9 @@ allOf:
properties:
compatible:
contains:
- const: allwinner,sun8i-h3-ths
+ enum:
+ - allwinner,sun8i-h3-ths
+ - allwinner,sun20i-d1-ths
then:
properties:
@@ -103,6 +107,7 @@ allOf:
enum:
- allwinner,sun8i-h3-ths
- allwinner,sun8i-r40-ths
+ - allwinner,sun20i-d1-ths
- allwinner,sun50i-a64-ths
- allwinner,sun50i-a100-ths
- allwinner,sun50i-h5-ths
diff --git a/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml b/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
index 7538469997f9..b634f57cd011 100644
--- a/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
@@ -10,6 +10,9 @@ maintainers:
- zhanghongchen <zhanghongchen@loongson.cn>
- Yinbo Zhu <zhuyinbo@loongson.cn>
+allOf:
+ - $ref: /schemas/thermal/thermal-sensor.yaml#
+
properties:
compatible:
oneOf:
@@ -26,12 +29,16 @@ properties:
interrupts:
maxItems: 1
+ '#thermal-sensor-cells':
+ const: 1
+
required:
- compatible
- reg
- interrupts
+ - '#thermal-sensor-cells'
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
@@ -41,4 +48,5 @@ examples:
reg = <0x1fe01500 0x30>;
interrupt-parent = <&liointc0>;
interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ #thermal-sensor-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/thermal/mediatek,thermal.yaml b/Documentation/devicetree/bindings/thermal/mediatek,thermal.yaml
new file mode 100644
index 000000000000..d96a2e32bd8f
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/mediatek,thermal.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/mediatek,thermal.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek thermal controller for on-SoC temperatures
+
+maintainers:
+ - Sascha Hauer <s.hauer@pengutronix.de>
+
+description:
+  This device does not have its own ADC; instead it directly controls the AUXADC
+  via AHB bus accesses. For this reason it needs a phandle to the AUXADC. It also
+  controls a mux in the apmixedsys register space via AHB bus accesses, so a
+  phandle to the APMIXEDSYS is needed as well.
+
+allOf:
+ - $ref: thermal-sensor.yaml#
+
+properties:
+ compatible:
+ enum:
+ - mediatek,mt2701-thermal
+ - mediatek,mt2712-thermal
+ - mediatek,mt7622-thermal
+ - mediatek,mt7981-thermal
+ - mediatek,mt7986-thermal
+ - mediatek,mt8173-thermal
+ - mediatek,mt8183-thermal
+ - mediatek,mt8365-thermal
+ - mediatek,mt8516-thermal
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Main clock needed for register access
+ - description: The AUXADC clock
+
+ clock-names:
+ items:
+ - const: therm
+ - const: auxadc
+
+ mediatek,auxadc:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: A phandle to the AUXADC which the thermal controller uses
+
+ mediatek,apmixedsys:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: A phandle to the APMIXEDSYS controller
+
+ resets:
+ description: Reset controller controlling the thermal controller
+
+ nvmem-cells:
+ items:
+ - description:
+        NVMEM cell containing the calibration data provided by an NVMEM
+        device (e.g. an EEPROM). If unspecified, default values shall be used.
+
+ nvmem-cell-names:
+ items:
+ - const: calibration-data
+
+required:
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - mediatek,auxadc
+ - mediatek,apmixedsys
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/mt8173-clk.h>
+ #include <dt-bindings/reset/mt8173-resets.h>
+
+ thermal@1100b000 {
+ compatible = "mediatek,mt8173-thermal";
+ reg = <0x1100b000 0x1000>;
+ interrupts = <0 70 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&pericfg CLK_PERI_THERM>, <&pericfg CLK_PERI_AUXADC>;
+ clock-names = "therm", "auxadc";
+ resets = <&pericfg MT8173_PERI_THERM_SW_RST>;
+ mediatek,auxadc = <&auxadc>;
+ mediatek,apmixedsys = <&apmixedsys>;
+ nvmem-cells = <&thermal_calibration_data>;
+ nvmem-cell-names = "calibration-data";
+ #thermal-sensor-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt b/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt
deleted file mode 100644
index ac39c7156fde..000000000000
--- a/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-* Mediatek Thermal
-
-This describes the device tree binding for the Mediatek thermal controller
-which measures the on-SoC temperatures. This device does not have its own ADC,
-instead it directly controls the AUXADC via AHB bus accesses. For this reason
-this device needs phandles to the AUXADC. Also it controls a mux in the
-apmixedsys register space via AHB bus accesses, so a phandle to the APMIXEDSYS
-is also needed.
-
-Required properties:
-- compatible:
- - "mediatek,mt8173-thermal" : For MT8173 family of SoCs
- - "mediatek,mt2701-thermal" : For MT2701 family of SoCs
- - "mediatek,mt2712-thermal" : For MT2712 family of SoCs
- - "mediatek,mt7622-thermal" : For MT7622 SoC
- - "mediatek,mt7981-thermal", "mediatek,mt7986-thermal" : For MT7981 SoC
- - "mediatek,mt7986-thermal" : For MT7986 SoC
- - "mediatek,mt8183-thermal" : For MT8183 family of SoCs
- - "mediatek,mt8365-thermal" : For MT8365 family of SoCs
- - "mediatek,mt8516-thermal", "mediatek,mt2701-thermal : For MT8516 family of SoCs
-- reg: Address range of the thermal controller
-- interrupts: IRQ for the thermal controller
-- clocks, clock-names: Clocks needed for the thermal controller. required
- clocks are:
- "therm": Main clock needed for register access
- "auxadc": The AUXADC clock
-- mediatek,auxadc: A phandle to the AUXADC which the thermal controller uses
-- mediatek,apmixedsys: A phandle to the APMIXEDSYS controller.
-- #thermal-sensor-cells : Should be 0. See Documentation/devicetree/bindings/thermal/thermal-sensor.yaml for a description.
-
-Optional properties:
-- resets: Reference to the reset controller controlling the thermal controller.
-- nvmem-cells: A phandle to the calibration data provided by a nvmem device. If
- unspecified default values shall be used.
-- nvmem-cell-names: Should be "calibration-data"
-
-Example:
-
- thermal: thermal@1100b000 {
- #thermal-sensor-cells = <1>;
- compatible = "mediatek,mt8173-thermal";
- reg = <0 0x1100b000 0 0x1000>;
- interrupts = <0 70 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&pericfg CLK_PERI_THERM>, <&pericfg CLK_PERI_AUXADC>;
- clock-names = "therm", "auxadc";
- resets = <&pericfg MT8173_PERI_THERM_SW_RST>;
- reset-names = "therm";
- mediatek,auxadc = <&auxadc>;
- mediatek,apmixedsys = <&apmixedsys>;
- nvmem-cells = <&thermal_calibration_data>;
- nvmem-cell-names = "calibration-data";
- };
diff --git a/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm-hc.yaml b/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm-hc.yaml
index 01253d58bf9f..7541e27704ca 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm-hc.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm-hc.yaml
@@ -114,12 +114,14 @@ examples:
- |
#include <dt-bindings/iio/qcom,spmi-vadc.h>
#include <dt-bindings/interrupt-controller/irq.h>
- spmi_bus {
+
+ pmic {
#address-cells = <1>;
#size-cells = <0>;
+
pm8998_adc: adc@3100 {
- reg = <0x3100>;
compatible = "qcom,spmi-adc-rev2";
+ reg = <0x3100>;
#address-cells = <1>;
#size-cells = <0>;
#io-channel-cells = <1>;
@@ -130,7 +132,7 @@ examples:
};
};
- pm8998_adc_tm: adc-tm@3400 {
+ adc-tm@3400 {
compatible = "qcom,spmi-adc-tm-hc";
reg = <0x3400>;
interrupts = <0x2 0x34 0x0 IRQ_TYPE_EDGE_RISING>;
diff --git a/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm5.yaml b/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm5.yaml
index 3c81def03c84..d9d2657287cb 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm5.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm5.yaml
@@ -167,12 +167,14 @@ examples:
- |
#include <dt-bindings/iio/qcom,spmi-vadc.h>
#include <dt-bindings/interrupt-controller/irq.h>
- spmi_bus {
+
+ pmic {
#address-cells = <1>;
#size-cells = <0>;
+
pm8150b_adc: adc@3100 {
- reg = <0x3100>;
compatible = "qcom,spmi-adc5";
+ reg = <0x3100>;
#address-cells = <1>;
#size-cells = <0>;
#io-channel-cells = <1>;
@@ -186,7 +188,7 @@ examples:
};
};
- pm8150b_adc_tm: adc-tm@3500 {
+ adc-tm@3500 {
compatible = "qcom,spmi-adc-tm5";
reg = <0x3500>;
interrupts = <0x2 0x35 0x0 IRQ_TYPE_EDGE_RISING>;
@@ -207,12 +209,14 @@ examples:
#include <dt-bindings/iio/qcom,spmi-adc7-pmk8350.h>
#include <dt-bindings/iio/qcom,spmi-adc7-pm8350.h>
#include <dt-bindings/interrupt-controller/irq.h>
- spmi_bus {
+
+ pmic {
#address-cells = <1>;
#size-cells = <0>;
+
pmk8350_vadc: adc@3100 {
- reg = <0x3100>;
compatible = "qcom,spmi-adc7";
+ reg = <0x3100>;
#address-cells = <1>;
#size-cells = <0>;
#io-channel-cells = <1>;
@@ -233,7 +237,7 @@ examples:
};
};
- pmk8350_adc_tm: adc-tm@3400 {
+ adc-tm@3400 {
compatible = "qcom,spmi-adc-tm5-gen2";
reg = <0x3400>;
interrupts = <0x0 0x34 0x0 IRQ_TYPE_EDGE_RISING>;
diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
index 437b74732886..99d9c526c0b6 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
@@ -66,6 +66,7 @@ properties:
- qcom,sm8350-tsens
- qcom,sm8450-tsens
- qcom,sm8550-tsens
+ - qcom,sm8650-tsens
- const: qcom,tsens-v2
- description: v2 of TSENS with combined interrupt
diff --git a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
index 4a8dabc48170..dbd52620d293 100644
--- a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
+++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
@@ -75,6 +75,22 @@ patternProperties:
framework and assumes that the thermal sensors in this zone
support interrupts.
+ critical-action:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: |
+      The action the OS should perform after the critical temperature is
+      reached. If the property is not set, the system will shut down by
+      default as a safe action to prevent damage to the hardware.
+      Shutdown should always be the default and preferred action.
+      Choose 'reboot' with care, as the hardware may still be under thermal
+      stress, leading to endless reboots that may damage the hardware.
+      Make sure the firmware/bootloader will act as the last resort and
+      take over thermal control.
+
+ enum:
+ - shutdown
+ - reboot
+
thermal-sensors:
$ref: /schemas/types.yaml#/definitions/phandle-array
maxItems: 1
diff --git a/Documentation/driver-api/mtd/spi-nor.rst b/Documentation/driver-api/mtd/spi-nor.rst
index c22f8c0f7950..148fa4288760 100644
--- a/Documentation/driver-api/mtd/spi-nor.rst
+++ b/Documentation/driver-api/mtd/spi-nor.rst
@@ -2,64 +2,204 @@
SPI NOR framework
=================
-Part I - Why do we need this framework?
----------------------------------------
-
-SPI bus controllers (drivers/spi/) only deal with streams of bytes; the bus
-controller operates agnostic of the specific device attached. However, some
-controllers (such as Freescale's QuadSPI controller) cannot easily handle
-arbitrary streams of bytes, but rather are designed specifically for SPI NOR.
-
-In particular, Freescale's QuadSPI controller must know the NOR commands to
-find the right LUT sequence. Unfortunately, the SPI subsystem has no notion of
-opcodes, addresses, or data payloads; a SPI controller simply knows to send or
-receive bytes (Tx and Rx). Therefore, we must define a new layering scheme under
-which the controller driver is aware of the opcodes, addressing, and other
-details of the SPI NOR protocol.
-
-Part II - How does the framework work?
---------------------------------------
-
-This framework just adds a new layer between the MTD and the SPI bus driver.
-With this new layer, the SPI NOR controller driver does not depend on the
-m25p80 code anymore.
-
-Before this framework, the layer is like::
-
- MTD
- ------------------------
- m25p80
- ------------------------
- SPI bus driver
- ------------------------
- SPI NOR chip
-
-After this framework, the layer is like::
-
- MTD
- ------------------------
- SPI NOR framework
- ------------------------
- m25p80
- ------------------------
- SPI bus driver
- ------------------------
- SPI NOR chip
-
-With the SPI NOR controller driver (Freescale QuadSPI), it looks like::
-
- MTD
- ------------------------
- SPI NOR framework
- ------------------------
- fsl-quadSPI
- ------------------------
- SPI NOR chip
-
-Part III - How can drivers use the framework?
----------------------------------------------
-
-The main API is spi_nor_scan(). Before you call the hook, a driver should
-initialize the necessary fields for spi_nor{}. Please see
-drivers/mtd/spi-nor/spi-nor.c for detail. Please also refer to spi-fsl-qspi.c
-when you want to write a new driver for a SPI NOR controller.
+How to propose a new flash addition
+-----------------------------------
+
+Most SPI NOR flashes comply with the JEDEC JESD216
+Serial Flash Discoverable Parameter (SFDP) standard. SFDP describes
+the functional and feature capabilities of serial flash devices in a
+standard set of internal read-only parameter tables.
+
+The SPI NOR driver queries the SFDP tables in order to determine the
+flash's parameters and settings. If the flash defines the SFDP tables,
+it's likely that you won't need a flash entry at all and can instead
+rely on the generic flash driver, which probes the flash solely based
+on its SFDP data. All one has to do is to specify the "jedec,spi-nor"
+compatible in the device tree.
+
+There are cases however where you need to define an explicit flash
+entry. This typically happens when the flash has settings or support
+that is not covered by the SFDP tables (e.g. Block Protection), or
+when the flash contains mangled SFDP data. In the latter case, one needs
+to implement the ``spi_nor_fixups`` hooks in order to amend the SFDP
+parameters with the correct values.
+
+Minimum testing requirements
+-----------------------------
+
+Do all the tests below and paste their results in the commit's comments
+section, after the ``---`` marker.
+
+1) Specify the controller that you used to test the flash and specify
+ the frequency at which the flash was operated, e.g.::
+
+ This flash is populated on the X board and was tested at Y
+ frequency using the Z (put compatible) SPI controller.
+
+2) Dump the sysfs entries and print the md5/sha1/sha256 SFDP checksum::
+
+ root@1:~# cat /sys/bus/spi/devices/spi0.0/spi-nor/partname
+ sst26vf064b
+ root@1:~# cat /sys/bus/spi/devices/spi0.0/spi-nor/jedec_id
+ bf2643
+ root@1:~# cat /sys/bus/spi/devices/spi0.0/spi-nor/manufacturer
+ sst
+ root@1:~# xxd -p /sys/bus/spi/devices/spi0.0/spi-nor/sfdp
+ 53464450060102ff00060110300000ff81000106000100ffbf0001180002
+ 0001fffffffffffffffffffffffffffffffffd20f1ffffffff0344eb086b
+ 083b80bbfeffffffffff00ffffff440b0c200dd80fd810d820914824806f
+ 1d81ed0f773830b030b0f7ffffff29c25cfff030c080ffffffffffffffff
+ ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ ffffffffffffffffffffffffffffffffff0004fff37f0000f57f0000f9ff
+ 7d00f57f0000f37f0000ffffffffffffffffffffffffffffffffffffffff
+ ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ ffffbf2643ffb95ffdff30f260f332ff0a122346ff0f19320f1919ffffff
+ ffffffff00669938ff05013506040232b03072428de89888a585c09faf5a
+ ffff06ec060c0003080bffffffffff07ffff0202ff060300fdfd040700fc
+ 0300fefe0202070e
+ root@1:~# sha256sum /sys/bus/spi/devices/spi0.0/spi-nor/sfdp
+ 428f34d0461876f189ac97f93e68a05fa6428c6650b3b7baf736a921e5898ed1 /sys/bus/spi/devices/spi0.0/spi-nor/sfdp
+
+ Please dump the SFDP tables using ``xxd -p``. It enables us to do
+ the reverse operation and convert the hexdump to binary with
+ ``xxd -rp``. Dumping the SFDP data with ``hexdump -Cv`` is accepted,
+ but less desirable.
+
+3) Dump debugfs data::
+
+ root@1:~# cat /sys/kernel/debug/spi-nor/spi0.0/capabilities
+ Supported read modes by the flash
+ 1S-1S-1S
+ opcode 0x03
+ mode cycles 0
+ dummy cycles 0
+ 1S-1S-1S (fast read)
+ opcode 0x0b
+ mode cycles 0
+ dummy cycles 8
+ 1S-1S-2S
+ opcode 0x3b
+ mode cycles 0
+ dummy cycles 8
+ 1S-2S-2S
+ opcode 0xbb
+ mode cycles 4
+ dummy cycles 0
+ 1S-1S-4S
+ opcode 0x6b
+ mode cycles 0
+ dummy cycles 8
+ 1S-4S-4S
+ opcode 0xeb
+ mode cycles 2
+ dummy cycles 4
+ 4S-4S-4S
+ opcode 0x0b
+ mode cycles 2
+ dummy cycles 4
+
+ Supported page program modes by the flash
+ 1S-1S-1S
+ opcode 0x02
+
+ root@1:~# cat /sys/kernel/debug/spi-nor/spi0.0/params
+ name sst26vf064b
+ id bf 26 43 bf 26 43
+ size 8.00 MiB
+ write size 1
+ page size 256
+ address nbytes 3
+ flags HAS_LOCK | HAS_16BIT_SR | SOFT_RESET | SWP_IS_VOLATILE
+
+ opcodes
+ read 0xeb
+ dummy cycles 6
+ erase 0x20
+ program 0x02
+ 8D extension none
+
+ protocols
+ read 1S-4S-4S
+ write 1S-1S-1S
+ register 1S-1S-1S
+
+ erase commands
+ 20 (4.00 KiB) [0]
+ d8 (8.00 KiB) [1]
+ d8 (32.0 KiB) [2]
+ d8 (64.0 KiB) [3]
+ c7 (8.00 MiB)
+
+ sector map
+ region (in hex) | erase mask | flags
+ ------------------+------------+----------
+ 00000000-00007fff | [01 ] |
+ 00008000-0000ffff | [0 2 ] |
+ 00010000-007effff | [0 3] |
+ 007f0000-007f7fff | [0 2 ] |
+ 007f8000-007fffff | [01 ] |
+
+4) Use `mtd-utils <https://git.infradead.org/mtd-utils.git>`__
+ and verify that erase, read and page program operations work fine::
+
+ root@1:~# dd if=/dev/urandom of=./spi_test bs=1M count=2
+ 2+0 records in
+ 2+0 records out
+ 2097152 bytes (2.1 MB, 2.0 MiB) copied, 0.848566 s, 2.5 MB/s
+
+ root@1:~# mtd_debug erase /dev/mtd0 0 2097152
+ Erased 2097152 bytes from address 0x00000000 in flash
+
+ root@1:~# mtd_debug read /dev/mtd0 0 2097152 spi_read
+ Copied 2097152 bytes from address 0x00000000 in flash to spi_read
+
+ root@1:~# hexdump spi_read
+ 0000000 ffff ffff ffff ffff ffff ffff ffff ffff
+ *
+ 0200000
+
+ root@1:~# sha256sum spi_read
+ 4bda3a28f4ffe603c0ec1258c0034d65a1a0d35ab7bd523a834608adabf03cc5 spi_read
+
+ root@1:~# mtd_debug write /dev/mtd0 0 2097152 spi_test
+ Copied 2097152 bytes from spi_test to address 0x00000000 in flash
+
+ root@1:~# mtd_debug read /dev/mtd0 0 2097152 spi_read
+ Copied 2097152 bytes from address 0x00000000 in flash to spi_read
+
+ root@1:~# sha256sum spi*
+ c444216a6ba2a4a66cccd60a0dd062bce4b865dd52b200ef5e21838c4b899ac8 spi_read
+ c444216a6ba2a4a66cccd60a0dd062bce4b865dd52b200ef5e21838c4b899ac8 spi_test
+
+ If the flash comes erased by default and the previous erase was ignored,
+ we won't catch it, thus test the erase again::
+
+ root@1:~# mtd_debug erase /dev/mtd0 0 2097152
+ Erased 2097152 bytes from address 0x00000000 in flash
+
+ root@1:~# mtd_debug read /dev/mtd0 0 2097152 spi_read
+ Copied 2097152 bytes from address 0x00000000 in flash to spi_read
+
+ root@1:~# sha256sum spi*
+ 4bda3a28f4ffe603c0ec1258c0034d65a1a0d35ab7bd523a834608adabf03cc5 spi_read
+ c444216a6ba2a4a66cccd60a0dd062bce4b865dd52b200ef5e21838c4b899ac8 spi_test
+
+ Dump some other relevant data::
+
+ root@1:~# mtd_debug info /dev/mtd0
+ mtd.type = MTD_NORFLASH
+ mtd.flags = MTD_CAP_NORFLASH
+ mtd.size = 8388608 (8M)
+ mtd.erasesize = 4096 (4K)
+ mtd.writesize = 1
+ mtd.oobsize = 0
+ regions = 0
diff --git a/Documentation/driver-api/surface_aggregator/ssh.rst b/Documentation/driver-api/surface_aggregator/ssh.rst
index b955b673838b..58a757319931 100644
--- a/Documentation/driver-api/surface_aggregator/ssh.rst
+++ b/Documentation/driver-api/surface_aggregator/ssh.rst
@@ -39,7 +39,7 @@ Note that the standard disclaimer for this subsystem also applies to this
document: All of this has been reverse-engineered and may thus be erroneous
and/or incomplete.
-All CRCs used in the following are two-byte ``crc_ccitt_false(0xffff, ...)``.
+All CRCs used in the following are two-byte ``crc_itu_t(0xffff, ...)``.
All multi-byte values are little-endian, there is no implicit padding between
values.
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index 7be2900806c8..421daf837940 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -261,7 +261,7 @@ prototypes::
struct folio *src, enum migrate_mode);
int (*launder_folio)(struct folio *);
bool (*is_partially_uptodate)(struct folio *, size_t from, size_t count);
- int (*error_remove_page)(struct address_space *, struct page *);
+ int (*error_remove_folio)(struct address_space *, struct folio *);
int (*swap_activate)(struct swap_info_struct *sis, struct file *f, sector_t *span)
int (*swap_deactivate)(struct file *);
int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
@@ -287,7 +287,7 @@ direct_IO:
migrate_folio: yes (both)
launder_folio: yes
is_partially_uptodate: yes
-error_remove_page: yes
+error_remove_folio: yes
swap_activate: no
swap_deactivate: no
swap_rw: yes, unlocks
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index 878e72b2f8b7..ced3a6761329 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -1061,3 +1061,15 @@ export_operations ->encode_fh() no longer has a default implementation to
encode FILEID_INO32_GEN* file handles.
Filesystems that used the default implementation may use the generic helper
generic_encode_ino32_fh() explicitly.
+
+---
+
+**recommended**
+
+Block device freezing and thawing have been moved to holder operations.
+
+Before this change, get_active_super() would only be able to find the
+superblock of the main block device, i.e., the one stored in sb->s_bdev. Block
+device freezing now works for any block device owned by a given superblock, not
+just the main block device. The get_active_super() helper and bd_fsfreeze_sb
+pointer are gone.
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 49ef12df631b..104c6d047d9b 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -528,9 +528,9 @@ replaced by copy-on-write) part of the underlying shmem object out on swap.
does not take into account swapped out page of underlying shmem objects.
"Locked" indicates whether the mapping is locked in memory or not.
-"THPeligible" indicates whether the mapping is eligible for allocating THP
-pages as well as the THP is PMD mappable or not - 1 if true, 0 otherwise.
-It just shows the current status.
+"THPeligible" indicates whether the mapping is eligible for allocating
+naturally aligned THP pages of any currently enabled size. 1 if true, 0
+otherwise.
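+
+For instance, the per-mapping value for the current shell can be inspected
+with (a sketch; output depends on the kernel configuration and the mappings
+present)::
+
+  grep THPeligible /proc/self/smaps | head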
"VmFlags" field deserves a separate description. This member represents the
kernel flags associated with the particular virtual memory area in two letter
diff --git a/Documentation/filesystems/squashfs.rst b/Documentation/filesystems/squashfs.rst
index df42106bae71..4af8d6207509 100644
--- a/Documentation/filesystems/squashfs.rst
+++ b/Documentation/filesystems/squashfs.rst
@@ -64,6 +64,66 @@ obtained from this site also.
The squashfs-tools development tree is now located on kernel.org
git://git.kernel.org/pub/scm/fs/squashfs/squashfs-tools.git
+2.1 Mount options
+-----------------
+=================== =========================================================
+errors=%s Specify whether squashfs errors trigger a kernel panic
+ or not
+
+ ========== =============================================
+ continue errors don't trigger a panic (default)
+ panic trigger a panic when errors are encountered,
+ similar to several other filesystems (e.g.
+ btrfs, ext4, f2fs, GFS2, jfs, ntfs, ubifs)
+
+ This allows a kernel dump to be saved,
+ useful for analyzing and debugging the
+ corruption.
+ ========== =============================================
+threads=%s Select the decompression mode or the number of threads
+
+ If SQUASHFS_CHOICE_DECOMP_BY_MOUNT is set:
+
+ ========== =============================================
+ single use single-threaded decompression (default)
+
+ Only one block (data or metadata) can be
+ decompressed at any one time. This limits
+ CPU and memory usage to a minimum, but it
+ also gives poor performance on parallel I/O
+ workloads when using multiple CPU machines
+ due to waiting on decompressor availability.
+ multi use up to two parallel decompressors per core
+
+ If you have a parallel I/O workload and your
+ system has enough memory, using this option
+ may improve overall I/O performance. It
+ dynamically allocates decompressors on a
+ demand basis.
+ percpu use a maximum of one decompressor per core
+
+ It uses percpu variables to ensure
+ decompression is load-balanced across the
+ cores.
+ 1|2|3|... configure the number of threads used for
+ decompression
+
+ The upper limit is num_online_cpus() * 2.
+ ========== =============================================
+
+ If SQUASHFS_CHOICE_DECOMP_BY_MOUNT is **not** set and
+ SQUASHFS_DECOMP_MULTI, SQUASHFS_MOUNT_DECOMP_THREADS are
+ both set:
+
+ ========== =============================================
+ 2|3|... configure the number of threads used for
+ decompression
+
+ The upper limit is num_online_cpus() * 2.
+ ========== =============================================
+
+=================== =========================================================
+
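+For example, an image might be mounted with both options (a sketch; the
+device and mount point are placeholders, and threads=multi assumes the
+kernel is built with SQUASHFS_CHOICE_DECOMP_BY_MOUNT)::
+
+  mount -t squashfs -o errors=panic,threads=multi /dev/loop0 /mnt/squashfs
+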
3. Squashfs Filesystem Design
-----------------------------
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index 99acc2e98673..dd99ce5912d8 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -823,7 +823,7 @@ cache in your filesystem. The following members are defined:
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
void (*is_dirty_writeback)(struct folio *, bool *, bool *);
- int (*error_remove_page) (struct mapping *mapping, struct page *page);
+ int (*error_remove_folio)(struct mapping *mapping, struct folio *);
int (*swap_activate)(struct swap_info_struct *sis, struct file *f, sector_t *span)
int (*swap_deactivate)(struct file *);
int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
@@ -1034,8 +1034,8 @@ cache in your filesystem. The following members are defined:
VM if a folio should be treated as dirty or writeback for the
purposes of stalling.
-``error_remove_page``
- normally set to generic_error_remove_page if truncation is ok
+``error_remove_folio``
+ normally set to generic_error_remove_folio if truncation is ok
for this address space. Used for memory failure handling.
Setting this implies you deal with pages going away under you,
unless you have them locked or reference counts increased.
diff --git a/Documentation/i2c/i2c-address-translators.rst b/Documentation/i2c/i2c-address-translators.rst
index b22ce9f41ecf..6845c114e472 100644
--- a/Documentation/i2c/i2c-address-translators.rst
+++ b/Documentation/i2c/i2c-address-translators.rst
@@ -71,7 +71,7 @@ Transaction:
- Physical I2C transaction on bus A, slave address 0x20
- ATR chip detects transaction on address 0x20, finds it in table,
propagates transaction on bus B with address translated to 0x10,
- keeps clock streched on bus A waiting for reply
+ keeps clock stretched on bus A waiting for reply
- Slave X chip (on bus B) detects transaction at its own physical
address 0x10 and replies normally
- ATR chip stops clock stretching and forwards reply on bus A,
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 9dfdc826618c..36e61783437c 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -113,6 +113,7 @@ to ReStructured Text format, or are simply too old.
:maxdepth: 1
staging/index
+ RAS/ras
Translations
diff --git a/Documentation/locking/mutex-design.rst b/Documentation/locking/mutex-design.rst
index 78540cd7f54b..7c30b4aa5e28 100644
--- a/Documentation/locking/mutex-design.rst
+++ b/Documentation/locking/mutex-design.rst
@@ -101,6 +101,24 @@ features that make lock debugging easier and faster:
- Detects multi-task circular deadlocks and prints out all affected
locks and tasks (and only those tasks).
+Mutexes - and most other sleeping locks like rwsems - do not provide an
+implicit reference for the memory they occupy, which reference is released
+with mutex_unlock().
+
+[ This is in contrast with spin_unlock() [or completion_done()], which
+ APIs can be used to guarantee that the memory is not touched by the
+ lock implementation after spin_unlock()/completion_done() releases
+ the lock. ]
+
+mutex_unlock() may access the mutex structure even after it has internally
+released the lock already - so it's not safe for another context to
+acquire the mutex and assume that the mutex_unlock() context is not using
+the structure anymore.
+
+The mutex user must ensure that the mutex is not destroyed while a
+release operation is still in progress - in other words, callers of
+mutex_unlock() must ensure that the mutex stays alive until mutex_unlock()
+has returned.
Interfaces
----------
diff --git a/Documentation/mm/arch_pgtable_helpers.rst b/Documentation/mm/arch_pgtable_helpers.rst
index c82e3ee20e51..2466d3363af7 100644
--- a/Documentation/mm/arch_pgtable_helpers.rst
+++ b/Documentation/mm/arch_pgtable_helpers.rst
@@ -18,8 +18,6 @@ PTE Page Table Helpers
+---------------------------+--------------------------------------------------+
| pte_same | Tests whether both PTE entries are the same |
+---------------------------+--------------------------------------------------+
-| pte_bad | Tests a non-table mapped PTE |
-+---------------------------+--------------------------------------------------+
| pte_present | Tests a valid mapped PTE |
+---------------------------+--------------------------------------------------+
| pte_young | Tests a young PTE |
diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst
index 1f7e0586b5fa..1bb69524a62e 100644
--- a/Documentation/mm/damon/design.rst
+++ b/Documentation/mm/damon/design.rst
@@ -5,6 +5,18 @@ Design
======
+.. _damon_design_execution_model_and_data_structures:
+
+Execution Model and Data Structures
+===================================
+
+The monitoring-related information including the monitoring request
+specification and DAMON-based operation schemes are stored in a data structure
+called DAMON ``context``. DAMON executes each context with a kernel thread
+called ``kdamond``. Multiple kdamonds could run in parallel, for different
+types of monitoring.
+
+
Overall Architecture
====================
@@ -346,6 +358,19 @@ the weight will be respected are up to the underlying prioritization mechanism
implementation.
+.. _damon_design_damos_quotas_auto_tuning:
+
+Aim-oriented Feedback-driven Auto-tuning
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Instead of setting an absolute quota value, users can let DAMOS tune the
+quota automatically, driven by feedback.  Users repeatedly provide numbers
+representing how much of their goal for the scheme has been achieved, and
+DAMOS then automatically tunes the aggressiveness (the quota) of the
+corresponding scheme.  For example, if DAMOS is under-achieving the goal, it
+automatically increases the quota.  If it is over-achieving the goal, it
+decreases the quota.
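
A minimal sketch of such a feedback loop (illustrative only; the scale of the
feedback value and the exact arithmetic below are assumptions, not DAMOS's
actual implementation)::

    /* 'score' is the user-provided feedback; 10000 means the goal is met. */
    static unsigned long tune_quota(unsigned long quota, unsigned long score)
    {
            if (score < 10000) {            /* under-achieving: be more aggressive */
                    quota += quota * (10000 - score) / 10000;
            } else if (score > 10000) {     /* over-achieving: back off */
                    unsigned long cut = quota * (score - 10000) / 10000;

                    quota = cut < quota ? quota - cut : 1;
            }
            return quota ? quota : 1;       /* never tune down to zero */
    }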
+
+
.. _damon_design_damos_watermarks:
Watermarks
@@ -477,15 +502,3 @@ modules for proactive reclamation and LRU lists manipulation are provided. For
more detail, please read the usage documents for those
(:doc:`/admin-guide/mm/damon/reclaim` and
:doc:`/admin-guide/mm/damon/lru_sort`).
-
-
-.. _damon_design_execution_model_and_data_structures:
-
-Execution Model and Data Structures
-===================================
-
-The monitoring-related information including the monitoring request
-specification and DAMON-based operation schemes are stored in a data structure
-called DAMON ``context``. DAMON executes each context with a kernel thread
-called ``kdamond``. Multiple kdamonds could run in parallel, for different
-types of monitoring.
diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst
index 9a607059ea11..93c9239b9ebe 100644
--- a/Documentation/mm/transhuge.rst
+++ b/Documentation/mm/transhuge.rst
@@ -117,7 +117,7 @@ pages:
- map/unmap of a PMD entry for the whole THP increment/decrement
folio->_entire_mapcount and also increment/decrement
- folio->_nr_pages_mapped by COMPOUND_MAPPED when _entire_mapcount
+ folio->_nr_pages_mapped by ENTIRELY_MAPPED when _entire_mapcount
goes from -1 to 0 or 0 to -1.
- map/unmap of individual pages with PTE entry increment/decrement
@@ -156,7 +156,7 @@ Partial unmap and deferred_split_folio()
Unmapping part of THP (with munmap() or other way) is not going to free
memory immediately. Instead, we detect that a subpage of THP is not in use
-in page_remove_rmap() and queue the THP for splitting if memory pressure
+in folio_remove_rmap_*() and queue the THP for splitting if memory pressure
comes. Splitting will free up unused subpages.
Splitting the page right away is not an option due to locking context in
diff --git a/Documentation/mm/unevictable-lru.rst b/Documentation/mm/unevictable-lru.rst
index 67f1338440a5..b6a07a26b10d 100644
--- a/Documentation/mm/unevictable-lru.rst
+++ b/Documentation/mm/unevictable-lru.rst
@@ -486,7 +486,7 @@ munlock the pages if we're removing the last VM_LOCKED VMA that maps the pages.
Before the unevictable/mlock changes, mlocking did not mark the pages in any
way, so unmapping them required no processing.
-For each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
+For each PTE (or PMD) being unmapped from a VMA, folio_remove_rmap_*() calls
munlock_vma_folio(), which calls munlock_folio() when the VMA is VM_LOCKED
(unless it was a PTE mapping of a part of a transparent huge page).
@@ -511,7 +511,7 @@ userspace; truncation even unmaps and deletes any private anonymous pages
which had been Copied-On-Write from the file pages now being truncated.
Mlocked pages can be munlocked and deleted in this way: like with munmap(),
-for each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
+for each PTE (or PMD) being unmapped from a VMA, folio_remove_rmap_*() calls
munlock_vma_folio(), which calls munlock_folio() when the VMA is VM_LOCKED
(unless it was a PTE mapping of a part of a transparent huge page).
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 4dfe0d9a57bb..7afff42612e9 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -2511,7 +2511,7 @@ temp_valid_lft - INTEGER
temp_prefered_lft - INTEGER
Preferred lifetime (in seconds) for temporary addresses. If
temp_prefered_lft is less than the minimum required lifetime (typically
- 5 seconds), the preferred lifetime is the minimum required. If
+ 5 seconds), temporary addresses will not be created. If
temp_prefered_lft is greater than temp_valid_lft, the preferred lifetime
is temp_valid_lft.
diff --git a/Documentation/networking/packet_mmap.rst b/Documentation/networking/packet_mmap.rst
index 30a3be3c48f3..dca15d15feaf 100644
--- a/Documentation/networking/packet_mmap.rst
+++ b/Documentation/networking/packet_mmap.rst
@@ -263,20 +263,20 @@ the name indicates, this function allocates pages of memory, and the second
argument is "order" or a power of two number of pages, that is
(for PAGE_SIZE == 4096) order=0 ==> 4096 bytes, order=1 ==> 8192 bytes,
order=2 ==> 16384 bytes, etc. The maximum size of a
-region allocated by __get_free_pages is determined by the MAX_ORDER macro. More
-precisely the limit can be calculated as::
+region allocated by __get_free_pages is determined by the MAX_PAGE_ORDER macro.
+More precisely the limit can be calculated as::
- PAGE_SIZE << MAX_ORDER
+ PAGE_SIZE << MAX_PAGE_ORDER
In a i386 architecture PAGE_SIZE is 4096 bytes
- In a 2.4/i386 kernel MAX_ORDER is 10
- In a 2.6/i386 kernel MAX_ORDER is 11
+ In a 2.4/i386 kernel MAX_PAGE_ORDER is 10
+ In a 2.6/i386 kernel MAX_PAGE_ORDER is 11
So get_free_pages can allocate as much as 4MB or 8MB in a 2.4/2.6 kernel
respectively, with an i386 architecture.
User space programs can include /usr/include/sys/user.h and
-/usr/include/linux/mmzone.h to get PAGE_SIZE MAX_ORDER declarations.
+/usr/include/linux/mmzone.h to get PAGE_SIZE MAX_PAGE_ORDER declarations.
The pagesize can also be determined dynamically with the getpagesize (2)
system call.
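
For instance, a user-space program can compute this upper bound itself.  The
sketch below hard-codes MAX_PAGE_ORDER to 11, following the 2.6/i386 example
above, since the macro itself is kernel-internal::

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            long page_size = sysconf(_SC_PAGESIZE); /* same as getpagesize() */
            int max_page_order = 11;                /* assumed, see text above */

            /* e.g. 4096 << 11 = 8 MiB, matching the 2.6/i386 figure above */
            printf("max __get_free_pages() region: %ld bytes\n",
                   page_size << max_page_order);
            return 0;
    }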
@@ -324,7 +324,7 @@ Definitions:
(see /proc/slabinfo)
<pointer size> depends on the architecture -- ``sizeof(void *)``
<page size> depends on the architecture -- PAGE_SIZE or getpagesize (2)
-<max-order> is the value defined with MAX_ORDER
+<max-order> is the value defined with MAX_PAGE_ORDER
<frame size> it's an upper bound of frame's capture size (more on this later)
============== ================================================================
diff --git a/Documentation/power/freezing-of-tasks.rst b/Documentation/power/freezing-of-tasks.rst
index 53b6a56c4635..df9755bfbd94 100644
--- a/Documentation/power/freezing-of-tasks.rst
+++ b/Documentation/power/freezing-of-tasks.rst
@@ -14,27 +14,28 @@ architectures).
II. How does it work?
=====================
-There are three per-task flags used for that, PF_NOFREEZE, PF_FROZEN
-and PF_FREEZER_SKIP (the last one is auxiliary). The tasks that have
-PF_NOFREEZE unset (all user space processes and some kernel threads) are
-regarded as 'freezable' and treated in a special way before the system enters a
-suspend state as well as before a hibernation image is created (in what follows
-we only consider hibernation, but the description also applies to suspend).
+There is one per-task flag (PF_NOFREEZE) and three per-task states
+(TASK_FROZEN, TASK_FREEZABLE and __TASK_FREEZABLE_UNSAFE) used for that.
+The tasks that have PF_NOFREEZE unset (all user space tasks and some kernel
+threads) are regarded as 'freezable' and treated in a special way before the
+system enters a sleep state as well as before a hibernation image is created
+(hibernation is directly covered by what follows, but the description applies
+to system-wide suspend too).
Namely, as the first step of the hibernation procedure the function
freeze_processes() (defined in kernel/power/process.c) is called. A system-wide
-variable system_freezing_cnt (as opposed to a per-task flag) is used to indicate
-whether the system is to undergo a freezing operation. And freeze_processes()
-sets this variable. After this, it executes try_to_freeze_tasks() that sends a
-fake signal to all user space processes, and wakes up all the kernel threads.
-All freezable tasks must react to that by calling try_to_freeze(), which
-results in a call to __refrigerator() (defined in kernel/freezer.c), which sets
-the task's PF_FROZEN flag, changes its state to TASK_UNINTERRUPTIBLE and makes
-it loop until PF_FROZEN is cleared for it. Then, we say that the task is
-'frozen' and therefore the set of functions handling this mechanism is referred
-to as 'the freezer' (these functions are defined in kernel/power/process.c,
-kernel/freezer.c & include/linux/freezer.h). User space processes are generally
-frozen before kernel threads.
+static key freezer_active (as opposed to a per-task flag or state) is used to
+indicate whether the system is to undergo a freezing operation. And
+freeze_processes() sets this static key. After this, it executes
+try_to_freeze_tasks() that sends a fake signal to all user space processes, and
+wakes up all the kernel threads. All freezable tasks must react to that by
+calling try_to_freeze(), which results in a call to __refrigerator() (defined
+in kernel/freezer.c), which changes the task's state to TASK_FROZEN, and makes
+it loop until it is woken by an explicit TASK_FROZEN wakeup. Then, that task
+is regarded as 'frozen' and so the set of functions handling this mechanism is
+referred to as 'the freezer' (these functions are defined in
+kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h). User space
+tasks are generally frozen before kernel threads.
__refrigerator() must not be called directly. Instead, use the
try_to_freeze() function (defined in include/linux/freezer.h), that checks
@@ -43,31 +44,40 @@ if the task is to be frozen and makes the task enter __refrigerator().
For user space processes try_to_freeze() is called automatically from the
signal-handling code, but the freezable kernel threads need to call it
explicitly in suitable places or use the wait_event_freezable() or
-wait_event_freezable_timeout() macros (defined in include/linux/freezer.h)
-that combine interruptible sleep with checking if the task is to be frozen and
-calling try_to_freeze(). The main loop of a freezable kernel thread may look
+wait_event_freezable_timeout() macros (defined in include/linux/wait.h)
+that put the task to sleep (TASK_INTERRUPTIBLE) or freeze it (TASK_FROZEN) if
+freezer_active is set. The main loop of a freezable kernel thread may look
like the following one::
set_freezable();
- do {
- hub_events();
- wait_event_freezable(khubd_wait,
- !list_empty(&hub_event_list) ||
- kthread_should_stop());
- } while (!kthread_should_stop() || !list_empty(&hub_event_list));
-
-(from drivers/usb/core/hub.c::hub_thread()).
-
-If a freezable kernel thread fails to call try_to_freeze() after the freezer has
-initiated a freezing operation, the freezing of tasks will fail and the entire
-hibernation operation will be cancelled. For this reason, freezable kernel
-threads must call try_to_freeze() somewhere or use one of the
+
+ while (true) {
+ struct task_struct *tsk = NULL;
+
+ wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
+ spin_lock_irq(&oom_reaper_lock);
+ if (oom_reaper_list != NULL) {
+ tsk = oom_reaper_list;
+ oom_reaper_list = tsk->oom_reaper_list;
+ }
+ spin_unlock_irq(&oom_reaper_lock);
+
+ if (tsk)
+ oom_reap_task(tsk);
+ }
+
+(from mm/oom_kill.c::oom_reaper()).
+
+If a freezable kernel thread is not put to the frozen state after the freezer
+has initiated a freezing operation, the freezing of tasks will fail and the
+entire system-wide transition will be cancelled. For this reason, freezable
+kernel threads must call try_to_freeze() somewhere or use one of the
wait_event_freezable() and wait_event_freezable_timeout() macros.
After the system memory state has been restored from a hibernation image and
devices have been reinitialized, the function thaw_processes() is called in
-order to clear the PF_FROZEN flag for each frozen task. Then, the tasks that
-have been frozen leave __refrigerator() and continue running.
+order to wake up each frozen task. Then, the tasks that have been frozen leave
+__refrigerator() and continue running.
Rationale behind the functions dealing with freezing and thawing of tasks
@@ -96,7 +106,8 @@ III. Which kernel threads are freezable?
Kernel threads are not freezable by default. However, a kernel thread may clear
PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
directly is not allowed). From this point it is regarded as freezable
-and must call try_to_freeze() in a suitable place.
+and must call try_to_freeze() or variants of wait_event_freezable() in a
+suitable place.
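
A minimal, hypothetical thread function showing that pattern (not taken from
any real driver; do_some_work() is a placeholder)::

    static int my_thread_fn(void *data)
    {
            set_freezable();                /* clear PF_NOFREEZE for ourselves */

            while (!kthread_should_stop()) {
                    try_to_freeze();        /* enter __refrigerator() if needed */
                    do_some_work(data);     /* placeholder for real work */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }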
IV. Why do we do that?
======================
diff --git a/Documentation/scheduler/sched-design-CFS.rst b/Documentation/scheduler/sched-design-CFS.rst
index f68919800f05..6cffffe26500 100644
--- a/Documentation/scheduler/sched-design-CFS.rst
+++ b/Documentation/scheduler/sched-design-CFS.rst
@@ -180,7 +180,7 @@ This is the (partial) list of the hooks:
compat_yield sysctl is turned on; in that case, it places the scheduling
entity at the right-most end of the red-black tree.
- - check_preempt_curr(...)
+ - wakeup_preempt(...)
This function checks if a task that entered the runnable state should
preempt the currently running task.
@@ -189,10 +189,10 @@ This is the (partial) list of the hooks:
This function chooses the most appropriate task eligible to run next.
- - set_curr_task(...)
+ - set_next_task(...)
- This function is called when a task changes its scheduling class or changes
- its task group.
+ This function is called when a task changes its scheduling class, changes
+ its task group or is scheduled.
- task_tick(...)
diff --git a/Documentation/scheduler/schedutil.rst b/Documentation/scheduler/schedutil.rst
index 32c7d69fc86c..803fba8fc714 100644
--- a/Documentation/scheduler/schedutil.rst
+++ b/Documentation/scheduler/schedutil.rst
@@ -90,8 +90,8 @@ For more detail see:
- Documentation/scheduler/sched-capacity.rst:"1. CPU Capacity + 2. Task utilization"
-UTIL_EST / UTIL_EST_FASTUP
-==========================
+UTIL_EST
+========
Because periodic tasks have their averages decayed while they sleep, even
though when running their expected utilization will be the same, they suffer a
@@ -99,8 +99,7 @@ though when running their expected utilization will be the same, they suffer a
To alleviate this (a default enabled option) UTIL_EST drives an Infinite
Impulse Response (IIR) EWMA with the 'running' value on dequeue -- when it is
-highest. A further default enabled option UTIL_EST_FASTUP modifies the IIR
-filter to instantly increase and only decay on decrease.
+highest. The UTIL_EST filter instantly increases the estimate and only decays
+it on decrease.
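
As a rough sketch of that asymmetric behaviour (illustrative only; the weight
below is made up and the scheduler uses different fixed-point arithmetic)::

    /* Track increases immediately, smooth out decreases. */
    static unsigned long util_est_filter(unsigned long ewma,
                                         unsigned long running)
    {
            if (running >= ewma)
                    return running;                 /* instant increase */
            return (3 * ewma + running) / 4;        /* slow decay, made-up weight */
    }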
A further runqueue wide sum (of runnable tasks) is maintained of:
diff --git a/Documentation/spi/pxa2xx.rst b/Documentation/spi/pxa2xx.rst
index 04f2a3856c40..19479b801826 100644
--- a/Documentation/spi/pxa2xx.rst
+++ b/Documentation/spi/pxa2xx.rst
@@ -3,13 +3,13 @@ PXA2xx SPI on SSP driver HOWTO
==============================
This is a mini HOWTO on the pxa2xx_spi driver. The driver turns a PXA2xx
-synchronous serial port into an SPI master controller
+synchronous serial port into an SPI host controller
(see Documentation/spi/spi-summary.rst). The driver has the following features
- Support for any PXA2xx and compatible SSP.
- SSP PIO and SSP DMA data transfers.
- External and Internal (SSPFRM) chip selects.
-- Per slave device (chip) configuration.
+- Per peripheral device (chip) configuration.
- Full suspend, freeze, resume support.
The driver is built around a &struct spi_message FIFO serviced by kernel
@@ -17,10 +17,10 @@ thread. The kernel thread, spi_pump_messages(), drives message FIFO and
is responsible for queuing SPI transactions and setting up and launching
the DMA or interrupt driven transfers.
-Declaring PXA2xx Master Controllers
------------------------------------
-Typically, for a legacy platform, an SPI master is defined in the
-arch/.../mach-*/board-*.c as a "platform device". The master configuration
+Declaring PXA2xx host controllers
+---------------------------------
+Typically, for a legacy platform, an SPI host controller is defined in the
+arch/.../mach-*/board-*.c as a "platform device". The host controller configuration
is passed to the driver via a table found in include/linux/spi/pxa2xx_spi.h::
struct pxa2xx_spi_controller {
@@ -30,7 +30,7 @@ is passed to the driver via a table found in include/linux/spi/pxa2xx_spi.h::
};
The "pxa2xx_spi_controller.num_chipselect" field is used to determine the number of
-slave device (chips) attached to this SPI master.
+peripheral devices (chips) attached to this SPI host controller.
The "pxa2xx_spi_controller.enable_dma" field informs the driver that SSP DMA should
be used. This caused the driver to acquire two DMA channels: Rx channel and
@@ -40,8 +40,8 @@ See the "PXA2xx Developer Manual" section "DMA Controller".
For the new platforms the description of the controller and peripheral devices
comes from Device Tree or ACPI.
-NSSP MASTER SAMPLE
-------------------
+NSSP HOST SAMPLE
+----------------
Below is a sample configuration using the PXA255 NSSP for a legacy platform::
static struct resource pxa_spi_nssp_resources[] = {
@@ -57,7 +57,7 @@ Below is a sample configuration using the PXA255 NSSP for a legacy platform::
},
};
- static struct pxa2xx_spi_controller pxa_nssp_master_info = {
+ static struct pxa2xx_spi_controller pxa_nssp_controller_info = {
.num_chipselect = 1, /* Matches the number of chips attached to NSSP */
.enable_dma = 1, /* Enables NSSP DMA */
};
@@ -68,7 +68,7 @@ Below is a sample configuration using the PXA255 NSSP for a legacy platform::
.resource = pxa_spi_nssp_resources,
.num_resources = ARRAY_SIZE(pxa_spi_nssp_resources),
.dev = {
- .platform_data = &pxa_nssp_master_info, /* Passed to driver */
+ .platform_data = &pxa_nssp_controller_info, /* Passed to driver */
},
};
@@ -81,17 +81,17 @@ Below is a sample configuration using the PXA255 NSSP for a legacy platform::
(void)platform_add_device(devices, ARRAY_SIZE(devices));
}
-Declaring Slave Devices
------------------------
-Typically, for a legacy platform, each SPI slave (chip) is defined in the
+Declaring peripheral devices
+----------------------------
+Typically, for a legacy platform, each SPI peripheral device (chip) is defined in the
arch/.../mach-*/board-*.c using the "spi_board_info" structure found in
"linux/spi/spi.h". See "Documentation/spi/spi-summary.rst" for additional
information.
-Each slave device attached to the PXA must provide slave specific configuration
+Each peripheral device (chip) attached to the PXA2xx must provide specific chip configuration
information via the structure "pxa2xx_spi_chip" found in
-"include/linux/spi/pxa2xx_spi.h". The pxa2xx_spi master controller driver
-will uses the configuration whenever the driver communicates with the slave
+"include/linux/spi/pxa2xx_spi.h". The PXA2xx host controller driver will use
+the configuration whenever the driver communicates with the peripheral
device. All fields are optional.
::
@@ -123,7 +123,7 @@ dma_burst_size == 0.
The "pxa2xx_spi_chip.timeout" fields is used to efficiently handle
trailing bytes in the SSP receiver FIFO. The correct value for this field is
dependent on the SPI bus speed ("spi_board_info.max_speed_hz") and the specific
-slave device. Please note that the PXA2xx SSP 1 does not support trailing byte
+peripheral device. Please note that the PXA2xx SSP 1 does not support trailing byte
timeouts and must busy-wait any trailing bytes.
NOTE: the SPI driver cannot control the chip select if SSPFRM is used, so the
@@ -132,8 +132,8 @@ asserted around the complete message. Use SSPFRM as a GPIO (through a descriptor
to accommodate these chips.
-NSSP SLAVE SAMPLE
------------------
+NSSP PERIPHERAL SAMPLE
+----------------------
For a legacy platform or in some other cases, the pxa2xx_spi_chip structure
is passed to the pxa2xx_spi driver in the "spi_board_info.controller_data"
field. Below is a sample configuration using the PXA255 NSSP.
@@ -161,16 +161,16 @@ field. Below is a sample configuration using the PXA255 NSSP.
.bus_num = 2, /* Framework bus number */
.chip_select = 0, /* Framework chip select */
.platform_data = NULL; /* No spi_driver specific config */
- .controller_data = &cs8415a_chip_info, /* Master chip config */
- .irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */
+ .controller_data = &cs8415a_chip_info, /* Host controller config */
+ .irq = STREETRACER_APCI_IRQ, /* Peripheral device interrupt */
},
{
.modalias = "cs8405a", /* Name of spi_driver for this device */
.max_speed_hz = 3686400, /* Run SSP as fast as possible */
.bus_num = 2, /* Framework bus number */
.chip_select = 1, /* Framework chip select */
- .controller_data = &cs8405a_chip_info, /* Master chip config */
- .irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */
+ .controller_data = &cs8405a_chip_info, /* Host controller config */
+ .irq = STREETRACER_APCI_IRQ, /* Peripheral device interrupt */
},
};
@@ -193,17 +193,14 @@ mode supports both coherent and stream based DMA mappings.
The following logic is used to determine the type of I/O to be used on
a per "spi_transfer" basis::
- if !enable_dma then
- always use PIO transfers
+ if spi_message.len > 65536 then
+ if spi_message.is_dma_mapped or rx_dma_buf != 0 or tx_dma_buf != 0 then
+ reject premapped transfers
- if spi_message.len > 8191 then
print "rate limited" warning
use PIO transfers
- if spi_message.is_dma_mapped and rx_dma_buf != 0 and tx_dma_buf != 0 then
- use coherent DMA mode
-
- if rx_buf and tx_buf are aligned on 8 byte boundary then
+ if enable_dma and the size is in the range [DMA burst size..65536] then
use streaming DMA mode
otherwise
diff --git a/Documentation/translations/ja_JP/SubmitChecklist b/Documentation/translations/ja_JP/SubmitChecklist
index 4429447b0965..1759c6b452d6 100644
--- a/Documentation/translations/ja_JP/SubmitChecklist
+++ b/Documentation/translations/ja_JP/SubmitChecklist
@@ -56,8 +56,8 @@ Linux カーネルパッチ投稿者向けチェックリスト
9: sparseを利用してちゃんとしたコードチェックをしてください。
-10: 'make checkstack' と 'make namespacecheck' を利用し、問題が発見されたら
- 修正してください。'make checkstack' は明示的に問題を示しませんが、どれか
+10: 'make checkstack' を利用し、問題が発見されたら修正してください。
+ 'make checkstack' は明示的に問題を示しませんが、どれか
1つの関数が512バイトより大きいスタックを使っていれば、修正すべき候補と
なります。
diff --git a/Documentation/translations/zh_CN/process/submit-checklist.rst b/Documentation/translations/zh_CN/process/submit-checklist.rst
index 3d6ee21c74ae..10536b74aeec 100644
--- a/Documentation/translations/zh_CN/process/submit-checklist.rst
+++ b/Documentation/translations/zh_CN/process/submit-checklist.rst
@@ -53,8 +53,7 @@ Linux内核补丁提交检查单
9) 通过 sparse 清查。
(参见 Documentation/translations/zh_CN/dev-tools/sparse.rst )
-10) 使用 ``make checkstack`` 和 ``make namespacecheck`` 并修复他们发现的任何
- 问题。
+10) 使用 ``make checkstack`` 并修复他们发现的任何问题。
.. note::
diff --git a/Documentation/translations/zh_CN/scheduler/sched-design-CFS.rst b/Documentation/translations/zh_CN/scheduler/sched-design-CFS.rst
index 3076402406c4..abc6709ec3b2 100644
--- a/Documentation/translations/zh_CN/scheduler/sched-design-CFS.rst
+++ b/Documentation/translations/zh_CN/scheduler/sched-design-CFS.rst
@@ -80,7 +80,7 @@ p->se.vruntime。一旦p->se.vruntimeå˜å¾—足够大,其它的任务将æˆä¸ºæ
CFS使用纳秒粒度的计时,ä¸ä¾èµ–于任何jiffies或HZ的细节。因此CFS并ä¸åƒä¹‹å‰çš„调度器那样
有“时间片â€çš„概念,也没有任何å¯å‘å¼çš„设计。唯一å¯è°ƒçš„å‚数(你需è¦æ‰“å¼€CONFIG_SCHED_DEBUG)是:
- /sys/kernel/debug/sched/min_granularity_ns
+ /sys/kernel/debug/sched/base_slice_ns
它可以用来将调度器从“桌面”模式（也就是低时延）调节为“服务器”（也就是高批处理）模式。
它的默认设置是适合桌面的工作负载。SCHED_BATCH也被CFS调度器模块处理。
@@ -147,7 +147,7 @@ array)。
这个函数的行为基本上是出队，紧接着入队，除非compat_yield sysctl被开启。在那种情况下，
它将调度实体放在红黑树的最右端。
- - check_preempt_curr(...)
+ - wakeup_preempt(...)
这个函数检查进入可运行状态的任务能否抢占当前正在运行的任务。
@@ -155,9 +155,9 @@ array)。
这个函数选择接下来最适合运行的任务。
- - set_curr_task(...)
+ - set_next_task(...)
- 这个函数在任务改变调度类或改变任务组时被调用。
+ 这个函数在任务改变调度类，改变任务组时，或者任务被调度时被调用。
- task_tick(...)
diff --git a/Documentation/translations/zh_CN/scheduler/schedutil.rst b/Documentation/translations/zh_CN/scheduler/schedutil.rst
index d1ea68007520..7c8d87f21c42 100644
--- a/Documentation/translations/zh_CN/scheduler/schedutil.rst
+++ b/Documentation/translations/zh_CN/scheduler/schedutil.rst
@@ -89,16 +89,15 @@ r_cpu被定义为当前CPU的最高性能水平与系统中任何其它CPU的最
- Documentation/translations/zh_CN/scheduler/sched-capacity.rst:"1. CPU Capacity + 2. Task utilization"
-UTIL_EST / UTIL_EST_FASTUP
-==========================
+UTIL_EST
+========
由于周期性任务的平均数在睡眠时会衰减，而在运行时其预期利用率会和睡眠前相同，
因此它们在再次运行后会面临（DVFS)的上涨。
为了缓解这个问题，(一个默认使能的编译选项)UTIL_EST驱动一个无限脉冲响应
(Infinite Impulse Response，IIR)的EWMA，“运行”值在出队时是最高的。
-另一个默认使能的编译选项UTIL_EST_FASTUP修改了IIR滤波器，使其允许立即增加，
-仅在利用率下降时衰减。
+UTIL_EST滤波使其在遇到更高值时立刻增加，而遇到低值时会缓慢衰减。
进一步，运行队列的（可运行任务的）利用率之和由下式计算：
diff --git a/Documentation/translations/zh_TW/process/submit-checklist.rst b/Documentation/translations/zh_TW/process/submit-checklist.rst
index 942962d1e2f4..dda456a73147 100644
--- a/Documentation/translations/zh_TW/process/submit-checklist.rst
+++ b/Documentation/translations/zh_TW/process/submit-checklist.rst
@@ -56,8 +56,7 @@ Linux內核補丁提交檢查單
9) 通過 sparse 清查。
(參見 Documentation/translations/zh_CN/dev-tools/sparse.rst )
-10) 使用 ``make checkstack`` 和 ``make namespacecheck`` 並修復他們發現的任何
- 問題。
+10) 使用 ``make checkstack`` 並修復他們發現的任何問題。
.. note::
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index 031df47a7c19..8be8b1979194 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -33,6 +33,7 @@ place where this information is gathered.
sysfs-platform_profile
vduse
futex2
+ lsm
.. only:: subproject and html
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 4ea5b837399a..d8b6cb1a3636 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -349,6 +349,10 @@ Code Seq# Include File Comments
<mailto:vgo@ratio.de>
0xB1 00-1F PPPoX
<mailto:mostrows@styx.uwaterloo.ca>
+0xB2 00 arch/powerpc/include/uapi/asm/papr-vpd.h powerpc/pseries VPD API
+ <mailto:linuxppc-dev>
+0xB2 01-02 arch/powerpc/include/uapi/asm/papr-sysparm.h powerpc/pseries system parameter API
+ <mailto:linuxppc-dev>
0xB3 00 linux/mmc/ioctl.h
0xB4 00-0F linux/gpio.h <mailto:linux-gpio@vger.kernel.org>
0xB5 00-0F uapi/linux/rpmsg.h <mailto:linux-remoteproc@vger.kernel.org>
diff --git a/Documentation/userspace-api/lsm.rst b/Documentation/userspace-api/lsm.rst
new file mode 100644
index 000000000000..a76da373841b
--- /dev/null
+++ b/Documentation/userspace-api/lsm.rst
@@ -0,0 +1,73 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. Copyright (C) 2022 Casey Schaufler <casey@schaufler-ca.com>
+.. Copyright (C) 2022 Intel Corporation
+
+=====================================
+Linux Security Modules
+=====================================
+
+:Author: Casey Schaufler
+:Date: July 2023
+
+Linux security modules (LSM) provide a mechanism to implement
+additional access controls on top of the standard Linux security policies.
+
+The various security modules may support any of these attributes:
+
+``LSM_ATTR_CURRENT`` is the current, active security context of the
+process.
+The proc filesystem provides this value in ``/proc/self/attr/current``.
+This is supported by the SELinux, Smack and AppArmor security modules.
+Smack also provides this value in ``/proc/self/attr/smack/current``.
+AppArmor also provides this value in ``/proc/self/attr/apparmor/current``.
+
+``LSM_ATTR_EXEC`` is the security context of the process at the time the
+current image was executed.
+The proc filesystem provides this value in ``/proc/self/attr/exec``.
+This is supported by the SELinux and AppArmor security modules.
+AppArmor also provides this value in ``/proc/self/attr/apparmor/exec``.
+
+``LSM_ATTR_FSCREATE`` is the security context of the process used when
+creating file system objects.
+The proc filesystem provides this value in ``/proc/self/attr/fscreate``.
+This is supported by the SELinux security module.
+
+``LSM_ATTR_KEYCREATE`` is the security context of the process used when
+creating key objects.
+The proc filesystem provides this value in ``/proc/self/attr/keycreate``.
+This is supported by the SELinux security module.
+
+``LSM_ATTR_PREV`` is the security context of the process at the time the
+current security context was set.
+The proc filesystem provides this value in ``/proc/self/attr/prev``.
+This is supported by the SELinux and AppArmor security modules.
+AppArmor also provides this value in ``/proc/self/attr/apparmor/prev``.
+
+``LSM_ATTR_SOCKCREATE`` is the security context of the process used when
+creating socket objects.
+The proc filesystem provides this value in ``/proc/self/attr/sockcreate``.
+This is supported by the SELinux security module.
+
+Kernel interface
+================
+
+Set a security attribute of the current process
+-----------------------------------------------
+
+.. kernel-doc:: security/lsm_syscalls.c
+ :identifiers: sys_lsm_set_self_attr
+
+Get the specified security attributes of the current process
+------------------------------------------------------------
+
+.. kernel-doc:: security/lsm_syscalls.c
+ :identifiers: sys_lsm_get_self_attr
+
+.. kernel-doc:: security/lsm_syscalls.c
+ :identifiers: sys_lsm_list_modules
+
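
A hypothetical user-space sketch (not part of the interface description
above) that queries the current contexts of the calling process.  It assumes
kernel headers recent enough to provide <linux/lsm.h>, ``LSM_ATTR_CURRENT``
and ``__NR_lsm_get_self_attr``, and that libc has no wrapper yet::

    #include <linux/lsm.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            __u32 size = 4096;
            struct lsm_ctx *ctx = malloc(size);
            long count;

            if (!ctx)
                    return 1;
            /* One lsm_ctx entry per active LSM that supports the attribute. */
            count = syscall(__NR_lsm_get_self_attr, LSM_ATTR_CURRENT, ctx,
                            &size, 0);
            if (count < 0) {
                    perror("lsm_get_self_attr");
                    free(ctx);
                    return 1;
            }
            printf("%ld LSM(s) reported a current context (%u bytes used)\n",
                   count, size);
            free(ctx);
            return 0;
    }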
+Additional documentation
+========================
+
+* Documentation/security/lsm.rst
+* Documentation/security/lsm-development.rst
diff --git a/MAINTAINERS b/MAINTAINERS
index f5c2450fa4ec..338e12a0e4d2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3339,13 +3339,17 @@ M: Eric Paris <eparis@redhat.com>
L: audit@vger.kernel.org
S: Supported
W: https://github.com/linux-audit
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit.git
+Q: https://patchwork.kernel.org/project/audit/list
+B: mailto:audit@vger.kernel.org
+P: https://github.com/linux-audit/audit-kernel/blob/main/README.md
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit.git
F: include/asm-generic/audit_*.h
F: include/linux/audit.h
F: include/linux/audit_arch.h
F: include/uapi/linux/audit.h
F: kernel/audit*
F: lib/*audit.c
+K: \baudit_[a-z_0-9]\+\b
AUXILIARY BUS DRIVER
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -3404,6 +3408,16 @@ W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/hwmon/adi,axi-fan-control.yaml
F: drivers/hwmon/axi-fan-control.c
+AXI SPI ENGINE
+M: Michael Hennerich <michael.hennerich@analog.com>
+M: Nuno Sá <nuno.sa@analog.com>
+R: David Lechner <dlechner@baylibre.com>
+L: linux-spi@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml
+F: drivers/spi/spi-axi-spi-engine.c
+
AXXIA I2C CONTROLLER
M: Krzysztof Adamski <krzysztof.adamski@nokia.com>
L: linux-i2c@vger.kernel.org
@@ -4127,7 +4141,6 @@ M: Franky Lin <franky.lin@broadcom.com>
M: Hante Meuleman <hante.meuleman@broadcom.com>
L: linux-wireless@vger.kernel.org
L: brcm80211-dev-list.pdl@broadcom.com
-L: SHA-cyfmac-dev-list@infineon.com
S: Supported
F: drivers/net/wireless/broadcom/brcm80211/
@@ -5340,6 +5353,7 @@ L: linux-mm@kvack.org
S: Maintained
F: mm/memcontrol.c
F: mm/swap_cgroup.c
+F: samples/cgroup/*
F: tools/testing/selftests/cgroup/memcg_protection.m
F: tools/testing/selftests/cgroup/test_hugetlb_memcg.c
F: tools/testing/selftests/cgroup/test_kmem.c
@@ -6902,8 +6916,8 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/vboxvideo/
DRM DRIVER FOR VMWARE VIRTUAL GPU
-M: Zack Rusin <zackr@vmware.com>
-R: VMware Graphics Reviewers <linux-graphics-maintainer@vmware.com>
+M: Zack Rusin <zack.rusin@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -7926,6 +7940,7 @@ F: include/uapi/linux/ext4.h
Extended Verification Module (EVM)
M: Mimi Zohar <zohar@linux.ibm.com>
+M: Roberto Sassu <roberto.sassu@huawei.com>
L: linux-integrity@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git
@@ -8104,6 +8119,7 @@ F: include/trace/events/fs_dax.h
FILESYSTEMS (VFS and infrastructure)
M: Alexander Viro <viro@zeniv.linux.org.uk>
M: Christian Brauner <brauner@kernel.org>
+R: Jan Kara <jack@suse.cz>
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/*
@@ -8124,6 +8140,16 @@ F: fs/exportfs/
F: fs/fhandle.c
F: include/linux/exportfs.h
+FILESYSTEMS [IDMAPPED MOUNTS]
+M: Christian Brauner <brauner@kernel.org>
+M: Seth Forshee <sforshee@kernel.org>
+L: linux-fsdevel@vger.kernel.org
+S: Maintained
+F: Documentation/filesystems/idmappings.rst
+F: fs/mnt_idmapping.c
+F: include/linux/mnt_idmapping.*
+F: tools/testing/selftests/mount_setattr/
+
FILESYSTEMS [IOMAP]
M: Christian Brauner <brauner@kernel.org>
R: Darrick J. Wong <djwong@kernel.org>
@@ -8133,6 +8159,15 @@ S: Supported
F: fs/iomap/
F: include/linux/iomap.h
+FILESYSTEMS [STACKABLE]
+M: Miklos Szeredi <miklos@szeredi.hu>
+M: Amir Goldstein <amir73il@gmail.com>
+L: linux-fsdevel@vger.kernel.org
+L: linux-unionfs@vger.kernel.org
+S: Maintained
+F: fs/backing-file.c
+F: include/linux/backing-file.h
+
FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
M: Riku Voipio <riku.voipio@iki.fi>
L: linux-hwmon@vger.kernel.org
@@ -8991,7 +9026,7 @@ F: drivers/gpio/gpio-mockup.c
F: tools/testing/selftests/gpio/
GPIO REGMAP
-M: Michael Walle <michael@walle.cc>
+M: Michael Walle <mwalle@kernel.org>
S: Maintained
F: drivers/gpio/gpio-regmap.c
F: include/linux/gpio/regmap.h
@@ -9263,7 +9298,6 @@ F: drivers/char/hw_random/
F: include/linux/hw_random.h
HARDWARE SPINLOCK CORE
-M: Ohad Ben-Cohen <ohad@wizery.com>
M: Bjorn Andersson <andersson@kernel.org>
R: Baolin Wang <baolin.wang7@gmail.com>
L: linux-remoteproc@vger.kernel.org
@@ -9768,7 +9802,6 @@ F: Documentation/networking/device_drivers/ethernet/huawei/hinic.rst
F: drivers/net/ethernet/huawei/hinic/
HUGETLB SUBSYSTEM
-M: Mike Kravetz <mike.kravetz@oracle.com>
M: Muchun Song <muchun.song@linux.dev>
L: linux-mm@kvack.org
S: Maintained
@@ -9792,8 +9825,8 @@ T: git git://linuxtv.org/media_tree.git
F: drivers/media/platform/st/sti/hva
HWPOISON MEMORY FAILURE HANDLING
-M: Naoya Horiguchi <naoya.horiguchi@nec.com>
-R: Miaohe Lin <linmiaohe@huawei.com>
+M: Miaohe Lin <linmiaohe@huawei.com>
+R: Naoya Horiguchi <naoya.horiguchi@nec.com>
L: linux-mm@kvack.org
S: Maintained
F: mm/hwpoison-inject.c
@@ -10204,16 +10237,6 @@ S: Maintained
W: https://github.com/o2genum/ideapad-slidebar
F: drivers/input/misc/ideapad_slidebar.c
-IDMAPPED MOUNTS
-M: Christian Brauner <brauner@kernel.org>
-M: Seth Forshee <sforshee@kernel.org>
-L: linux-fsdevel@vger.kernel.org
-S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/vfs/idmapping.git
-F: Documentation/filesystems/idmappings.rst
-F: include/linux/mnt_idmapping.*
-F: tools/testing/selftests/mount_setattr/
-
IDT VersaClock 5 CLOCK DRIVER
M: Luca Ceresoli <luca@lucaceresoli.net>
S: Maintained
@@ -10506,7 +10529,9 @@ F: drivers/crypto/inside-secure/
INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
M: Mimi Zohar <zohar@linux.ibm.com>
+M: Roberto Sassu <roberto.sassu@huawei.com>
M: Dmitry Kasatkin <dmitry.kasatkin@gmail.com>
+R: Eric Snowberg <eric.snowberg@oracle.com>
L: linux-integrity@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git
@@ -12244,21 +12269,21 @@ S: Orphan
F: arch/powerpc/platforms/40x/
F: arch/powerpc/platforms/44x/
-LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX
+LINUX FOR POWERPC EMBEDDED PPC85XX
M: Scott Wood <oss@buserror.net>
L: linuxppc-dev@lists.ozlabs.org
S: Odd fixes
T: git git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux.git
F: Documentation/devicetree/bindings/cache/freescale-l2cache.txt
F: Documentation/devicetree/bindings/powerpc/fsl/
-F: arch/powerpc/platforms/83xx/
F: arch/powerpc/platforms/85xx/
-LINUX FOR POWERPC EMBEDDED PPC8XX
+LINUX FOR POWERPC EMBEDDED PPC8XX AND PPC83XX
M: Christophe Leroy <christophe.leroy@csgroup.eu>
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: arch/powerpc/platforms/8xx/
+F: arch/powerpc/platforms/83xx/
LINUX KERNEL DUMP TEST MODULE (LKDTM)
M: Kees Cook <keescook@chromium.org>
@@ -12405,6 +12430,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/har
F: Documentation/admin-guide/LSM/LoadPin.rst
F: security/loadpin/
+LOCKDOWN SECURITY MODULE
+L: linux-security-module@vger.kernel.org
+S: Odd Fixes
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/lsm.git
+F: security/lockdown/
+
LOCKING PRIMITIVES
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
@@ -12416,7 +12447,7 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
F: Documentation/locking/
F: arch/*/include/asm/spinlock*.h
-F: include/linux/lockdep.h
+F: include/linux/lockdep*.h
F: include/linux/mutex*.h
F: include/linux/rwlock*.h
F: include/linux/rwsem*.h
@@ -12824,7 +12855,7 @@ S: Maintained
F: drivers/net/ethernet/marvell/mvneta.*
MARVELL MVPP2 ETHERNET DRIVER
-M: Marcin Wojtas <mw@semihalf.com>
+M: Marcin Wojtas <marcin.s.wojtas@gmail.com>
M: Russell King <linux@armlinux.org.uk>
L: netdev@vger.kernel.org
S: Maintained
@@ -15087,6 +15118,7 @@ K: \bmdo_
NETWORKING [MPTCP]
M: Matthieu Baerts <matttbe@kernel.org>
M: Mat Martineau <martineau@kernel.org>
+R: Geliang Tang <geliang.tang@linux.dev>
L: netdev@vger.kernel.org
L: mptcp@lists.linux.dev
S: Maintained
@@ -15434,7 +15466,7 @@ F: Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
F: drivers/bluetooth/btnxpuart.c
NXP C45 TJA11XX PHY DRIVER
-M: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
+M: Andrei Botila <andrei.botila@oss.nxp.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/phy/nxp-c45-tja11xx.c
@@ -15705,9 +15737,8 @@ F: Documentation/devicetree/bindings/gpio/ti,omap-gpio.yaml
F: drivers/gpio/gpio-omap.c
OMAP HARDWARE SPINLOCK SUPPORT
-M: Ohad Ben-Cohen <ohad@wizery.com>
L: linux-omap@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/hwspinlock/omap_hwspinlock.c
OMAP HS MMC SUPPORT
@@ -16458,11 +16489,10 @@ F: Documentation/devicetree/bindings/pci/pci-armada8k.txt
F: drivers/pci/controller/dwc/pcie-armada8k.c
PCI DRIVER FOR CADENCE PCIE IP
-M: Tom Joseph <tjoseph@cadence.com>
L: linux-pci@vger.kernel.org
-S: Maintained
+S: Orphan
F: Documentation/devicetree/bindings/pci/cdns,*
-F: drivers/pci/controller/cadence/
+F: drivers/pci/controller/cadence/*cadence*
PCI DRIVER FOR FREESCALE LAYERSCAPE
M: Minghuan Lian <minghuan.Lian@nxp.com>
@@ -19444,22 +19474,29 @@ SECURITY SUBSYSTEM
M: Paul Moore <paul@paul-moore.com>
M: James Morris <jmorris@namei.org>
M: "Serge E. Hallyn" <serge@hallyn.com>
-L: linux-security-module@vger.kernel.org (suggested Cc:)
+L: linux-security-module@vger.kernel.org
S: Supported
-W: http://kernsec.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/lsm.git
+Q: https://patchwork.kernel.org/project/linux-security-module/list
+B: mailto:linux-security-module@vger.kernel.org
+P: https://github.com/LinuxSecurityModule/kernel/blob/main/README.md
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/lsm.git
+F: include/uapi/linux/lsm.h
F: security/
+F: tools/testing/selftests/lsm/
X: security/selinux/
+K: \bsecurity_[a-z_0-9]\+\b
SELINUX SECURITY MODULE
M: Paul Moore <paul@paul-moore.com>
M: Stephen Smalley <stephen.smalley.work@gmail.com>
-M: Eric Paris <eparis@parisplace.org>
+R: Ondrej Mosnacek <omosnace@redhat.com>
L: selinux@vger.kernel.org
S: Supported
-W: https://selinuxproject.org
W: https://github.com/SELinuxProject
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux.git
+Q: https://patchwork.kernel.org/project/selinux/list
+B: mailto:selinux@vger.kernel.org
+P: https://github.com/SELinuxProject/selinux-kernel/blob/main/README.md
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux.git
F: Documentation/ABI/removed/sysfs-selinux-checkreqprot
F: Documentation/ABI/removed/sysfs-selinux-disable
F: Documentation/admin-guide/LSM/SELinux.rst
@@ -19831,7 +19868,7 @@ W: http://www.winischhofer.at/linuxsisusbvga.shtml
F: drivers/usb/misc/sisusbvga/
SL28 CPLD MFD DRIVER
-M: Michael Walle <michael@walle.cc>
+M: Michael Walle <mwalle@kernel.org>
S: Maintained
F: Documentation/devicetree/bindings/gpio/kontron,sl28cpld-gpio.yaml
F: Documentation/devicetree/bindings/hwmon/kontron,sl28cpld-hwmon.yaml
@@ -19846,7 +19883,7 @@ F: drivers/pwm/pwm-sl28cpld.c
F: drivers/watchdog/sl28cpld_wdt.c
SL28 VPD NVMEM LAYOUT DRIVER
-M: Michael Walle <michael@walle.cc>
+M: Michael Walle <mwalle@kernel.org>
S: Maintained
F: Documentation/devicetree/bindings/nvmem/layouts/kontron,sl28-vpd.yaml
F: drivers/nvmem/layouts/sl28vpd.c
@@ -20356,7 +20393,7 @@ F: drivers/pinctrl/spear/
SPI NOR SUBSYSTEM
M: Tudor Ambarus <tudor.ambarus@linaro.org>
M: Pratyush Yadav <pratyush@kernel.org>
-R: Michael Walle <michael@walle.cc>
+M: Michael Walle <mwalle@kernel.org>
L: linux-mtd@lists.infradead.org
S: Maintained
W: http://www.linux-mtd.infradead.org/
@@ -21020,6 +21057,13 @@ L: linux-mmc@vger.kernel.org
S: Maintained
F: drivers/mmc/host/dw_mmc*
+SYNOPSYS DESIGNWARE PCIE PMU DRIVER
+M: Shuai Xue <xueshuai@linux.alibaba.com>
+M: Jing Zhang <renyu.zj@linux.alibaba.com>
+S: Supported
+F: Documentation/admin-guide/perf/dwc_pcie_pmu.rst
+F: drivers/perf/dwc_pcie_pmu.c
+
SYNOPSYS HSDK RESET CONTROLLER DRIVER
M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
S: Supported
@@ -23216,9 +23260,8 @@ F: drivers/misc/vmw_vmci/
F: include/linux/vmw_vmci*
VMWARE VMMOUSE SUBDRIVER
-M: Zack Rusin <zackr@vmware.com>
-R: VMware Graphics Reviewers <linux-graphics-maintainer@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Zack Rusin <zack.rusin@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: linux-input@vger.kernel.org
S: Supported
F: drivers/input/mouse/vmmouse.c
diff --git a/Makefile b/Makefile
index e1a652205d06..f1b2fd977275 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 7
SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
@@ -1576,7 +1576,8 @@ help:
echo ' (default: $(INSTALL_HDR_PATH))'; \
echo ''
@echo 'Static analysers:'
- @echo ' checkstack - Generate a list of stack hogs'
+ @echo ' checkstack - Generate a list of stack hogs and consider all functions'
+ @echo ' with a stack size larger than MINSTACKSIZE (default: 100)'
@echo ' versioncheck - Sanity check on version.h usage'
@echo ' includecheck - Check for duplicate included header files'
@echo ' export_report - List the usages of all exported symbols'
@@ -2016,9 +2017,10 @@ CHECKSTACK_ARCH := $(SUBARCH)
else
CHECKSTACK_ARCH := $(ARCH)
endif
+MINSTACKSIZE ?= 100
checkstack:
$(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \
- $(PERL) $(srctree)/scripts/checkstack.pl $(CHECKSTACK_ARCH)
+ $(PERL) $(srctree)/scripts/checkstack.pl $(CHECKSTACK_ARCH) $(MINSTACKSIZE)
kernelrelease:
@$(filechk_kernel.release)
diff --git a/arch/Kconfig b/arch/Kconfig
index f4b210ab0612..5ca66aad0d08 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -301,17 +301,8 @@ config ARCH_HAS_DMA_CLEAR_UNCACHED
config ARCH_HAS_CPU_FINALIZE_INIT
bool
-# Select if arch init_task must go in the __init_task_data section
-config ARCH_TASK_STRUCT_ON_STACK
- bool
-
-# Select if arch has its private alloc_task_struct() function
-config ARCH_TASK_STRUCT_ALLOCATOR
- bool
-
config HAVE_ARCH_THREAD_STRUCT_WHITELIST
bool
- depends on !ARCH_TASK_STRUCT_ALLOCATOR
help
An architecture should select this to provide hardened usercopy
knowledge about what region of the thread_struct should be
@@ -320,10 +311,6 @@ config HAVE_ARCH_THREAD_STRUCT_WHITELIST
should be implemented. Without this, the entire thread_struct
field in task_struct will be left whitelisted.
-# Select if arch has its private alloc_thread_stack() function
-config ARCH_THREAD_STACK_ALLOCATOR
- bool
-
# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
config ARCH_WANTS_DYNAMIC_TASK_STRUCT
bool
@@ -1470,6 +1457,14 @@ config DYNAMIC_SIGFRAME
config HAVE_ARCH_NODE_DEV_GROUP
bool
+config ARCH_HAS_HW_PTE_YOUNG
+ bool
+ help
+ Architectures that select this option are capable of setting the
+ accessed bit in PTE entries when using them as part of linear address
+ translations. Architectures that require a runtime check should select
+ this option and override arch_has_hw_pte_young().
+
config ARCH_HAS_NONLEAF_PMD_YOUNG
bool
help
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 18c842ca6c32..8ff110826ce2 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -496,3 +496,8 @@
564 common futex_wake sys_futex_wake
565 common futex_wait sys_futex_wait
566 common futex_requeue sys_futex_requeue
+567 common statmount sys_statmount
+568 common listmount sys_listmount
+569 common lsm_get_self_attr sys_lsm_get_self_attr
+570 common lsm_set_self_attr sys_lsm_set_self_attr
+571 common lsm_list_modules sys_lsm_list_modules
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 1cc74f7b50ef..6a779b9018fd 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -4,7 +4,6 @@
#
asflags-y := $(KBUILD_CFLAGS)
-ccflags-y := -Werror
# Many of these routines have implementations tuned for ev6.
# Choose them iff we're targeting ev6 specifically.
diff --git a/arch/alpha/mm/Makefile b/arch/alpha/mm/Makefile
index bd770302eb82..101dbd06b4ce 100644
--- a/arch/alpha/mm/Makefile
+++ b/arch/alpha/mm/Makefile
@@ -3,6 +3,4 @@
# Makefile for the linux alpha-specific parts of the memory manager.
#
-ccflags-y := -Werror
-
obj-y := init.o fault.o
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index f8567e95f98b..b2ab8db63c4b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1362,7 +1362,7 @@ config ARCH_FORCE_MAX_ORDER
default "10"
help
The kernel page allocator limits the size of maximal physically
- contiguous allocations. The limit is called MAX_ORDER and it
+ contiguous allocations. The limit is called MAX_PAGE_ORDER and it
defines the maximal power of two of number of pages that can be
allocated as a single contiguous block. This option allows
overriding the default setting when ability to allocate very
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index c7d2510e5a78..853c4f81ba4a 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -13,6 +13,7 @@
#define arch_set_freq_scale topology_set_freq_scale
#define arch_scale_freq_capacity topology_get_freq_scale
#define arch_scale_freq_invariant topology_scale_freq_invariant
+#define arch_scale_freq_ref topology_get_freq_ref
#endif
/* Replace task scheduler's default cpu-invariant accounting */
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 1ae99deeec54..8fc080c9e4fb 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -268,10 +268,8 @@ static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
static void armv6pmu_enable_event(struct perf_event *event)
{
- unsigned long val, mask, evt, flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ unsigned long val, mask, evt;
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
@@ -294,12 +292,10 @@ static void armv6pmu_enable_event(struct perf_event *event)
* Mask out the current event and set the counter to count the event
* that we're interested in.
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t
@@ -362,26 +358,20 @@ armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
- unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ unsigned long val;
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
val |= ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
- unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ unsigned long val;
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int
@@ -419,10 +409,8 @@ static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
static void armv6pmu_disable_event(struct perf_event *event)
{
- unsigned long val, mask, evt, flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ unsigned long val, mask, evt;
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
@@ -444,20 +432,16 @@ static void armv6pmu_disable_event(struct perf_event *event)
* of ETM bus signal assertion cycles. The external reporting should
* be disabled and so this should never increment.
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv6mpcore_pmu_disable_event(struct perf_event *event)
{
- unsigned long val, mask, flags, evt = 0;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ unsigned long val, mask, evt = 0;
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
@@ -475,12 +459,10 @@ static void armv6mpcore_pmu_disable_event(struct perf_event *event)
* Unlike UP ARMv6, we don't have a way of stopping the counters. We
* simply disable the interrupt reporting.
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int armv6_map_event(struct perf_event *event)
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index eb2190477da1..a3322e2b3ea4 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -870,10 +870,8 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
static void armv7pmu_enable_event(struct perf_event *event)
{
- unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
@@ -886,7 +884,6 @@ static void armv7pmu_enable_event(struct perf_event *event)
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
/*
* Disable counter
@@ -910,16 +907,12 @@ static void armv7pmu_enable_event(struct perf_event *event)
* Enable counter
*/
armv7_pmnc_enable_counter(idx);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv7pmu_disable_event(struct perf_event *event)
{
- unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
@@ -931,7 +924,6 @@ static void armv7pmu_disable_event(struct perf_event *event)
/*
* Disable counter and interrupt
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
/*
* Disable counter
@@ -942,8 +934,6 @@ static void armv7pmu_disable_event(struct perf_event *event)
* Disable interrupt for this counter
*/
armv7_pmnc_disable_intens(idx);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
@@ -1009,24 +999,14 @@ static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */
armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
@@ -1072,8 +1052,10 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event,
{
unsigned long config_base = 0;
- if (attr->exclude_idle)
- return -EPERM;
+ if (attr->exclude_idle) {
+ pr_debug("ARM performance counters do not support mode exclusion\n");
+ return -EOPNOTSUPP;
+ }
if (attr->exclude_user)
config_base |= ARMV7_EXCLUDE_USER;
if (attr->exclude_kernel)
@@ -1492,14 +1474,10 @@ static void krait_clearpmu(u32 config_base)
static void krait_pmu_disable_event(struct perf_event *event)
{
- unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
/* Disable counter and interrupt */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable counter */
armv7_pmnc_disable_counter(idx);
@@ -1512,23 +1490,17 @@ static void krait_pmu_disable_event(struct perf_event *event)
/* Disable interrupt for this counter */
armv7_pmnc_disable_intens(idx);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void krait_pmu_enable_event(struct perf_event *event)
{
- unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
/*
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable counter */
armv7_pmnc_disable_counter(idx);
@@ -1548,8 +1520,6 @@ static void krait_pmu_enable_event(struct perf_event *event)
/* Enable counter */
armv7_pmnc_enable_counter(idx);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void krait_pmu_reset(void *info)
@@ -1825,14 +1795,10 @@ static void scorpion_clearpmu(u32 config_base)
static void scorpion_pmu_disable_event(struct perf_event *event)
{
- unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
/* Disable counter and interrupt */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable counter */
armv7_pmnc_disable_counter(idx);
@@ -1845,23 +1811,17 @@ static void scorpion_pmu_disable_event(struct perf_event *event)
/* Disable interrupt for this counter */
armv7_pmnc_disable_intens(idx);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void scorpion_pmu_enable_event(struct perf_event *event)
{
- unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
/*
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable counter */
armv7_pmnc_disable_counter(idx);
@@ -1881,8 +1841,6 @@ static void scorpion_pmu_enable_event(struct perf_event *event)
/* Enable counter */
armv7_pmnc_enable_counter(idx);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void scorpion_pmu_reset(void *info)
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index f6cdcacfb96d..7a2ba1c689a7 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -203,10 +203,8 @@ xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
static void xscale1pmu_enable_event(struct perf_event *event)
{
- unsigned long val, mask, evt, flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ unsigned long val, mask, evt;
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
switch (idx) {
@@ -229,20 +227,16 @@ static void xscale1pmu_enable_event(struct perf_event *event)
return;
}
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~mask;
val |= evt;
xscale1pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void xscale1pmu_disable_event(struct perf_event *event)
{
- unsigned long val, mask, evt, flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ unsigned long val, mask, evt;
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
switch (idx) {
@@ -263,12 +257,10 @@ static void xscale1pmu_disable_event(struct perf_event *event)
return;
}
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~mask;
val |= evt;
xscale1pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int
@@ -300,26 +292,20 @@ static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
- unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ unsigned long val;
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val |= XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
{
- unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ unsigned long val;
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static inline u64 xscale1pmu_read_counter(struct perf_event *event)
@@ -549,10 +535,8 @@ xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
static void xscale2pmu_enable_event(struct perf_event *event)
{
- unsigned long flags, ien, evtsel;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ unsigned long ien, evtsel;
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
ien = xscale2pmu_read_int_enable();
@@ -587,18 +571,14 @@ static void xscale2pmu_enable_event(struct perf_event *event)
return;
}
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void xscale2pmu_disable_event(struct perf_event *event)
{
- unsigned long flags, ien, evtsel, of_flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ unsigned long ien, evtsel, of_flags;
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
ien = xscale2pmu_read_int_enable();
@@ -638,11 +618,9 @@ static void xscale2pmu_disable_event(struct perf_event *event)
return;
}
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
xscale2pmu_write_overflow_flags(of_flags);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int
@@ -663,26 +641,20 @@ out:
static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
{
- unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ unsigned long val;
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
val |= XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
{
- unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ unsigned long val;
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale2pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static inline u64 xscale2pmu_read_counter(struct perf_event *event)
diff --git a/arch/arm/mach-sunxi/mc_smp.c b/arch/arm/mach-sunxi/mc_smp.c
index cb63921232a6..277f6aa8e6c2 100644
--- a/arch/arm/mach-sunxi/mc_smp.c
+++ b/arch/arm/mach-sunxi/mc_smp.c
@@ -803,16 +803,16 @@ static int __init sunxi_mc_smp_init(void)
for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) {
ret = of_property_match_string(node, "enable-method",
sunxi_mc_smp_data[i].enable_method);
- if (!ret)
+ if (ret >= 0)
break;
}
- is_a83t = sunxi_mc_smp_data[i].is_a83t;
-
of_node_put(node);
- if (ret)
+ if (ret < 0)
return -ENODEV;
+ is_a83t = sunxi_mc_smp_data[i].is_a83t;
+
if (!sunxi_mc_smp_cpu_table_init())
return -EINVAL;
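[Editor's note] The mc_smp hunk above fixes the return-value handling for of_property_match_string(), which returns the index of the matching string (>= 0) or a negative errno; testing "!ret" only recognised a match at index 0, and reading the table entry before the error check could index one slot past the array. A minimal sketch of the corrected pattern follows; the helper name, the simplified struct and the signature are illustrative only, not part of the patch.

#include <linux/init.h>
#include <linux/of.h>

struct smp_method {			/* stand-in for sunxi_mc_smp_data */
	const char *enable_method;
	bool is_a83t;
};

static int __init match_enable_method(struct device_node *node,
				      const struct smp_method *data,
				      int n, bool *is_a83t)
{
	int i, ret = -ENODEV;

	for (i = 0; i < n; i++) {
		ret = of_property_match_string(node, "enable-method",
					       data[i].enable_method);
		if (ret >= 0)		/* index of the matching string */
			break;
	}

	if (ret < 0)			/* nothing matched; i == n here */
		return -ENODEV;

	*is_a83t = data[i].is_a83t;	/* only dereference after the check */
	return 0;
}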
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 584f9528c996..b6c9e01e14f5 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -470,3 +470,8 @@
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7b071a00425d..8f6cf1221b6a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -36,6 +36,7 @@ config ARM64
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_DEVMAP
select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_HW_PTE_YOUNG
select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_SET_MEMORY
@@ -154,7 +155,7 @@ config ARM64
select HAVE_MOVE_PUD
select HAVE_PCI
select HAVE_ACPI_APEI if (ACPI && EFI)
- select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+ select HAVE_ALIGNED_STRUCT_PAGE
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_COMPILER_H
@@ -1519,15 +1520,15 @@ config XEN
# include/linux/mmzone.h requires the following to be true:
#
-# MAX_ORDER + PAGE_SHIFT <= SECTION_SIZE_BITS
+# MAX_PAGE_ORDER + PAGE_SHIFT <= SECTION_SIZE_BITS
#
-# so the maximum value of MAX_ORDER is SECTION_SIZE_BITS - PAGE_SHIFT:
+# so the maximum value of MAX_PAGE_ORDER is SECTION_SIZE_BITS - PAGE_SHIFT:
#
-# | SECTION_SIZE_BITS | PAGE_SHIFT | max MAX_ORDER | default MAX_ORDER |
-# ----+-------------------+--------------+-----------------+--------------------+
-# 4K | 27 | 12 | 15 | 10 |
-# 16K | 27 | 14 | 13 | 11 |
-# 64K | 29 | 16 | 13 | 13 |
+# | SECTION_SIZE_BITS | PAGE_SHIFT | max MAX_PAGE_ORDER | default MAX_PAGE_ORDER |
+# ----+-------------------+--------------+----------------------+-------------------------+
+# 4K | 27 | 12 | 15 | 10 |
+# 16K | 27 | 14 | 13 | 11 |
+# 64K | 29 | 16 | 13 | 13 |
config ARCH_FORCE_MAX_ORDER
int
default "13" if ARM64_64K_PAGES
@@ -1535,21 +1536,21 @@ config ARCH_FORCE_MAX_ORDER
default "10"
help
The kernel page allocator limits the size of maximal physically
- contiguous allocations. The limit is called MAX_ORDER and it
+ contiguous allocations. The limit is called MAX_PAGE_ORDER and it
defines the maximal power of two of number of pages that can be
allocated as a single contiguous block. This option allows
overriding the default setting when ability to allocate very
large blocks of physically contiguous memory is required.
The maximal size of allocation cannot exceed the size of the
- section, so the value of MAX_ORDER should satisfy
+ section, so the value of MAX_PAGE_ORDER should satisfy
- MAX_ORDER + PAGE_SHIFT <= SECTION_SIZE_BITS
+ MAX_PAGE_ORDER + PAGE_SHIFT <= SECTION_SIZE_BITS
Don't change if unsure.
config UNMAP_KERNEL_AT_EL0
- bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
+ bool "Unmap kernel when running in userspace (KPTI)" if EXPERT
default y
help
Speculation attacks against some high-performance processors can
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 9a2d3723cd0f..47ecc4cff9d2 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -200,7 +200,7 @@ endif
endif
vdso-install-y += arch/arm64/kernel/vdso/vdso.so.dbg
-vdso-install-$(CONFIG_COMPAT_VDSO) += arch/arm64/kernel/vdso32/vdso.so.dbg:vdso32.so
+vdso-install-$(CONFIG_COMPAT_VDSO) += arch/arm64/kernel/vdso32/vdso32.so.dbg
include $(srctree)/scripts/Makefile.defconf
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 1761f5972443..a5a787371117 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -44,7 +44,7 @@ EFI_ZBOOT_BFD_TARGET := elf64-littleaarch64
EFI_ZBOOT_MACH_TYPE := ARM64
EFI_ZBOOT_FORWARD_CFI := $(CONFIG_ARM64_BTI_KERNEL)
-EFI_ZBOOT_OBJCOPY_FLAGS = --add-symbol zboot_code_size=0x$(shell \
+EFI_ZBOOT_OBJCOPY_FLAGS = --add-symbol zboot_code_size=0x$$( \
$(NM) vmlinux|grep _kernel_codesize|cut -d' ' -f1)
include $(srctree)/drivers/firmware/efi/libstub/Makefile.zboot
diff --git a/arch/arm64/boot/install.sh b/arch/arm64/boot/install.sh
index 7399d706967a..9b7a09808a3d 100755
--- a/arch/arm64/boot/install.sh
+++ b/arch/arm64/boot/install.sh
@@ -17,7 +17,8 @@
# $3 - kernel map file
# $4 - default install path (blank if root directory)
-if [ "$(basename $2)" = "Image.gz" ]; then
+if [ "$(basename $2)" = "Image.gz" ] || [ "$(basename $2)" = "vmlinuz.efi" ]
+then
# Compressed install
echo "Installing compressed kernel"
base=vmlinuz
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 376a980f2bad..7b1975bf4b90 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -12,7 +12,7 @@
#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H
-#include <asm-generic/export.h>
+#include <linux/export.h>
#include <asm/alternative.h>
#include <asm/asm-bug.h>
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index ceb368d33bf4..06a4670bdb0b 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -58,7 +58,6 @@ static inline unsigned int arch_slab_minalign(void)
#define CTR_L1IP(ctr) SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
#define ICACHEF_ALIASING 0
-#define ICACHEF_VPIPT 1
extern unsigned long __icache_flags;
/*
@@ -70,11 +69,6 @@ static inline int icache_is_aliasing(void)
return test_bit(ICACHEF_ALIASING, &__icache_flags);
}
-static __always_inline int icache_is_vpipt(void)
-{
- return test_bit(ICACHEF_VPIPT, &__icache_flags);
-}
-
static inline u32 cache_type_cwg(void)
{
return SYS_FIELD_GET(CTR_EL0, CWG, read_cpuid_cachetype());
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index f6d416fe49b0..21c824edf8ce 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -617,6 +617,7 @@ static inline bool id_aa64pfr1_mte(u64 pfr1)
return val >= ID_AA64PFR1_EL1_MTE_MTE2;
}
+void __init setup_boot_cpu_features(void);
void __init setup_system_features(void);
void __init setup_user_features(void);
@@ -819,6 +820,11 @@ static inline bool system_supports_tlb_range(void)
return alternative_has_cap_unlikely(ARM64_HAS_TLB_RANGE);
}
+static inline bool system_supports_lpa2(void)
+{
+ return cpus_have_final_cap(ARM64_HAS_LPA2);
+}
+
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index cdf6a35e3994..cda81d009c9b 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -242,14 +242,6 @@
| (\nx << 5)
.endm
-/*
- * Zero the entire ZA array
- * ZERO ZA
- */
-.macro zero_za
- .inst 0xc00800ff
-.endm
-
.macro __for from:req, to:req
.if (\from) == (\to)
_for__body %\from
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 12d5f47f7dbe..7eefc525a9df 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -15,29 +15,9 @@
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+asmlinkage void kasan_early_init(void);
void kasan_init(void);
-
-/*
- * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
- * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
- * where N = (1 << KASAN_SHADOW_SCALE_SHIFT).
- *
- * KASAN_SHADOW_OFFSET:
- * This value is used to map an address to the corresponding shadow
- * address by the following formula:
- * shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
- *
- * (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) shadow addresses that lie in range
- * [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END) cover all 64-bits of virtual
- * addresses. So KASAN_SHADOW_OFFSET should satisfy the following equation:
- * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
- * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
- */
-#define _KASAN_SHADOW_START(va) (KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
-#define KASAN_SHADOW_START _KASAN_SHADOW_START(vabits_actual)
-
void kasan_copy_shadow(pgd_t *pgdir);
-asmlinkage void kasan_early_init(void);
#else
static inline void kasan_init(void) { }
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 85d26143faa5..83ddb14b95a5 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -37,27 +37,12 @@
/*
- * If KASLR is enabled, then an offset K is added to the kernel address
- * space. The bottom 21 bits of this offset are zero to guarantee 2MB
- * alignment for PA and VA.
- *
- * For each pagetable level of the swapper, we know that the shift will
- * be larger than 21 (for the 4KB granule case we use section maps thus
- * the smallest shift is actually 30) thus there is the possibility that
- * KASLR can increase the number of pagetable entries by 1, so we make
- * room for this extra entry.
- *
- * Note KASLR cannot increase the number of required entries for a level
- * by more than one because it increments both the virtual start and end
- * addresses equally (the extra entry comes from the case where the end
- * address is just pushed over a boundary and the start address isn't).
+ * A relocatable kernel may execute from an address that differs from the one at
+ * which it was linked. In the worst case, its runtime placement may intersect
+ * with two adjacent PGDIR entries, which means that an additional page table
+ * may be needed at each subordinate level.
*/
-
-#ifdef CONFIG_RANDOMIZE_BASE
-#define EARLY_KASLR (1)
-#else
-#define EARLY_KASLR (0)
-#endif
+#define EXTRA_PAGE __is_defined(CONFIG_RELOCATABLE)
#define SPAN_NR_ENTRIES(vstart, vend, shift) \
((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)
@@ -83,7 +68,7 @@
+ EARLY_PGDS((vstart), (vend), add) /* each PGDIR needs a next level page table */ \
+ EARLY_PUDS((vstart), (vend), add) /* each PUD needs a next level page table */ \
+ EARLY_PMDS((vstart), (vend), add)) /* each PMD needs a next level page table */
-#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end, EARLY_KASLR))
+#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end, EXTRA_PAGE))
/* the initial ID map may need two extra pages if it needs to be extended */
#if VA_BITS < 48
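[Editor's note] The EXTRA_PAGE change above rests on the observation that a relocatable kernel image can straddle one more table-entry boundary at runtime than at link time. A stand-alone illustration of the SPAN_NR_ENTRIES() arithmetic is below (user-space C; the addresses and the 48 MiB image size are made up, and shift 30 corresponds to a PUD entry with 4K pages).

#include <stdio.h>

#define SPAN_NR_ENTRIES(vstart, vend, shift) \
	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)

int main(void)
{
	unsigned long long size   = 48ULL << 20;		/* 48 MiB image */
	unsigned long long linked = 0xffff800008000000ULL;	/* link-time VA */
	/* runtime placement pushed just across a 1 GiB boundary */
	unsigned long long moved  = 0xffff80003fe00000ULL;

	printf("entries when linked: %llu\n",
	       SPAN_NR_ENTRIES(linked, linked + size, 30));	/* prints 1 */
	printf("entries when moved:  %llu\n",
	       SPAN_NR_ENTRIES(moved, moved + size, 30));	/* prints 2 */
	return 0;
}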
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 49e0d4b36bd0..e3e793d0ec30 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -244,13 +244,6 @@ static inline size_t __invalidate_icache_max_range(void)
static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
/*
- * VPIPT I-cache maintenance must be done from EL2. See comment in the
- * nVHE flavor of __kvm_tlb_flush_vmid_ipa().
- */
- if (icache_is_vpipt() && read_sysreg(CurrentEL) != CurrentEL_EL2)
- return;
-
- /*
* Blow the whole I-cache if it is aliasing (i.e. VIPT) or the
* invalidation range exceeds our arbitrary limit on invalidations by
* cache line.
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index d3e354bb8351..10068500d601 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -25,6 +25,8 @@
#define KVM_PGTABLE_MIN_BLOCK_LEVEL 2U
#endif
+#define kvm_lpa2_is_enabled() false
+
static inline u64 kvm_get_parange(u64 mmfr0)
{
u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index fde4186cc387..d82305ab420f 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -65,15 +65,41 @@
#define KERNEL_END _end
/*
- * Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
- * address space for the shadow region respectively. They can bloat the stack
- * significantly, so double the (minimum) stack size when they are in use.
+ * Generic and Software Tag-Based KASAN modes require 1/8th and 1/16th of the
+ * kernel virtual address space for storing the shadow memory respectively.
+ *
+ * The mapping between a virtual memory address and its corresponding shadow
+ * memory address is defined based on the formula:
+ *
+ * shadow_addr = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
+ *
+ * where KASAN_SHADOW_SCALE_SHIFT is the order of the number of bits that map
+ * to a single shadow byte and KASAN_SHADOW_OFFSET is a constant that offsets
+ * the mapping. Note that KASAN_SHADOW_OFFSET does not point to the start of
+ * the shadow memory region.
+ *
+ * Based on this mapping, we define two constants:
+ *
+ * KASAN_SHADOW_START: the start of the shadow memory region;
+ * KASAN_SHADOW_END: the end of the shadow memory region.
+ *
+ * KASAN_SHADOW_END is defined first as the shadow address that corresponds to
+ * the upper bound of possible virtual kernel memory addresses UL(1) << 64
+ * according to the mapping formula.
+ *
+ * KASAN_SHADOW_START is defined second based on KASAN_SHADOW_END. The shadow
+ * memory start must map to the lowest possible kernel virtual memory address
+ * and thus it depends on the actual bitness of the address space.
+ *
+ * As KASAN inserts redzones between stack variables, this increases the stack
+ * memory usage significantly. Thus, we double the (minimum) stack size.
*/
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
-#define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
- + KASAN_SHADOW_OFFSET)
-#define PAGE_END (KASAN_SHADOW_END - (1UL << (vabits_actual - KASAN_SHADOW_SCALE_SHIFT)))
+#define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) + KASAN_SHADOW_OFFSET)
+#define _KASAN_SHADOW_START(va) (KASAN_SHADOW_END - (UL(1) << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
+#define KASAN_SHADOW_START _KASAN_SHADOW_START(vabits_actual)
+#define PAGE_END KASAN_SHADOW_START
#define KASAN_THREAD_SHIFT 1
#else
#define KASAN_THREAD_SHIFT 0
@@ -182,6 +208,7 @@
#include <linux/types.h>
#include <asm/boot.h>
#include <asm/bug.h>
+#include <asm/sections.h>
#if VA_BITS > 48
extern u64 vabits_actual;
@@ -193,15 +220,12 @@ extern s64 memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
-/* the virtual base of the kernel image */
-extern u64 kimage_vaddr;
-
/* the offset between the kernel virtual and physical mappings */
extern u64 kimage_voffset;
static inline unsigned long kaslr_offset(void)
{
- return kimage_vaddr - KIMAGE_VADDR;
+ return (u64)&_text - KIMAGE_VADDR;
}
#ifdef CONFIG_RANDOMIZE_BASE
@@ -407,6 +431,5 @@ void dump_mem_limit(void);
#define INIT_MEMBLOCK_MEMORY_REGIONS (INIT_MEMBLOCK_REGIONS * 8)
#endif
-#include <asm-generic/memory_model.h>
#endif /* __ASM_MEMORY_H */
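[Editor's note] The reworked comment above spells out the shadow mapping; the stand-alone sketch below walks through the same arithmetic for generic KASAN (KASAN_SHADOW_SCALE_SHIFT == 3, one shadow byte per 8 bytes of memory). The shadow offset is a placeholder standing in for CONFIG_KASAN_SHADOW_OFFSET.

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT	3
#define KASAN_SHADOW_OFFSET		0xdfff800000000000ULL	/* placeholder */

#define KASAN_SHADOW_END \
	((1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT)) + KASAN_SHADOW_OFFSET)
#define KASAN_SHADOW_START(va_bits) \
	(KASAN_SHADOW_END - (1ULL << ((va_bits) - KASAN_SHADOW_SCALE_SHIFT)))

/* shadow_addr = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET */
static unsigned long long kasan_mem_to_shadow(unsigned long long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	unsigned long long addr = 0xffff000012345678ULL;

	printf("shadow(%#llx)      = %#llx\n", addr, kasan_mem_to_shadow(addr));
	printf("KASAN_SHADOW_END   = %#llx\n", KASAN_SHADOW_END);
	printf("KASAN_SHADOW_START = %#llx (48-bit VA)\n",
	       KASAN_SHADOW_START(48));
	return 0;
}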
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index e9624f6326dd..483dbfa39c4c 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -71,6 +71,8 @@ extern bool arm64_use_ng_mappings;
#define PTE_MAYBE_NG (arm64_use_ng_mappings ? PTE_NG : 0)
#define PMD_MAYBE_NG (arm64_use_ng_mappings ? PMD_SECT_NG : 0)
+#define lpa2_is_enabled() false
+
/*
* If we have userspace only BTI we don't want to mark kernel pages
* guarded even if the system does support BTI.
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index e5bc54522e71..5b0a04810b23 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -167,6 +167,9 @@ struct thread_struct {
unsigned long fault_address; /* fault info */
unsigned long fault_code; /* ESR_EL1 value */
struct debug_info debug; /* debugging */
+
+ struct user_fpsimd_state kernel_fpsimd_state;
+ unsigned int kernel_fpsimd_cpu;
#ifdef CONFIG_ARM64_PTR_AUTH
struct ptrauth_keys_user keys_user;
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h
index 6a75d7ecdcaa..8e86c9e70e48 100644
--- a/arch/arm64/include/asm/simd.h
+++ b/arch/arm64/include/asm/simd.h
@@ -12,8 +12,6 @@
#include <linux/preempt.h>
#include <linux/types.h>
-DECLARE_PER_CPU(bool, fpsimd_context_busy);
-
#ifdef CONFIG_KERNEL_MODE_NEON
/*
@@ -28,17 +26,10 @@ static __must_check inline bool may_use_simd(void)
/*
* We must make sure that the SVE has been initialized properly
* before using the SIMD in kernel.
- * fpsimd_context_busy is only set while preemption is disabled,
- * and is clear whenever preemption is enabled. Since
- * this_cpu_read() is atomic w.r.t. preemption, fpsimd_context_busy
- * cannot change under our feet -- if it's set we cannot be
- * migrated, and if it's clear we cannot be migrated to a CPU
- * where it is set.
*/
return !WARN_ON(!system_capabilities_finalized()) &&
system_supports_fpsimd() &&
- !in_hardirq() && !irqs_disabled() && !in_nmi() &&
- !this_cpu_read(fpsimd_context_busy);
+ !in_hardirq() && !irqs_disabled() && !in_nmi();
}
#else /* ! CONFIG_KERNEL_MODE_NEON */
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
index 5f5437621029..8a8acc220371 100644
--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -10,7 +10,7 @@
/*
* Section size must be at least 512MB for 64K base
* page size config. Otherwise it will be less than
- * MAX_ORDER and the build process will fail.
+ * MAX_PAGE_ORDER and the build process will fail.
*/
#ifdef CONFIG_ARM64_64K_PAGES
#define SECTION_SIZE_BITS 29
diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h
index 508f734de46e..f63dc654e545 100644
--- a/arch/arm64/include/asm/stacktrace/common.h
+++ b/arch/arm64/include/asm/stacktrace/common.h
@@ -9,7 +9,6 @@
#ifndef __ASM_STACKTRACE_COMMON_H
#define __ASM_STACKTRACE_COMMON_H
-#include <linux/kprobes.h>
#include <linux/types.h>
struct stack_info {
@@ -23,12 +22,6 @@ struct stack_info {
* @fp: The fp value in the frame record (or the real fp)
* @pc: The lr value in the frame record (or the real lr)
*
- * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance
- * associated with the most recently encountered replacement lr
- * value.
- *
- * @task: The task being unwound.
- *
* @stack: The stack currently being unwound.
* @stacks: An array of stacks which can be unwound.
* @nr_stacks: The number of stacks in @stacks.
@@ -36,10 +29,6 @@ struct stack_info {
struct unwind_state {
unsigned long fp;
unsigned long pc;
-#ifdef CONFIG_KRETPROBES
- struct llist_node *kr_cur;
-#endif
- struct task_struct *task;
struct stack_info stack;
struct stack_info *stacks;
@@ -66,14 +55,8 @@ static inline bool stackinfo_on_stack(const struct stack_info *info,
return true;
}
-static inline void unwind_init_common(struct unwind_state *state,
- struct task_struct *task)
+static inline void unwind_init_common(struct unwind_state *state)
{
- state->task = task;
-#ifdef CONFIG_KRETPROBES
- state->kr_cur = NULL;
-#endif
-
state->stack = stackinfo_get_unknown();
}
diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index 25ab83a315a7..44759281d0d4 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -31,7 +31,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
unsigned long fp,
unsigned long pc)
{
- unwind_init_common(state, NULL);
+ unwind_init_common(state);
state->fp = fp;
state->pc = pc;
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 5e65f51c10d2..c3b19b376c86 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -645,6 +645,7 @@
#define OP_AT_S1E0W sys_insn(AT_Op0, 0, AT_CRn, 8, 3)
#define OP_AT_S1E1RP sys_insn(AT_Op0, 0, AT_CRn, 9, 0)
#define OP_AT_S1E1WP sys_insn(AT_Op0, 0, AT_CRn, 9, 1)
+#define OP_AT_S1E1A sys_insn(AT_Op0, 0, AT_CRn, 9, 2)
#define OP_AT_S1E2R sys_insn(AT_Op0, 4, AT_CRn, 8, 0)
#define OP_AT_S1E2W sys_insn(AT_Op0, 4, AT_CRn, 8, 1)
#define OP_AT_S12E1R sys_insn(AT_Op0, 4, AT_CRn, 8, 4)
@@ -781,10 +782,16 @@
#define OP_TLBI_VMALLS12E1NXS sys_insn(1, 4, 9, 7, 6)
/* Misc instructions */
+#define OP_GCSPUSHX sys_insn(1, 0, 7, 7, 4)
+#define OP_GCSPOPCX sys_insn(1, 0, 7, 7, 5)
+#define OP_GCSPOPX sys_insn(1, 0, 7, 7, 6)
+#define OP_GCSPUSHM sys_insn(1, 3, 7, 7, 0)
+
#define OP_BRB_IALL sys_insn(1, 1, 7, 2, 4)
#define OP_BRB_INJ sys_insn(1, 1, 7, 2, 5)
#define OP_CFP_RCTX sys_insn(1, 3, 7, 3, 4)
#define OP_DVP_RCTX sys_insn(1, 3, 7, 3, 5)
+#define OP_COSP_RCTX sys_insn(1, 3, 7, 3, 6)
#define OP_CPP_RCTX sys_insn(1, 3, 7, 3, 7)
/* Common SCTLR_ELx flags. */
@@ -871,10 +878,12 @@
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0
+#define ID_AA64MMFR0_EL1_TGRAN4_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT
#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN 0x1
+#define ID_AA64MMFR0_EL1_TGRAN16_LPA2 ID_AA64MMFR0_EL1_TGRAN16_52_BIT
#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX 0xf
#define ARM64_MIN_PARANGE_BITS 32
@@ -882,6 +891,7 @@
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT 0x0
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE 0x1
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2 0x3
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
#ifdef CONFIG_ARM64_PA_BITS_52
@@ -892,11 +902,13 @@
#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX
#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN16_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_LPA2 ID_AA64MMFR0_EL1_TGRAN16_52_BIT
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX
#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT
@@ -1039,6 +1051,19 @@
#define PIRx_ELx_PERM(idx, perm) ((perm) << ((idx) * 4))
+/*
+ * Permission Overlay Extension (POE) permission encodings.
+ */
+#define POE_NONE UL(0x0)
+#define POE_R UL(0x1)
+#define POE_X UL(0x2)
+#define POE_RX UL(0x3)
+#define POE_W UL(0x4)
+#define POE_RW UL(0x5)
+#define POE_XW UL(0x6)
+#define POE_RXW UL(0x7)
+#define POE_MASK UL(0xf)
+
#define ARM64_FEATURE_FIELD_BITS 4
/* Defined for compatibility only, do not add new users. */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 553d1bc559c6..e72a3bf9e563 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -80,6 +80,7 @@ void arch_setup_new_exec(void);
#define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */
#define TIF_SME 27 /* SME in use */
#define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */
+#define TIF_KERNEL_FPSTATE 29 /* Task is in a kernel mode FPSIMD section */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 846c563689a8..0150deb332af 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -22,15 +22,15 @@ static void tlb_flush(struct mmu_gather *tlb);
#include <asm-generic/tlb.h>
/*
- * get the tlbi levels in arm64. Default value is 0 if more than one
- * of cleared_* is set or neither is set.
- * Arm64 doesn't support p4ds now.
+ * get the tlbi levels in arm64. Default value is TLBI_TTL_UNKNOWN if more than
+ * one of cleared_* is set or neither is set - this elides the level hinting to
+ * the hardware.
*/
static inline int tlb_get_level(struct mmu_gather *tlb)
{
/* The TTL field is only valid for the leaf entry. */
if (tlb->freed_tables)
- return 0;
+ return TLBI_TTL_UNKNOWN;
if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
tlb->cleared_puds ||
@@ -47,7 +47,12 @@ static inline int tlb_get_level(struct mmu_gather *tlb)
tlb->cleared_p4ds))
return 1;
- return 0;
+ if (tlb->cleared_p4ds && !(tlb->cleared_ptes ||
+ tlb->cleared_pmds ||
+ tlb->cleared_puds))
+ return 0;
+
+ return TLBI_TTL_UNKNOWN;
}
static inline void tlb_flush(struct mmu_gather *tlb)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index bb2c2833a987..1deb5d789c2e 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -94,19 +94,22 @@ static inline unsigned long get_trans_granule(void)
* When ARMv8.4-TTL exists, TLBI operations take an additional hint for
* the level at which the invalidation must take place. If the level is
* wrong, no invalidation may take place. In the case where the level
- * cannot be easily determined, a 0 value for the level parameter will
- * perform a non-hinted invalidation.
+ * cannot be easily determined, the value TLBI_TTL_UNKNOWN will perform
+ * a non-hinted invalidation. Any provided level outside the hint range
+ * will also cause fall-back to non-hinted invalidation.
*
* For Stage-2 invalidation, use the level values provided to that effect
* in asm/stage2_pgtable.h.
*/
#define TLBI_TTL_MASK GENMASK_ULL(47, 44)
+#define TLBI_TTL_UNKNOWN INT_MAX
+
#define __tlbi_level(op, addr, level) do { \
u64 arg = addr; \
\
if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) && \
- level) { \
+ level >= 0 && level <= 3) { \
u64 ttl = level & 3; \
ttl |= get_trans_granule() << 2; \
arg &= ~TLBI_TTL_MASK; \
@@ -122,28 +125,34 @@ static inline unsigned long get_trans_granule(void)
} while (0)
/*
- * This macro creates a properly formatted VA operand for the TLB RANGE.
- * The value bit assignments are:
+ * This macro creates a properly formatted VA operand for the TLB RANGE. The
+ * value bit assignments are:
*
* +----------+------+-------+-------+-------+----------------------+
* | ASID | TG | SCALE | NUM | TTL | BADDR |
* +-----------------+-------+-------+-------+----------------------+
* |63 48|47 46|45 44|43 39|38 37|36 0|
*
- * The address range is determined by below formula:
- * [BADDR, BADDR + (NUM + 1) * 2^(5*SCALE + 1) * PAGESIZE)
+ * The address range is determined by below formula: [BADDR, BADDR + (NUM + 1) *
+ * 2^(5*SCALE + 1) * PAGESIZE)
+ *
+ * Note that the first argument, baddr, is pre-shifted; If LPA2 is in use, BADDR
+ * holds addr[52:16]. Else BADDR holds page number. See for example ARM DDI
+ * 0487J.a section C5.5.60 "TLBI VAE1IS, TLBI VAE1ISNXS, TLB Invalidate by VA,
+ * EL1, Inner Shareable".
*
*/
-#define __TLBI_VADDR_RANGE(addr, asid, scale, num, ttl) \
- ({ \
- unsigned long __ta = (addr) >> PAGE_SHIFT; \
- __ta &= GENMASK_ULL(36, 0); \
- __ta |= (unsigned long)(ttl) << 37; \
- __ta |= (unsigned long)(num) << 39; \
- __ta |= (unsigned long)(scale) << 44; \
- __ta |= get_trans_granule() << 46; \
- __ta |= (unsigned long)(asid) << 48; \
- __ta; \
+#define __TLBI_VADDR_RANGE(baddr, asid, scale, num, ttl) \
+ ({ \
+ unsigned long __ta = (baddr); \
+ unsigned long __ttl = (ttl >= 1 && ttl <= 3) ? ttl : 0; \
+ __ta &= GENMASK_ULL(36, 0); \
+ __ta |= __ttl << 37; \
+ __ta |= (unsigned long)(num) << 39; \
+ __ta |= (unsigned long)(scale) << 44; \
+ __ta |= get_trans_granule() << 46; \
+ __ta |= (unsigned long)(asid) << 48; \
+ __ta; \
})
/* These macros are used by the TLBI RANGE feature. */
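[Editor's note] As a quick illustration of the granularity encoded above: one range operation covers (NUM + 1) * 2^(5*SCALE + 1) pages, so at SCALE = 3 a single operation can invalidate up to 31 * 65536 pages (about 7.75 GiB with 4K pages), while SCALE = 0 works in units of two pages. A stand-alone sketch follows; range_pages() is just an illustrative helper.

#include <stdio.h>

/* pages covered by one range TLBI: (num + 1) * 2^(5*scale + 1) */
static unsigned long range_pages(int num, int scale)
{
	return (unsigned long)(num + 1) << (5 * scale + 1);
}

int main(void)
{
	int scale;

	for (scale = 0; scale <= 3; scale++)
		printf("scale %d: one op covers %lu to %lu pages\n",
		       scale, range_pages(0, scale), range_pages(30, scale));
	return 0;
}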
@@ -216,12 +225,16 @@ static inline unsigned long get_trans_granule(void)
* CPUs, ensuring that any walk-cache entries associated with the
* translation are also invalidated.
*
- * __flush_tlb_range(vma, start, end, stride, last_level)
+ * __flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
* Invalidate the virtual-address range '[start, end)' on all
* CPUs for the user address space corresponding to 'vma->mm'.
* The invalidation operations are issued at a granularity
* determined by 'stride' and only affect any walk-cache entries
- * if 'last_level' is equal to false.
+ * if 'last_level' is equal to false. tlb_level is the level at
+ * which the invalidation must take place. If the level is wrong,
+ * no invalidation may take place. In the case where the level
+ * cannot be easily determined, the value TLBI_TTL_UNKNOWN will
+ * perform a non-hinted invalidation.
*
*
* Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
@@ -345,34 +358,44 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
* @tlb_level: Translation Table level hint, if known
* @tlbi_user: If 'true', call an additional __tlbi_user()
* (typically for user ASIDs). 'false' for IPA instructions
+ * @lpa2: If 'true', the lpa2 scheme is used as set out below
*
* When the CPU does not support TLB range operations, flush the TLB
* entries one by one at the granularity of 'stride'. If the TLB
* range ops are supported, then:
*
- * 1. If 'pages' is odd, flush the first page through non-range
- * operations;
+ * 1. If FEAT_LPA2 is in use, the start address of a range operation must be
+ * 64KB aligned, so flush pages one by one until the alignment is reached
+ * using the non-range operations. This step is skipped if LPA2 is not in
+ * use.
+ *
+ * 2. The minimum range granularity is decided by 'scale', so multiple range
+ * TLBI operations may be required. Start from scale = 3, flush the largest
+ * possible number of pages ((num+1)*2^(5*scale+1)) that fit into the
+ * requested range, then decrement scale and continue until one or zero pages
+ * are left. We must start from highest scale to ensure 64KB start alignment
+ * is maintained in the LPA2 case.
*
- * 2. For remaining pages: the minimum range granularity is decided
- * by 'scale', so multiple range TLBI operations may be required.
- * Start from scale = 0, flush the corresponding number of pages
- * ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
- * until no pages left.
+ * 3. If there is 1 page remaining, flush it through non-range operations. Range
+ * operations can only span an even number of pages. We save this for last to
+ * ensure 64KB start alignment is maintained for the LPA2 case.
*
* Note that certain ranges can be represented by either num = 31 and
* scale or num = 0 and scale + 1. The loop below favours the latter
* since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
*/
#define __flush_tlb_range_op(op, start, pages, stride, \
- asid, tlb_level, tlbi_user) \
+ asid, tlb_level, tlbi_user, lpa2) \
do { \
int num = 0; \
- int scale = 0; \
+ int scale = 3; \
+ int shift = lpa2 ? 16 : PAGE_SHIFT; \
unsigned long addr; \
\
while (pages > 0) { \
if (!system_supports_tlb_range() || \
- pages % 2 == 1) { \
+ pages == 1 || \
+ (lpa2 && start != ALIGN(start, SZ_64K))) { \
addr = __TLBI_VADDR(start, asid); \
__tlbi_level(op, addr, tlb_level); \
if (tlbi_user) \
@@ -384,20 +407,20 @@ do { \
\
num = __TLBI_RANGE_NUM(pages, scale); \
if (num >= 0) { \
- addr = __TLBI_VADDR_RANGE(start, asid, scale, \
- num, tlb_level); \
+ addr = __TLBI_VADDR_RANGE(start >> shift, asid, \
+ scale, num, tlb_level); \
__tlbi(r##op, addr); \
if (tlbi_user) \
__tlbi_user(r##op, addr); \
start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
pages -= __TLBI_RANGE_PAGES(num, scale); \
} \
- scale++; \
+ scale--; \
} \
} while (0)
#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
- __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false)
+ __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
@@ -427,9 +450,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
asid = ASID(vma->vm_mm);
if (last_level)
- __flush_tlb_range_op(vale1is, start, pages, stride, asid, tlb_level, true);
+ __flush_tlb_range_op(vale1is, start, pages, stride, asid,
+ tlb_level, true, lpa2_is_enabled());
else
- __flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true);
+ __flush_tlb_range_op(vae1is, start, pages, stride, asid,
+ tlb_level, true, lpa2_is_enabled());
dsb(ish);
mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
@@ -441,9 +466,10 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
/*
* We cannot use leaf-only invalidation here, since we may be invalidating
* table entries as part of collapsing hugepages or moving page tables.
- * Set the tlb_level to 0 because we can not get enough information here.
+ * Set the tlb_level to TLBI_TTL_UNKNOWN because we can not get enough
+ * information here.
*/
- __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
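[Editor's note] The rewritten flush loop above is easier to follow with a toy model of its page accounting. The sketch below is plain user-space C, assumes 4K pages and a single-page stride, and issues no real TLBI instructions; it only mirrors the structure of __flush_tlb_range_op(): single-page operations until the start is 64KB-aligned when LPA2 is in use, range operations from scale 3 downwards, and a final single page if one is left over.

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE	4096UL
#define SZ_64K		0x10000UL

static void emit(const char *what, unsigned long addr, unsigned long pages)
{
	printf("%-12s addr=%#lx pages=%lu\n", what, addr, pages);
}

static void flush_range(unsigned long start, unsigned long pages, bool lpa2)
{
	int scale = 3;

	while (pages > 0) {
		/* non-range op: last remaining page, or aligning start to 64KB for LPA2 */
		if (pages == 1 || (lpa2 && (start & (SZ_64K - 1)))) {
			emit("tlbi (page)", start, 1);
			start += PAGE_SIZE;
			pages -= 1;
			continue;
		}

		/* units of 2^(5*scale+1) pages, mirroring the effect of __TLBI_RANGE_NUM() */
		long num = (long)((pages >> (5 * scale + 1)) & 0x1f) - 1;

		if (num >= 0) {
			unsigned long covered =
				(unsigned long)(num + 1) << (5 * scale + 1);

			emit("tlbi (range)", start, covered);
			start += covered * PAGE_SIZE;
			pages -= covered;
		}
		scale--;
	}
}

int main(void)
{
	/* start 4KB past a 64KB boundary, 35 pages, LPA2 in use */
	flush_range(0xffff000040001000UL, 35, true);
	return 0;
}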
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 9fab663dd2de..a323b109b9c4 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -23,6 +23,7 @@ void update_freq_counters_refs(void);
#define arch_set_freq_scale topology_set_freq_scale
#define arch_scale_freq_capacity topology_get_freq_scale
#define arch_scale_freq_invariant topology_scale_freq_invariant
+#define arch_scale_freq_ref topology_get_freq_ref
#ifdef CONFIG_ACPI_CPPC_LIB
#define arch_init_invariance_cppc topology_init_cpu_capacity_cppc
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 531effca5f1f..491b2b9bd553 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -39,7 +39,7 @@
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-#define __NR_compat_syscalls 457
+#define __NR_compat_syscalls 462
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 9f7c1bf99526..7118282d1c79 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -919,6 +919,16 @@ __SYSCALL(__NR_futex_wake, sys_futex_wake)
__SYSCALL(__NR_futex_wait, sys_futex_wait)
#define __NR_futex_requeue 456
__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
+#define __NR_statmount 457
+__SYSCALL(__NR_statmount, sys_statmount)
+#define __NR_listmount 458
+__SYSCALL(__NR_listmount, sys_listmount)
+#define __NR_lsm_get_self_attr 459
+__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr)
+#define __NR_lsm_set_self_attr 460
+__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
+#define __NR_lsm_list_modules 461
+__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 91d2d6714969..01a4c1d7fc09 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1081,25 +1081,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
-
- /*
- * Initialize the indirect array of CPU capabilities pointers before we
- * handle the boot CPU below.
- */
- init_cpucap_indirect_list();
-
- /*
- * Detect broken pseudo-NMI. Must be called _before_ the call to
- * setup_boot_cpu_capabilities() since it interacts with
- * can_use_gic_priorities().
- */
- detect_system_supports_pseudo_nmi();
-
- /*
- * Detect and enable early CPU capabilities based on the boot CPU,
- * after we have initialised the CPU feature infrastructure.
- */
- setup_boot_cpu_capabilities();
}
static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
@@ -1584,16 +1565,6 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry,
return has_sre;
}
-static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
-{
- u32 midr = read_cpuid_id();
-
- /* Cavium ThunderX pass 1.x and 2.x */
- return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
- MIDR_CPU_VAR_REV(0, 0),
- MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
-}
-
static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
int scope)
{
@@ -1768,6 +1739,39 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
return !meltdown_safe;
}
+#if defined(ID_AA64MMFR0_EL1_TGRAN_LPA2) && defined(ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2)
+static bool has_lpa2_at_stage1(u64 mmfr0)
+{
+ unsigned int tgran;
+
+ tgran = cpuid_feature_extract_unsigned_field(mmfr0,
+ ID_AA64MMFR0_EL1_TGRAN_SHIFT);
+ return tgran == ID_AA64MMFR0_EL1_TGRAN_LPA2;
+}
+
+static bool has_lpa2_at_stage2(u64 mmfr0)
+{
+ unsigned int tgran;
+
+ tgran = cpuid_feature_extract_unsigned_field(mmfr0,
+ ID_AA64MMFR0_EL1_TGRAN_2_SHIFT);
+ return tgran == ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2;
+}
+
+static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ u64 mmfr0;
+
+ mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ return has_lpa2_at_stage1(mmfr0) && has_lpa2_at_stage2(mmfr0);
+}
+#else
+static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define KPTI_NG_TEMP_VA (-(1UL << PMD_SHIFT))
@@ -1840,7 +1844,7 @@ static int __init __kpti_install_ng_mappings(void *__unused)
static void __init kpti_install_ng_mappings(void)
{
/* Check whether KPTI is going to be used */
- if (!cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
+ if (!arm64_kernel_unmapped_at_el0())
return;
/*
@@ -2326,12 +2330,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
},
#endif /* CONFIG_ARM64_LSE_ATOMICS */
{
- .desc = "Software prefetching using PRFM",
- .capability = ARM64_HAS_NO_HW_PREFETCH,
- .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
- .matches = has_no_hw_prefetch,
- },
- {
.desc = "Virtualization Host Extensions",
.capability = ARM64_HAS_VIRT_HOST_EXTN,
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
@@ -2735,6 +2733,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
},
+ {
+ .desc = "52-bit Virtual Addressing for KVM (LPA2)",
+ .capability = ARM64_HAS_LPA2,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_lpa2,
+ },
{},
};
@@ -3275,14 +3279,6 @@ void check_local_cpu_capabilities(void)
verify_local_cpu_capabilities();
}
-static void __init setup_boot_cpu_capabilities(void)
-{
- /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
- update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
- /* Enable the SCOPE_BOOT_CPU capabilities alone right away */
- enable_cpu_capabilities(SCOPE_BOOT_CPU);
-}
-
bool this_cpu_has_cap(unsigned int n)
{
if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
@@ -3338,37 +3334,52 @@ unsigned long cpu_get_elf_hwcap2(void)
return elf_hwcap[1];
}
-void __init setup_system_features(void)
+static void __init setup_boot_cpu_capabilities(void)
{
- int i;
/*
- * The system-wide safe feature feature register values have been
- * finalized. Finalize and log the available system capabilities.
+ * The boot CPU's feature register values have been recorded. Detect
+ * boot cpucaps and local cpucaps for the boot CPU, then enable and
+ * patch alternatives for the available boot cpucaps.
*/
- update_cpu_capabilities(SCOPE_SYSTEM);
- if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
- !cpus_have_cap(ARM64_HAS_PAN))
- pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
+ update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
+ enable_cpu_capabilities(SCOPE_BOOT_CPU);
+ apply_boot_alternatives();
+}
+void __init setup_boot_cpu_features(void)
+{
/*
- * Enable all the available capabilities which have not been enabled
- * already.
+ * Initialize the indirect array of CPU capabilities pointers before we
+ * handle the boot CPU.
*/
- enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
+ init_cpucap_indirect_list();
- kpti_install_ng_mappings();
+ /*
+ * Detect broken pseudo-NMI. Must be called _before_ the call to
+ * setup_boot_cpu_capabilities() since it interacts with
+ * can_use_gic_priorities().
+ */
+ detect_system_supports_pseudo_nmi();
- sve_setup();
- sme_setup();
+ setup_boot_cpu_capabilities();
+}
+static void __init setup_system_capabilities(void)
+{
/*
- * Check for sane CTR_EL0.CWG value.
+ * The system-wide safe feature register values have been finalized.
+ * Detect, enable, and patch alternatives for the available system
+ * cpucaps.
*/
- if (!cache_type_cwg())
- pr_warn("No Cache Writeback Granule information, assuming %d\n",
- ARCH_DMA_MINALIGN);
+ update_cpu_capabilities(SCOPE_SYSTEM);
+ enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
+ apply_alternatives_all();
- for (i = 0; i < ARM64_NCAPS; i++) {
+ /*
+ * Log any cpucaps with a cpumask as these aren't logged by
+ * update_cpu_capabilities().
+ */
+ for (int i = 0; i < ARM64_NCAPS; i++) {
const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i];
if (caps && caps->cpus && caps->desc &&
@@ -3376,6 +3387,29 @@ void __init setup_system_features(void)
pr_info("detected: %s on CPU%*pbl\n",
caps->desc, cpumask_pr_args(caps->cpus));
}
+
+ /*
+ * TTBR0 PAN doesn't have its own cpucap, so log it manually.
+ */
+ if (system_uses_ttbr0_pan())
+ pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
+}
+
+void __init setup_system_features(void)
+{
+ setup_system_capabilities();
+
+ kpti_install_ng_mappings();
+
+ sve_setup();
+ sme_setup();
+
+ /*
+ * Check for sane CTR_EL0.CWG value.
+ */
+ if (!cache_type_cwg())
+ pr_warn("No Cache Writeback Granule information, assuming %d\n",
+ ARCH_DMA_MINALIGN);
}
void __init setup_user_features(void)
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index a257da7b56fe..47043c0d95ec 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -36,8 +36,6 @@ static struct cpuinfo_arm64 boot_cpu_data;
static inline const char *icache_policy_str(int l1ip)
{
switch (l1ip) {
- case CTR_EL0_L1Ip_VPIPT:
- return "VPIPT";
case CTR_EL0_L1Ip_VIPT:
return "VIPT";
case CTR_EL0_L1Ip_PIPT:
@@ -388,9 +386,6 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
switch (l1ip) {
case CTR_EL0_L1Ip_PIPT:
break;
- case CTR_EL0_L1Ip_VPIPT:
- set_bit(ICACHEF_VPIPT, &__icache_flags);
- break;
case CTR_EL0_L1Ip_VIPT:
default:
/* Assume aliasing */
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 1559c706d32d..505f389be3e0 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -85,13 +85,13 @@
* softirq kicks in. Upon vcpu_put(), KVM will save the vcpu FP state and
* flag the register state as invalid.
*
- * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
- * save the task's FPSIMD context back to task_struct from softirq context.
- * To prevent this from racing with the manipulation of the task's FPSIMD state
- * from task context and thereby corrupting the state, it is necessary to
- * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
- * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to
- * run but prevent them to use FPSIMD.
+ * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may be
+ * called from softirq context, which will save the task's FPSIMD context back
+ * to task_struct. To prevent this from racing with the manipulation of the
+ * task's FPSIMD state from task context and thereby corrupting the state, it
+ * is necessary to protect any manipulation of a task's fpsimd_state or
+ * TIF_FOREIGN_FPSTATE flag with get_cpu_fpsimd_context(), which will suspend
+ * softirq servicing entirely until put_cpu_fpsimd_context() is called.
*
* For a certain task, the sequence may look something like this:
* - the task gets scheduled in; if both the task's fpsimd_cpu field
@@ -209,27 +209,14 @@ static inline void sme_free(struct task_struct *t) { }
#endif
-DEFINE_PER_CPU(bool, fpsimd_context_busy);
-EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);
-
static void fpsimd_bind_task_to_cpu(void);
-static void __get_cpu_fpsimd_context(void)
-{
- bool busy = __this_cpu_xchg(fpsimd_context_busy, true);
-
- WARN_ON(busy);
-}
-
/*
* Claim ownership of the CPU FPSIMD context for use by the calling context.
*
* The caller may freely manipulate the FPSIMD context metadata until
* put_cpu_fpsimd_context() is called.
*
- * The double-underscore version must only be called if you know the task
- * can't be preempted.
- *
* On RT kernels local_bh_disable() is not sufficient because it only
* serializes soft interrupt related sections via a local lock, but stays
* preemptible. Disabling preemption is the right choice here as bottom
@@ -242,14 +229,6 @@ static void get_cpu_fpsimd_context(void)
local_bh_disable();
else
preempt_disable();
- __get_cpu_fpsimd_context();
-}
-
-static void __put_cpu_fpsimd_context(void)
-{
- bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
-
- WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
}
/*
@@ -261,18 +240,12 @@ static void __put_cpu_fpsimd_context(void)
*/
static void put_cpu_fpsimd_context(void)
{
- __put_cpu_fpsimd_context();
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
local_bh_enable();
else
preempt_enable();
}
-static bool have_cpu_fpsimd_context(void)
-{
- return !preemptible() && __this_cpu_read(fpsimd_context_busy);
-}
-
unsigned int task_get_vl(const struct task_struct *task, enum vec_type type)
{
return task->thread.vl[type];
@@ -383,7 +356,8 @@ static void task_fpsimd_load(void)
bool restore_ffr;
WARN_ON(!system_supports_fpsimd());
- WARN_ON(!have_cpu_fpsimd_context());
+ WARN_ON(preemptible());
+ WARN_ON(test_thread_flag(TIF_KERNEL_FPSTATE));
if (system_supports_sve() || system_supports_sme()) {
switch (current->thread.fp_type) {
@@ -406,7 +380,7 @@ static void task_fpsimd_load(void)
default:
/*
* This indicates either a bug in
- * fpsimd_save() or memory corruption, we
+ * fpsimd_save_user_state() or memory corruption, we
* should always record an explicit format
* when we save. We always at least have the
* memory allocated for FPSIMD registers so
@@ -457,7 +431,7 @@ static void task_fpsimd_load(void)
* than via current, if we are saving KVM state then it will have
* ensured that the type of registers to save is set in last->to_save.
*/
-static void fpsimd_save(void)
+static void fpsimd_save_user_state(void)
{
struct cpu_fp_state const *last =
this_cpu_ptr(&fpsimd_last_state);
@@ -467,7 +441,7 @@ static void fpsimd_save(void)
unsigned int vl;
WARN_ON(!system_supports_fpsimd());
- WARN_ON(!have_cpu_fpsimd_context());
+ WARN_ON(preemptible());
if (test_thread_flag(TIF_FOREIGN_FPSTATE))
return;
@@ -888,7 +862,7 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
if (task == current) {
get_cpu_fpsimd_context();
- fpsimd_save();
+ fpsimd_save_user_state();
}
fpsimd_flush_task_state(task);
@@ -1171,7 +1145,7 @@ void __init sve_setup(void)
unsigned long b;
int max_bit;
- if (!cpus_have_cap(ARM64_SVE))
+ if (!system_supports_sve())
return;
/*
@@ -1301,7 +1275,7 @@ void __init sme_setup(void)
struct vl_info *info = &vl_info[ARM64_VEC_SME];
int min_bit, max_bit;
- if (!cpus_have_cap(ARM64_SME))
+ if (!system_supports_sme())
return;
/*
@@ -1500,6 +1474,34 @@ void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs)
current);
}
+static void fpsimd_load_kernel_state(struct task_struct *task)
+{
+ struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state);
+
+ /*
+ * Elide the load if this CPU holds the most recent kernel mode
+ * FPSIMD context of the current task.
+ */
+ if (last->st == &task->thread.kernel_fpsimd_state &&
+ task->thread.kernel_fpsimd_cpu == smp_processor_id())
+ return;
+
+ fpsimd_load_state(&task->thread.kernel_fpsimd_state);
+}
+
+static void fpsimd_save_kernel_state(struct task_struct *task)
+{
+ struct cpu_fp_state cpu_fp_state = {
+ .st = &task->thread.kernel_fpsimd_state,
+ .to_save = FP_STATE_FPSIMD,
+ };
+
+ fpsimd_save_state(&task->thread.kernel_fpsimd_state);
+ fpsimd_bind_state_to_cpu(&cpu_fp_state);
+
+ task->thread.kernel_fpsimd_cpu = smp_processor_id();
+}
+
void fpsimd_thread_switch(struct task_struct *next)
{
bool wrong_task, wrong_cpu;
@@ -1507,24 +1509,31 @@ void fpsimd_thread_switch(struct task_struct *next)
if (!system_supports_fpsimd())
return;
- __get_cpu_fpsimd_context();
+ WARN_ON_ONCE(!irqs_disabled());
/* Save unsaved fpsimd state, if any: */
- fpsimd_save();
-
- /*
- * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
- * state. For kernel threads, FPSIMD registers are never loaded
- * and wrong_task and wrong_cpu will always be true.
- */
- wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
- &next->thread.uw.fpsimd_state;
- wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();
+ if (test_thread_flag(TIF_KERNEL_FPSTATE))
+ fpsimd_save_kernel_state(current);
+ else
+ fpsimd_save_user_state();
- update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
- wrong_task || wrong_cpu);
+ if (test_tsk_thread_flag(next, TIF_KERNEL_FPSTATE)) {
+ fpsimd_load_kernel_state(next);
+ set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
+ } else {
+ /*
+ * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
+ * state. For kernel threads, FPSIMD registers are never
+ * loaded with user mode FPSIMD state and so wrong_task and
+ * wrong_cpu will always be true.
+ */
+ wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
+ &next->thread.uw.fpsimd_state;
+ wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();
- __put_cpu_fpsimd_context();
+ update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
+ wrong_task || wrong_cpu);
+ }
}
static void fpsimd_flush_thread_vl(enum vec_type type)
@@ -1614,7 +1623,7 @@ void fpsimd_preserve_current_state(void)
return;
get_cpu_fpsimd_context();
- fpsimd_save();
+ fpsimd_save_user_state();
put_cpu_fpsimd_context();
}
@@ -1826,13 +1835,15 @@ static void fpsimd_flush_cpu_state(void)
*/
void fpsimd_save_and_flush_cpu_state(void)
{
+ unsigned long flags;
+
if (!system_supports_fpsimd())
return;
WARN_ON(preemptible());
- __get_cpu_fpsimd_context();
- fpsimd_save();
+ local_irq_save(flags);
+ fpsimd_save_user_state();
fpsimd_flush_cpu_state();
- __put_cpu_fpsimd_context();
+ local_irq_restore(flags);
}
#ifdef CONFIG_KERNEL_MODE_NEON
@@ -1864,10 +1875,37 @@ void kernel_neon_begin(void)
get_cpu_fpsimd_context();
/* Save unsaved fpsimd state, if any: */
- fpsimd_save();
+ if (test_thread_flag(TIF_KERNEL_FPSTATE)) {
+ BUG_ON(IS_ENABLED(CONFIG_PREEMPT_RT) || !in_serving_softirq());
+ fpsimd_save_kernel_state(current);
+ } else {
+ fpsimd_save_user_state();
+
+ /*
+ * Set the thread flag so that the kernel mode FPSIMD state
+ * will be context switched along with the rest of the task
+ * state.
+ *
+ * On non-PREEMPT_RT, softirqs may interrupt task level kernel
+ * mode FPSIMD, but the task will not be preemptible so setting
+ * TIF_KERNEL_FPSTATE for those would be both wrong (as it
+ * would mark the task context FPSIMD state as requiring a
+ * context switch) and unnecessary.
+ *
+ * On PREEMPT_RT, softirqs are serviced from a separate thread,
+ * which is scheduled as usual, and this guarantees that these
+ * softirqs are not interrupting use of the FPSIMD in kernel
+ * mode in task context. So in this case, setting the flag here
+ * is always appropriate.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) || !in_serving_softirq())
+ set_thread_flag(TIF_KERNEL_FPSTATE);
+ }
/* Invalidate any task state remaining in the fpsimd regs: */
fpsimd_flush_cpu_state();
+
+ put_cpu_fpsimd_context();
}
EXPORT_SYMBOL_GPL(kernel_neon_begin);
@@ -1885,7 +1923,16 @@ void kernel_neon_end(void)
if (!system_supports_fpsimd())
return;
- put_cpu_fpsimd_context();
+ /*
+ * If we are returning from a nested use of kernel mode FPSIMD, restore
+ * the task context kernel mode FPSIMD state. This can only happen when
+ * running in softirq context on non-PREEMPT_RT.
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && in_serving_softirq() &&
+ test_thread_flag(TIF_KERNEL_FPSTATE))
+ fpsimd_load_kernel_state(current);
+ else
+ clear_thread_flag(TIF_KERNEL_FPSTATE);
}
EXPORT_SYMBOL_GPL(kernel_neon_end);
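
For reference, a minimal user-space model of the TIF_KERNEL_FPSTATE handling in the
kernel_neon_begin()/kernel_neon_end() hunks above. The boolean flag, the
preempt_rt/in_softirq parameters and the printf() tracing are stand-ins for the real
kernel primitives; the sketch only shows when the flag is set, kept or cleared, not
how the FPSIMD registers are actually saved or restored.

/*
 * Illustrative model of the flag handling shown in the hunks above.
 * All names and the tracing output are invented for this sketch.
 */
#include <stdbool.h>
#include <stdio.h>

static bool tif_kernel_fpstate;	/* models the per-task thread flag */

static void model_neon_begin(bool preempt_rt, bool in_softirq)
{
	if (tif_kernel_fpstate) {
		/* nested use: only legal from softirq on non-PREEMPT_RT */
		printf("save task-level kernel FPSIMD state\n");
	} else {
		printf("save user FPSIMD state\n");
		if (preempt_rt || !in_softirq)
			tif_kernel_fpstate = true;
	}
}

static void model_neon_end(bool preempt_rt, bool in_softirq)
{
	if (!preempt_rt && in_softirq && tif_kernel_fpstate)
		printf("reload task-level kernel FPSIMD state\n");
	else
		tif_kernel_fpstate = false;
}

int main(void)
{
	/* task-context use interrupted by a softirq that also uses NEON */
	model_neon_begin(false, false);	/* task: flag becomes set       */
	model_neon_begin(false, true);	/* softirq: nested, saves task  */
	model_neon_end(false, true);	/* softirq: reloads task state  */
	model_neon_end(false, false);	/* task: clears the flag        */
	return 0;
}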
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 7b236994f0e1..cab7f91949d8 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -482,7 +482,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
str_l x21, __fdt_pointer, x5 // Save FDT pointer
- ldr_l x4, kimage_vaddr // Save the offset between
+ adrp x4, _text // Save the offset between
sub x4, x4, x0 // the kernel virtual and
str_l x4, kimage_voffset, x5 // physical mappings
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index 3addc09f8746..e30fd9e32ef3 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -21,14 +21,25 @@
static u64 __boot_status __initdata;
+// temporary __prel64 related definitions
+// to be removed when this code is moved under pi/
+
+#define __prel64_initconst __initconst
+
+#define PREL64(type, name) union { type *name; }
+
+#define prel64_pointer(__d) (__d)
+
+typedef bool filter_t(u64 val);
+
struct ftr_set_desc {
char name[FTR_DESC_NAME_LEN];
- struct arm64_ftr_override *override;
+ PREL64(struct arm64_ftr_override, override);
struct {
char name[FTR_DESC_FIELD_LEN];
u8 shift;
u8 width;
- bool (*filter)(u64 val);
+ PREL64(filter_t, filter);
} fields[];
};
@@ -46,7 +57,7 @@ static bool __init mmfr1_vh_filter(u64 val)
val == 0);
}
-static const struct ftr_set_desc mmfr1 __initconst = {
+static const struct ftr_set_desc mmfr1 __prel64_initconst = {
.name = "id_aa64mmfr1",
.override = &id_aa64mmfr1_override,
.fields = {
@@ -70,7 +81,7 @@ static bool __init pfr0_sve_filter(u64 val)
return true;
}
-static const struct ftr_set_desc pfr0 __initconst = {
+static const struct ftr_set_desc pfr0 __prel64_initconst = {
.name = "id_aa64pfr0",
.override = &id_aa64pfr0_override,
.fields = {
@@ -94,7 +105,7 @@ static bool __init pfr1_sme_filter(u64 val)
return true;
}
-static const struct ftr_set_desc pfr1 __initconst = {
+static const struct ftr_set_desc pfr1 __prel64_initconst = {
.name = "id_aa64pfr1",
.override = &id_aa64pfr1_override,
.fields = {
@@ -105,7 +116,7 @@ static const struct ftr_set_desc pfr1 __initconst = {
},
};
-static const struct ftr_set_desc isar1 __initconst = {
+static const struct ftr_set_desc isar1 __prel64_initconst = {
.name = "id_aa64isar1",
.override = &id_aa64isar1_override,
.fields = {
@@ -117,7 +128,7 @@ static const struct ftr_set_desc isar1 __initconst = {
},
};
-static const struct ftr_set_desc isar2 __initconst = {
+static const struct ftr_set_desc isar2 __prel64_initconst = {
.name = "id_aa64isar2",
.override = &id_aa64isar2_override,
.fields = {
@@ -128,7 +139,7 @@ static const struct ftr_set_desc isar2 __initconst = {
},
};
-static const struct ftr_set_desc smfr0 __initconst = {
+static const struct ftr_set_desc smfr0 __prel64_initconst = {
.name = "id_aa64smfr0",
.override = &id_aa64smfr0_override,
.fields = {
@@ -149,7 +160,7 @@ static bool __init hvhe_filter(u64 val)
ID_AA64MMFR1_EL1_VH_SHIFT));
}
-static const struct ftr_set_desc sw_features __initconst = {
+static const struct ftr_set_desc sw_features __prel64_initconst = {
.name = "arm64_sw",
.override = &arm64_sw_feature_override,
.fields = {
@@ -159,22 +170,23 @@ static const struct ftr_set_desc sw_features __initconst = {
},
};
-static const struct ftr_set_desc * const regs[] __initconst = {
- &mmfr1,
- &pfr0,
- &pfr1,
- &isar1,
- &isar2,
- &smfr0,
- &sw_features,
+static const
+PREL64(const struct ftr_set_desc, reg) regs[] __prel64_initconst = {
+ { &mmfr1 },
+ { &pfr0 },
+ { &pfr1 },
+ { &isar1 },
+ { &isar2 },
+ { &smfr0 },
+ { &sw_features },
};
static const struct {
char alias[FTR_ALIAS_NAME_LEN];
char feature[FTR_ALIAS_OPTION_LEN];
} aliases[] __initconst = {
- { "kvm-arm.mode=nvhe", "id_aa64mmfr1.vh=0" },
- { "kvm-arm.mode=protected", "id_aa64mmfr1.vh=0" },
+ { "kvm_arm.mode=nvhe", "id_aa64mmfr1.vh=0" },
+ { "kvm_arm.mode=protected", "id_aa64mmfr1.vh=0" },
{ "arm64.nosve", "id_aa64pfr0.sve=0" },
{ "arm64.nosme", "id_aa64pfr1.sme=0" },
{ "arm64.nobti", "id_aa64pfr1.bt=0" },
@@ -187,45 +199,61 @@ static const struct {
{ "nokaslr", "arm64_sw.nokaslr=1" },
};
-static int __init parse_nokaslr(char *unused)
+static int __init parse_hexdigit(const char *p, u64 *v)
{
- /* nokaslr param handling is done by early cpufeature code */
+ // skip "0x" if it comes next
+ if (p[0] == '0' && tolower(p[1]) == 'x')
+ p += 2;
+
+ // check whether the RHS is a single hex digit
+ if (!isxdigit(p[0]) || (p[1] && !isspace(p[1])))
+ return -EINVAL;
+
+ *v = tolower(*p) - (isdigit(*p) ? '0' : 'a' - 10);
return 0;
}
-early_param("nokaslr", parse_nokaslr);
-static int __init find_field(const char *cmdline,
+static int __init find_field(const char *cmdline, char *opt, int len,
const struct ftr_set_desc *reg, int f, u64 *v)
{
- char opt[FTR_DESC_NAME_LEN + FTR_DESC_FIELD_LEN + 2];
- int len;
+ int flen = strlen(reg->fields[f].name);
- len = snprintf(opt, ARRAY_SIZE(opt), "%s.%s=",
- reg->name, reg->fields[f].name);
+ // append '<fieldname>=' to obtain '<name>.<fieldname>='
+ memcpy(opt + len, reg->fields[f].name, flen);
+ len += flen;
+ opt[len++] = '=';
- if (!parameqn(cmdline, opt, len))
+ if (memcmp(cmdline, opt, len))
return -1;
- return kstrtou64(cmdline + len, 0, v);
+ return parse_hexdigit(cmdline + len, v);
}
static void __init match_options(const char *cmdline)
{
+ char opt[FTR_DESC_NAME_LEN + FTR_DESC_FIELD_LEN + 2];
int i;
for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ const struct ftr_set_desc *reg = prel64_pointer(regs[i].reg);
+ struct arm64_ftr_override *override;
+ int len = strlen(reg->name);
int f;
- if (!regs[i]->override)
- continue;
+ override = prel64_pointer(reg->override);
- for (f = 0; strlen(regs[i]->fields[f].name); f++) {
- u64 shift = regs[i]->fields[f].shift;
- u64 width = regs[i]->fields[f].width ?: 4;
+ // set opt[] to '<name>.'
+ memcpy(opt, reg->name, len);
+ opt[len++] = '.';
+
+ for (f = 0; reg->fields[f].name[0] != '\0'; f++) {
+ u64 shift = reg->fields[f].shift;
+ u64 width = reg->fields[f].width ?: 4;
u64 mask = GENMASK_ULL(shift + width - 1, shift);
+ bool (*filter)(u64 val);
u64 v;
- if (find_field(cmdline, regs[i], f, &v))
+ if (find_field(cmdline, opt, len, reg, f, &v))
continue;
/*
@@ -233,16 +261,16 @@ static void __init match_options(const char *cmdline)
* it by setting the value to the all-ones while
* clearing the mask... Yes, this is fragile.
*/
- if (regs[i]->fields[f].filter &&
- !regs[i]->fields[f].filter(v)) {
- regs[i]->override->val |= mask;
- regs[i]->override->mask &= ~mask;
+ filter = prel64_pointer(reg->fields[f].filter);
+ if (filter && !filter(v)) {
+ override->val |= mask;
+ override->mask &= ~mask;
continue;
}
- regs[i]->override->val &= ~mask;
- regs[i]->override->val |= (v << shift) & mask;
- regs[i]->override->mask |= mask;
+ override->val &= ~mask;
+ override->val |= (v << shift) & mask;
+ override->mask |= mask;
return;
}
@@ -258,23 +286,29 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
cmdline = skip_spaces(cmdline);
- for (len = 0; cmdline[len] && !isspace(cmdline[len]); len++);
- if (!len)
+ /* terminate on "--" appearing on the command line by itself */
+ if (cmdline[0] == '-' && cmdline[1] == '-' && isspace(cmdline[2]))
return;
- len = min(len, ARRAY_SIZE(buf) - 1);
- memcpy(buf, cmdline, len);
- buf[len] = '\0';
-
- if (strcmp(buf, "--") == 0)
+ for (len = 0; cmdline[len] && !isspace(cmdline[len]); len++) {
+ if (len >= sizeof(buf) - 1)
+ break;
+ if (cmdline[len] == '-')
+ buf[len] = '_';
+ else
+ buf[len] = cmdline[len];
+ }
+ if (!len)
return;
+ buf[len] = 0;
+
cmdline += len;
match_options(buf);
for (i = 0; parse_aliases && i < ARRAY_SIZE(aliases); i++)
- if (parameq(buf, aliases[i].alias))
+ if (!memcmp(buf, aliases[i].alias, len + 1))
__parse_cmdline(aliases[i].feature, false);
} while (1);
}
@@ -316,13 +350,16 @@ void init_feature_override(u64 boot_status);
asmlinkage void __init init_feature_override(u64 boot_status)
{
+ struct arm64_ftr_override *override;
+ const struct ftr_set_desc *reg;
int i;
for (i = 0; i < ARRAY_SIZE(regs); i++) {
- if (regs[i]->override) {
- regs[i]->override->val = 0;
- regs[i]->override->mask = 0;
- }
+ reg = prel64_pointer(regs[i].reg);
+ override = prel64_pointer(reg->override);
+
+ override->val = 0;
+ override->mask = 0;
}
__boot_status = boot_status;
@@ -330,9 +367,9 @@ asmlinkage void __init init_feature_override(u64 boot_status)
parse_cmdline();
for (i = 0; i < ARRAY_SIZE(regs); i++) {
- if (regs[i]->override)
- dcache_clean_inval_poc((unsigned long)regs[i]->override,
- (unsigned long)regs[i]->override +
- sizeof(*regs[i]->override));
+ reg = prel64_pointer(regs[i].reg);
+ override = prel64_pointer(reg->override);
+ dcache_clean_inval_poc((unsigned long)override,
+ (unsigned long)(override + 1));
}
}
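
The parse_hexdigit() helper above restricts each override value to a single hex
digit. A stand-alone user-space copy makes that restriction easy to exercise; the
-EINVAL return is replaced by -1 here, everything else follows the hunk.

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

static int parse_hexdigit(const char *p, uint64_t *v)
{
	/* skip "0x" if it comes next */
	if (p[0] == '0' && tolower(p[1]) == 'x')
		p += 2;

	/* the right-hand side must be a single hex digit */
	if (!isxdigit(p[0]) || (p[1] && !isspace(p[1])))
		return -1;

	*v = tolower(*p) - (isdigit(*p) ? '0' : 'a' - 10);
	return 0;
}

int main(void)
{
	uint64_t v;

	printf("%d\n", parse_hexdigit("0xf", &v));	/* 0, v == 15      */
	printf("%d\n", parse_hexdigit("10", &v));	/* -1: two digits  */
	return 0;
}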
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 6ad5c6ef5329..85087e2df564 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -22,6 +22,7 @@
#include <linux/vmalloc.h>
#include <asm/daifflags.h>
#include <asm/exception.h>
+#include <asm/numa.h>
#include <asm/softirq_stack.h>
#include <asm/stacktrace.h>
#include <asm/vmap_stack.h>
@@ -47,17 +48,17 @@ static void init_irq_scs(void)
for_each_possible_cpu(cpu)
per_cpu(irq_shadow_call_stack_ptr, cpu) =
- scs_alloc(cpu_to_node(cpu));
+ scs_alloc(early_cpu_to_node(cpu));
}
#ifdef CONFIG_VMAP_STACK
-static void init_irq_stacks(void)
+static void __init init_irq_stacks(void)
{
int cpu;
unsigned long *p;
for_each_possible_cpu(cpu) {
- p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
+ p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, early_cpu_to_node(cpu));
per_cpu(irq_stack_ptr, cpu) = p;
}
}
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 94a269cd1f07..12c7f3c8ba76 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -36,3 +36,10 @@ void __init kaslr_init(void)
pr_info("KASLR enabled\n");
__kaslr_is_enabled = true;
}
+
+static int __init parse_nokaslr(char *unused)
+{
+ /* nokaslr param handling is done by early cpufeature code */
+ return 0;
+}
+early_param("nokaslr", parse_nokaslr);
diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c
index 636be6715155..532d72ea42ee 100644
--- a/arch/arm64/kernel/kexec_image.c
+++ b/arch/arm64/kernel/kexec_image.c
@@ -122,9 +122,9 @@ static void *image_load(struct kimage *image,
kernel_segment->memsz -= text_offset;
image->start = kernel_segment->mem;
- pr_debug("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
- kernel_segment->mem, kbuf.bufsz,
- kernel_segment->memsz);
+ kexec_dprintk("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ kernel_segment->mem, kbuf.bufsz,
+ kernel_segment->memsz);
return NULL;
}
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 078910db77a4..b38aae5b488d 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -32,26 +32,12 @@
static void _kexec_image_info(const char *func, int line,
const struct kimage *kimage)
{
- unsigned long i;
-
- pr_debug("%s:%d:\n", func, line);
- pr_debug(" kexec kimage info:\n");
- pr_debug(" type: %d\n", kimage->type);
- pr_debug(" start: %lx\n", kimage->start);
- pr_debug(" head: %lx\n", kimage->head);
- pr_debug(" nr_segments: %lu\n", kimage->nr_segments);
- pr_debug(" dtb_mem: %pa\n", &kimage->arch.dtb_mem);
- pr_debug(" kern_reloc: %pa\n", &kimage->arch.kern_reloc);
- pr_debug(" el2_vectors: %pa\n", &kimage->arch.el2_vectors);
-
- for (i = 0; i < kimage->nr_segments; i++) {
- pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
- i,
- kimage->segment[i].mem,
- kimage->segment[i].mem + kimage->segment[i].memsz,
- kimage->segment[i].memsz,
- kimage->segment[i].memsz / PAGE_SIZE);
- }
+ kexec_dprintk("%s:%d:\n", func, line);
+ kexec_dprintk(" kexec kimage info:\n");
+ kexec_dprintk(" type: %d\n", kimage->type);
+ kexec_dprintk(" head: %lx\n", kimage->head);
+ kexec_dprintk(" kern_reloc: %pa\n", &kimage->arch.kern_reloc);
+ kexec_dprintk(" el2_vectors: %pa\n", &kimage->arch.el2_vectors);
}
void machine_kexec_cleanup(struct kimage *kimage)
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index a11a6e14ba89..0e017358f4ba 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -127,8 +127,8 @@ int load_other_segments(struct kimage *image,
image->elf_load_addr = kbuf.mem;
image->elf_headers_sz = headers_sz;
- pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
- image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
+ kexec_dprintk("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
}
/* load initrd */
@@ -148,8 +148,8 @@ int load_other_segments(struct kimage *image,
goto out_err;
initrd_load_addr = kbuf.mem;
- pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
- initrd_load_addr, kbuf.bufsz, kbuf.memsz);
+ kexec_dprintk("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ initrd_load_addr, kbuf.bufsz, kbuf.memsz);
}
/* load dtb */
@@ -179,8 +179,8 @@ int load_other_segments(struct kimage *image,
image->arch.dtb = dtb;
image->arch.dtb_mem = kbuf.mem;
- pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
- kbuf.mem, kbuf.bufsz, kbuf.memsz);
+ kexec_dprintk("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ kbuf.mem, kbuf.bufsz, kbuf.memsz);
return 0;
diff --git a/arch/arm64/kernel/pi/Makefile b/arch/arm64/kernel/pi/Makefile
index 4c0ea3cd4ea4..c844a0546d7f 100644
--- a/arch/arm64/kernel/pi/Makefile
+++ b/arch/arm64/kernel/pi/Makefile
@@ -3,6 +3,7 @@
KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \
-Os -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN) \
+ $(DISABLE_LATENT_ENTROPY_PLUGIN) \
$(call cc-option,-mbranch-protection=none) \
-I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \
-include $(srctree)/include/linux/hidden.h \
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index defbab84e9e5..4ced34f62dab 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -439,9 +439,8 @@ static void __init hyp_mode_check(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
- setup_system_features();
hyp_mode_check();
- apply_alternatives_all();
+ setup_system_features();
setup_user_features();
mark_linear_text_alias_ro();
}
@@ -454,14 +453,9 @@ void __init smp_prepare_boot_cpu(void)
* freed shortly, so we must move over to the runtime per-cpu area.
*/
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
- cpuinfo_store_boot_cpu();
- /*
- * We now know enough about the boot CPU to apply the
- * alternatives that cannot wait until interrupt handling
- * and/or scheduling is enabled.
- */
- apply_boot_alternatives();
+ cpuinfo_store_boot_cpu();
+ setup_boot_cpu_features();
/* Conditionally switch to GIC PMR for interrupt masking */
if (system_uses_irq_prio_masking())
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 17f66a74c745..7f88028a00c0 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -8,6 +8,7 @@
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
@@ -19,6 +20,31 @@
#include <asm/stacktrace.h>
/*
+ * Kernel unwind state
+ *
+ * @common: Common unwind state.
+ * @task: The task being unwound.
+ * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance
+ * associated with the most recently encountered replacement lr
+ * value.
+ */
+struct kunwind_state {
+ struct unwind_state common;
+ struct task_struct *task;
+#ifdef CONFIG_KRETPROBES
+ struct llist_node *kr_cur;
+#endif
+};
+
+static __always_inline void
+kunwind_init(struct kunwind_state *state,
+ struct task_struct *task)
+{
+ unwind_init_common(&state->common);
+ state->task = task;
+}
+
+/*
* Start an unwind from a pt_regs.
*
* The unwind will begin at the PC within the regs.
@@ -26,13 +52,13 @@
* The regs must be on a stack currently owned by the calling task.
*/
static __always_inline void
-unwind_init_from_regs(struct unwind_state *state,
- struct pt_regs *regs)
+kunwind_init_from_regs(struct kunwind_state *state,
+ struct pt_regs *regs)
{
- unwind_init_common(state, current);
+ kunwind_init(state, current);
- state->fp = regs->regs[29];
- state->pc = regs->pc;
+ state->common.fp = regs->regs[29];
+ state->common.pc = regs->pc;
}
/*
@@ -44,12 +70,12 @@ unwind_init_from_regs(struct unwind_state *state,
* The function which invokes this must be noinline.
*/
static __always_inline void
-unwind_init_from_caller(struct unwind_state *state)
+kunwind_init_from_caller(struct kunwind_state *state)
{
- unwind_init_common(state, current);
+ kunwind_init(state, current);
- state->fp = (unsigned long)__builtin_frame_address(1);
- state->pc = (unsigned long)__builtin_return_address(0);
+ state->common.fp = (unsigned long)__builtin_frame_address(1);
+ state->common.pc = (unsigned long)__builtin_return_address(0);
}
/*
@@ -63,35 +89,38 @@ unwind_init_from_caller(struct unwind_state *state)
* call this for the current task.
*/
static __always_inline void
-unwind_init_from_task(struct unwind_state *state,
- struct task_struct *task)
+kunwind_init_from_task(struct kunwind_state *state,
+ struct task_struct *task)
{
- unwind_init_common(state, task);
+ kunwind_init(state, task);
- state->fp = thread_saved_fp(task);
- state->pc = thread_saved_pc(task);
+ state->common.fp = thread_saved_fp(task);
+ state->common.pc = thread_saved_pc(task);
}
static __always_inline int
-unwind_recover_return_address(struct unwind_state *state)
+kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (state->task->ret_stack &&
- (state->pc == (unsigned long)return_to_handler)) {
+ (state->common.pc == (unsigned long)return_to_handler)) {
unsigned long orig_pc;
- orig_pc = ftrace_graph_ret_addr(state->task, NULL, state->pc,
- (void *)state->fp);
- if (WARN_ON_ONCE(state->pc == orig_pc))
+ orig_pc = ftrace_graph_ret_addr(state->task, NULL,
+ state->common.pc,
+ (void *)state->common.fp);
+ if (WARN_ON_ONCE(state->common.pc == orig_pc))
return -EINVAL;
- state->pc = orig_pc;
+ state->common.pc = orig_pc;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
- if (is_kretprobe_trampoline(state->pc)) {
- state->pc = kretprobe_find_ret_addr(state->task,
- (void *)state->fp,
- &state->kr_cur);
+ if (is_kretprobe_trampoline(state->common.pc)) {
+ unsigned long orig_pc;
+ orig_pc = kretprobe_find_ret_addr(state->task,
+ (void *)state->common.fp,
+ &state->kr_cur);
+ state->common.pc = orig_pc;
}
#endif /* CONFIG_KRETPROBES */
@@ -106,38 +135,40 @@ unwind_recover_return_address(struct unwind_state *state)
* and the location (but not the fp value) of B.
*/
static __always_inline int
-unwind_next(struct unwind_state *state)
+kunwind_next(struct kunwind_state *state)
{
struct task_struct *tsk = state->task;
- unsigned long fp = state->fp;
+ unsigned long fp = state->common.fp;
int err;
/* Final frame; nothing to unwind */
if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
return -ENOENT;
- err = unwind_next_frame_record(state);
+ err = unwind_next_frame_record(&state->common);
if (err)
return err;
- state->pc = ptrauth_strip_kernel_insn_pac(state->pc);
+ state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);
- return unwind_recover_return_address(state);
+ return kunwind_recover_return_address(state);
}
+typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);
+
static __always_inline void
-unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
- void *cookie)
+do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
+ void *cookie)
{
- if (unwind_recover_return_address(state))
+ if (kunwind_recover_return_address(state))
return;
while (1) {
int ret;
- if (!consume_entry(cookie, state->pc))
+ if (!consume_state(state, cookie))
break;
- ret = unwind_next(state);
+ ret = kunwind_next(state);
if (ret < 0)
break;
}
@@ -172,9 +203,10 @@ unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
: stackinfo_get_unknown(); \
})
-noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
- void *cookie, struct task_struct *task,
- struct pt_regs *regs)
+static __always_inline void
+kunwind_stack_walk(kunwind_consume_fn consume_state,
+ void *cookie, struct task_struct *task,
+ struct pt_regs *regs)
{
struct stack_info stacks[] = {
stackinfo_get_task(task),
@@ -190,22 +222,48 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
STACKINFO_EFI,
#endif
};
- struct unwind_state state = {
- .stacks = stacks,
- .nr_stacks = ARRAY_SIZE(stacks),
+ struct kunwind_state state = {
+ .common = {
+ .stacks = stacks,
+ .nr_stacks = ARRAY_SIZE(stacks),
+ },
};
if (regs) {
if (task != current)
return;
- unwind_init_from_regs(&state, regs);
+ kunwind_init_from_regs(&state, regs);
} else if (task == current) {
- unwind_init_from_caller(&state);
+ kunwind_init_from_caller(&state);
} else {
- unwind_init_from_task(&state, task);
+ kunwind_init_from_task(&state, task);
}
- unwind(&state, consume_entry, cookie);
+ do_kunwind(&state, consume_state, cookie);
+}
+
+struct kunwind_consume_entry_data {
+ stack_trace_consume_fn consume_entry;
+ void *cookie;
+};
+
+static bool
+arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
+{
+ struct kunwind_consume_entry_data *data = cookie;
+ return data->consume_entry(data->cookie, state->common.pc);
+}
+
+noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task,
+ struct pt_regs *regs)
+{
+ struct kunwind_consume_entry_data data = {
+ .consume_entry = consume_entry,
+ .cookie = cookie,
+ };
+
+ kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
static bool dump_backtrace_entry(void *arg, unsigned long where)
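
The arch_stack_walk() rework above keeps the old stack_trace_consume_fn interface by
wrapping it: the existing callback and its cookie are bundled into a struct, and an
adapter matching the new kunwind_consume_fn signature unpacks them again. A generic
user-space illustration of that adapter pattern follows; all names are invented for
the example and the "frames" array stands in for a real unwind.

#include <stdbool.h>
#include <stdio.h>

struct unwind_state_model { unsigned long pc; };

typedef bool (*state_consume_fn)(const struct unwind_state_model *state, void *cookie);
typedef bool (*entry_consume_fn)(void *cookie, unsigned long pc);

struct consume_entry_data {
	entry_consume_fn consume_entry;
	void *cookie;
};

/* adapter: present only the PC to the legacy (cookie, pc) callback */
static bool consume_entry_adapter(const struct unwind_state_model *state, void *cookie)
{
	struct consume_entry_data *data = cookie;

	return data->consume_entry(data->cookie, state->pc);
}

static void walk(state_consume_fn consume_state, void *cookie)
{
	struct unwind_state_model frames[] = { { 0x1000 }, { 0x2000 }, { 0x3000 } };

	for (unsigned int i = 0; i < 3; i++)
		if (!consume_state(&frames[i], cookie))
			break;
}

static bool print_entry(void *cookie, unsigned long pc)
{
	printf("%s 0x%lx\n", (const char *)cookie, pc);
	return true;
}

int main(void)
{
	struct consume_entry_data data = { print_entry, "pc:" };

	walk(consume_entry_adapter, &data);
	return 0;
}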
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 817d788cd866..1a2c72f3e7f8 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -82,7 +82,12 @@ int __init parse_acpi_topology(void)
#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt
-static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
+/*
+ * Ensure that amu_scale_freq_tick() will return SCHED_CAPACITY_SCALE until
+ * the CPU capacity and its associated frequency have been correctly
+ * initialized.
+ */
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale) = 1UL << (2 * SCHED_CAPACITY_SHIFT);
static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
static cpumask_var_t amu_fie_cpus;
@@ -112,14 +117,14 @@ static inline bool freq_counters_valid(int cpu)
return true;
}
-static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate)
+void freq_inv_set_max_ratio(int cpu, u64 max_rate)
{
- u64 ratio;
+ u64 ratio, ref_rate = arch_timer_get_rate();
if (unlikely(!max_rate || !ref_rate)) {
- pr_debug("CPU%d: invalid maximum or reference frequency.\n",
+ WARN_ONCE(1, "CPU%d: invalid maximum or reference frequency.\n",
cpu);
- return -EINVAL;
+ return;
}
/*
@@ -139,12 +144,10 @@ static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate)
ratio = div64_u64(ratio, max_rate);
if (!ratio) {
WARN_ONCE(1, "Reference frequency too low.\n");
- return -EINVAL;
+ return;
}
- per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;
-
- return 0;
+ WRITE_ONCE(per_cpu(arch_max_freq_scale, cpu), (unsigned long)ratio);
}
static void amu_scale_freq_tick(void)
@@ -195,10 +198,7 @@ static void amu_fie_setup(const struct cpumask *cpus)
return;
for_each_cpu(cpu, cpus) {
- if (!freq_counters_valid(cpu) ||
- freq_inv_set_max_ratio(cpu,
- cpufreq_get_hw_max_freq(cpu) * 1000ULL,
- arch_timer_get_rate()))
+ if (!freq_counters_valid(cpu))
return;
}
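
The arch_max_freq_scale ratio written by freq_inv_set_max_ratio() above is a
fixed-point value, and the new static initialiser 1UL << (2 * SCHED_CAPACITY_SHIFT)
corresponds to a ratio of exactly 1.0 until the real maximum frequency is known. A
small user-space check of that arithmetic, assuming SCHED_CAPACITY_SHIFT is 10 as in
current kernels and using example clock rates:

#include <inttypes.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* assumed value, matches SCHED_CAPACITY_SCALE == 1024 */

static uint64_t max_freq_ratio(uint64_t ref_rate, uint64_t max_rate)
{
	uint64_t ratio = ref_rate << (2 * SCHED_CAPACITY_SHIFT);

	return ratio / max_rate;	/* div64_u64() in the kernel */
}

int main(void)
{
	/* 25 MHz arch timer, 2.0 GHz maximum CPU frequency (example numbers) */
	printf("%" PRIu64 "\n", max_freq_ratio(25000000, 2000000000));
	/* ref_rate == max_rate gives the 1.0 default, 1 << 20 == 1048576 */
	printf("%" PRIu64 "\n", max_freq_ratio(25000000, 25000000));
	return 0;
}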
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 1f911a76c5af..2266fcdff78a 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -118,7 +118,7 @@ endif
VDSO_CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
# Build rules
-targets := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso) vdso.so vdso.so.dbg vdso.so.raw
+targets := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso) vdso.so vdso32.so.dbg vdso.so.raw
c-obj-vdso := $(addprefix $(obj)/, $(c-obj-vdso))
c-obj-vdso-gettimeofday := $(addprefix $(obj)/, $(c-obj-vdso-gettimeofday))
asm-obj-vdso := $(addprefix $(obj)/, $(asm-obj-vdso))
@@ -127,15 +127,15 @@ obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso)
targets += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
-include/generated/vdso32-offsets.h: $(obj)/vdso.so.dbg FORCE
+include/generated/vdso32-offsets.h: $(obj)/vdso32.so.dbg FORCE
$(call if_changed,vdsosym)
# Strip rule for vdso.so
$(obj)/vdso.so: OBJCOPYFLAGS := -S
-$(obj)/vdso.so: $(obj)/vdso.so.dbg FORCE
+$(obj)/vdso.so: $(obj)/vdso32.so.dbg FORCE
$(call if_changed,objcopy)
-$(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE
+$(obj)/vdso32.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE
$(call if_changed,vdsomunge)
# Link rule for the .so file, .lds has to be first
diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
index fe5472a184a3..97c527ef53c2 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
@@ -16,7 +16,7 @@ struct hyp_pool {
* API at EL2.
*/
hyp_spinlock_t lock;
- struct list_head free_area[MAX_ORDER + 1];
+ struct list_head free_area[NR_PAGE_ORDERS];
phys_addr_t range_start;
phys_addr_t range_end;
unsigned short max_order;
diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
index b1e392186a0f..e691290d3765 100644
--- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
+++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -228,7 +228,8 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
int i;
hyp_spin_lock_init(&pool->lock);
- pool->max_order = min(MAX_ORDER, get_order(nr_pages << PAGE_SHIFT));
+ pool->max_order = min(MAX_PAGE_ORDER,
+ get_order(nr_pages << PAGE_SHIFT));
for (i = 0; i <= pool->max_order; i++)
INIT_LIST_HEAD(&pool->free_area[i]);
pool->range_start = phys;
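
The free_area[] sizing in gfp.h above changes from MAX_ORDER + 1 entries to
NR_PAGE_ORDERS, while hyp_pool_init() keeps indexing orders 0..max_order inclusive,
so the two hunks only stay consistent if NR_PAGE_ORDERS equals MAX_PAGE_ORDER + 1. A
compile-time statement of that assumption; both macro values below are placeholders,
not the kernel's definitions.

#include <assert.h>

#define MAX_PAGE_ORDER	10			/* placeholder value          */
#define NR_PAGE_ORDERS	(MAX_PAGE_ORDER + 1)	/* assumed relationship       */

struct list_head_stub { struct list_head_stub *next, *prev; };

struct hyp_pool_model {
	struct list_head_stub free_area[NR_PAGE_ORDERS];
};

int main(void)
{
	/* indexing 0..MAX_PAGE_ORDER inclusive must stay in bounds */
	static_assert(sizeof(((struct hyp_pool_model *)0)->free_area) /
		      sizeof(struct list_head_stub) == MAX_PAGE_ORDER + 1,
		      "free_area[] must cover order MAX_PAGE_ORDER");
	return 0;
}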
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 9d23a51d7f75..b29f15418c0a 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -12,7 +12,7 @@
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>
-/* Used by icache_is_vpipt(). */
+/* Used by icache_is_aliasing(). */
unsigned long __icache_flags;
/* Used by kvm_get_vttbr(). */
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index 1b265713d6be..a60fb13e2192 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -105,28 +105,6 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
dsb(ish);
isb();
- /*
- * If the host is running at EL1 and we have a VPIPT I-cache,
- * then we must perform I-cache maintenance at EL2 in order for
- * it to have an effect on the guest. Since the guest cannot hit
- * I-cache lines allocated with a different VMID, we don't need
- * to worry about junk out of guest reset (we nuke the I-cache on
- * VMID rollover), but we do need to be careful when remapping
- * executable pages for the same guest. This can happen when KSM
- * takes a CoW fault on an executable page, copies the page into
- * a page that was previously mapped in the guest and then needs
- * to invalidate the guest view of the I-cache for that page
- * from EL1. To solve this, we invalidate the entire I-cache when
- * unmapping a page from a guest if we have a VPIPT I-cache but
- * the host is running at EL1. As above, we could do better if
- * we had the VA.
- *
- * The moral of this story is: if you have a VPIPT I-cache, then
- * you should be running with VHE enabled.
- */
- if (icache_is_vpipt())
- icache_inval_all_pou();
-
__tlb_switch_to_host(&cxt);
}
@@ -157,28 +135,6 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
dsb(nsh);
isb();
- /*
- * If the host is running at EL1 and we have a VPIPT I-cache,
- * then we must perform I-cache maintenance at EL2 in order for
- * it to have an effect on the guest. Since the guest cannot hit
- * I-cache lines allocated with a different VMID, we don't need
- * to worry about junk out of guest reset (we nuke the I-cache on
- * VMID rollover), but we do need to be careful when remapping
- * executable pages for the same guest. This can happen when KSM
- * takes a CoW fault on an executable page, copies the page into
- * a page that was previously mapped in the guest and then needs
- * to invalidate the guest view of the I-cache for that page
- * from EL1. To solve this, we invalidate the entire I-cache when
- * unmapping a page from a guest if we have a VPIPT I-cache but
- * the host is running at EL1. As above, we could do better if
- * we had the VA.
- *
- * The moral of this story is: if you have a VPIPT I-cache, then
- * you should be running with VHE enabled.
- */
- if (icache_is_vpipt())
- icache_inval_all_pou();
-
__tlb_switch_to_host(&cxt);
}
@@ -205,10 +161,6 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
dsb(ish);
isb();
- /* See the comment in __kvm_tlb_flush_vmid_ipa() */
- if (icache_is_vpipt())
- icache_inval_all_pou();
-
__tlb_switch_to_host(&cxt);
}
@@ -246,18 +198,5 @@ void __kvm_flush_vm_context(void)
/* Same remark as in __tlb_switch_to_guest() */
dsb(ish);
__tlbi(alle1is);
-
- /*
- * VIPT and PIPT caches are not affected by VMID, so no maintenance
- * is necessary across a VMID rollover.
- *
- * VPIPT caches constrain lookup and maintenance to the active VMID,
- * so we need to invalidate lines with a stale VMID to avoid an ABA
- * race after multiple rollovers.
- *
- */
- if (icache_is_vpipt())
- asm volatile("ic ialluis");
-
dsb(ish);
}
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index b636b4111dbf..b32e2940df7d 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -216,18 +216,5 @@ void __kvm_flush_vm_context(void)
{
dsb(ishst);
__tlbi(alle1is);
-
- /*
- * VIPT and PIPT caches are not affected by VMID, so no maintenance
- * is necessary across a VMID rollover.
- *
- * VPIPT caches constrain lookup and maintenance to the active VMID,
- * so we need to invalidate lines with a stale VMID to avoid an ABA
- * race after multiple rollovers.
- *
- */
- if (icache_is_vpipt())
- asm volatile("ic ialluis");
-
dsb(ish);
}
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index fe99b3dab6ce..3d9467ff73bc 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -267,9 +267,8 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
- u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
+ u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));
- val &= ARMV8_PMU_PMCR_N_MASK;
if (val == 0)
return BIT(ARMV8_PMU_CYCLE_IDX);
else
@@ -1136,8 +1135,7 @@ u8 kvm_arm_pmu_get_pmuver_limit(void)
*/
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
- u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) &
- ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+ u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
- return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
+ return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N);
}
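
The PMU hunks above replace open-coded ">> ARMV8_PMU_PMCR_N_SHIFT" and
"& ARMV8_PMU_PMCR_N_MASK" pairs with FIELD_GET() and u64_replace_bits() driven by a
single mask. A user-space sketch of what those helpers do, assuming PMCR_EL0.N
occupies bits [15:11]; field_get() and field_replace() below are simplified stand-ins
for the kernel's bitfield helpers, not their real implementations.

#include <inttypes.h>
#include <stdio.h>

#define PMCR_N_MASK	(0x1fULL << 11)		/* GENMASK(15, 11), assumed layout */

static uint64_t field_get(uint64_t mask, uint64_t reg)
{
	return (reg & mask) / (mask & -mask);	/* divide by the mask's lowest bit */
}

static uint64_t field_replace(uint64_t reg, uint64_t mask, uint64_t val)
{
	return (reg & ~mask) | ((val * (mask & -mask)) & mask);
}

int main(void)
{
	uint64_t pmcr = 0xdead0841;	/* arbitrary PMCR value, N == 1 */

	printf("N = %" PRIu64 "\n", field_get(PMCR_N_MASK, pmcr));
	printf("pmcr' = 0x%" PRIx64 "\n", field_replace(pmcr, PMCR_N_MASK, 6));
	return 0;
}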
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 4735e1b37fb3..ff45d688bd7d 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -877,7 +877,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
u64 pmcr, val;
pmcr = kvm_vcpu_read_pmcr(vcpu);
- val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+ val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
kvm_inject_undefined(vcpu);
return false;
@@ -1143,7 +1143,7 @@ static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 val)
{
- u8 new_n = (val >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+ u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
struct kvm *kvm = vcpu->kvm;
mutex_lock(&kvm->arch.config_lock);
diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S
index c336d2ffdec5..6a56d7cf309d 100644
--- a/arch/arm64/lib/copy_page.S
+++ b/arch/arm64/lib/copy_page.S
@@ -18,13 +18,6 @@
* x1 - src
*/
SYM_FUNC_START(__pi_copy_page)
-alternative_if ARM64_HAS_NO_HW_PREFETCH
- // Prefetch three cache lines ahead.
- prfm pldl1strm, [x1, #128]
- prfm pldl1strm, [x1, #256]
- prfm pldl1strm, [x1, #384]
-alternative_else_nop_endif
-
ldp x2, x3, [x1]
ldp x4, x5, [x1, #16]
ldp x6, x7, [x1, #32]
@@ -39,10 +32,6 @@ alternative_else_nop_endif
1:
tst x0, #(PAGE_SIZE - 1)
-alternative_if ARM64_HAS_NO_HW_PREFETCH
- prfm pldl1strm, [x1, #384]
-alternative_else_nop_endif
-
stnp x2, x3, [x0, #-256]
ldp x2, x3, [x1]
stnp x4, x5, [x0, #16 - 256]
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 460d799e1296..55f6455a8284 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -607,6 +607,8 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ if (fault & VM_FAULT_MAJOR)
+ mm_flags |= FAULT_FLAG_TRIED;
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index f5aae342632c..8116ac599f80 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -51,7 +51,7 @@ void __init arm64_hugetlb_cma_reserve(void)
* page allocator. Just warn if there is any change
* breaking this assumption.
*/
- WARN_ON(order <= MAX_ORDER);
+ WARN_ON(order <= MAX_PAGE_ORDER);
hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 555285ebd5af..4c7ad574b946 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -170,6 +170,11 @@ asmlinkage void __init kasan_early_init(void)
{
BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
+ /*
+ * We cannot check the actual value of KASAN_SHADOW_START during build,
+ * as it depends on vabits_actual. As a best-effort approach, check
+ * potential values calculated based on VA_BITS and VA_BITS_MIN.
+ */
BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 15f6347d23b6..1ac7467d34c9 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -52,9 +52,6 @@ u64 vabits_actual __ro_after_init = VA_BITS_MIN;
EXPORT_SYMBOL(vabits_actual);
#endif
-u64 kimage_vaddr __ro_after_init = (u64)&_text;
-EXPORT_SYMBOL(kimage_vaddr);
-
u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);
@@ -674,6 +671,9 @@ static int __init map_entry_trampoline(void)
{
int i;
+ if (!arm64_kernel_unmapped_at_el0())
+ return 0;
+
pgprot_t prot = kernel_exec_prot();
phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index b98c38288a9d..1e07d74d7a6c 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -37,10 +37,10 @@ HAS_GIC_PRIO_MASKING
HAS_GIC_PRIO_RELAXED_SYNC
HAS_HCX
HAS_LDAPR
+HAS_LPA2
HAS_LSE_ATOMICS
HAS_MOPS
HAS_NESTED_VIRT
-HAS_NO_HW_PREFETCH
HAS_PAN
HAS_S1PIE
HAS_RAS_EXTN
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 96cbeeab4eec..4c9b67934367 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -1002,6 +1002,27 @@ UnsignedEnum 3:0 BT
EndEnum
EndSysreg
+Sysreg ID_AA64PFR2_EL1 3 0 0 4 2
+Res0 63:36
+UnsignedEnum 35:32 FPMR
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Res0 31:12
+UnsignedEnum 11:8 MTEFAR
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 7:4 MTESTOREONLY
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 3:0 MTEPERM
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
Sysreg ID_AA64ZFR0_EL1 3 0 0 4 4
Res0 63:60
UnsignedEnum 59:56 F64MM
@@ -1058,7 +1079,11 @@ UnsignedEnum 63 FA64
0b0 NI
0b1 IMP
EndEnum
-Res0 62:60
+Res0 62:61
+UnsignedEnum 60 LUTv2
+ 0b0 NI
+ 0b1 IMP
+EndEnum
UnsignedEnum 59:56 SMEver
0b0000 SME
0b0001 SME2
@@ -1086,7 +1111,14 @@ UnsignedEnum 42 F16F16
0b0 NI
0b1 IMP
EndEnum
-Res0 41:40
+UnsignedEnum 41 F8F16
+ 0b0 NI
+ 0b1 IMP
+EndEnum
+UnsignedEnum 40 F8F32
+ 0b0 NI
+ 0b1 IMP
+EndEnum
UnsignedEnum 39:36 I8I32
0b0000 NI
0b1111 IMP
@@ -1107,7 +1139,49 @@ UnsignedEnum 32 F32F32
0b0 NI
0b1 IMP
EndEnum
-Res0 31:0
+Res0 31
+UnsignedEnum 30 SF8FMA
+ 0b0 NI
+ 0b1 IMP
+EndEnum
+UnsignedEnum 29 SF8DP4
+ 0b0 NI
+ 0b1 IMP
+EndEnum
+UnsignedEnum 28 SF8DP2
+ 0b0 NI
+ 0b1 IMP
+EndEnum
+Res0 27:0
+EndSysreg
+
+Sysreg ID_AA64FPFR0_EL1 3 0 0 4 7
+Res0 63:32
+UnsignedEnum 31 F8CVT
+ 0b0 NI
+ 0b1 IMP
+EndEnum
+UnsignedEnum 30 F8FMA
+ 0b0 NI
+ 0b1 IMP
+EndEnum
+UnsignedEnum 29 F8DP4
+ 0b0 NI
+ 0b1 IMP
+EndEnum
+UnsignedEnum 28 F8DP2
+ 0b0 NI
+ 0b1 IMP
+EndEnum
+Res0 27:2
+UnsignedEnum 1 F8E4M3
+ 0b0 NI
+ 0b1 IMP
+EndEnum
+UnsignedEnum 0 F8E5M2
+ 0b0 NI
+ 0b1 IMP
+EndEnum
EndSysreg
Sysreg ID_AA64DFR0_EL1 3 0 0 5 0
@@ -1115,7 +1189,10 @@ Enum 63:60 HPMN0
0b0000 UNPREDICTABLE
0b0001 DEF
EndEnum
-Res0 59:56
+UnsignedEnum 59:56 ExtTrcBuff
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
UnsignedEnum 55:52 BRBE
0b0000 NI
0b0001 IMP
@@ -1327,6 +1404,7 @@ UnsignedEnum 11:8 API
0b0011 PAuth2
0b0100 FPAC
0b0101 FPACCOMBINE
+ 0b0110 PAuth_LR
EndEnum
UnsignedEnum 7:4 APA
0b0000 NI
@@ -1335,6 +1413,7 @@ UnsignedEnum 7:4 APA
0b0011 PAuth2
0b0100 FPAC
0b0101 FPACCOMBINE
+ 0b0110 PAuth_LR
EndEnum
UnsignedEnum 3:0 DPB
0b0000 NI
@@ -1344,7 +1423,14 @@ EndEnum
EndSysreg
Sysreg ID_AA64ISAR2_EL1 3 0 0 6 2
-Res0 63:56
+UnsignedEnum 63:60 ATS1A
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 59:56 LUT
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
UnsignedEnum 55:52 CSSC
0b0000 NI
0b0001 IMP
@@ -1353,7 +1439,19 @@ UnsignedEnum 51:48 RPRFM
0b0000 NI
0b0001 IMP
EndEnum
-Res0 47:32
+Res0 47:44
+UnsignedEnum 43:40 PRFMSLC
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 39:36 SYSINSTR_128
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 35:32 SYSREG_128
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
UnsignedEnum 31:28 CLRBHB
0b0000 NI
0b0001 IMP
@@ -1377,6 +1475,7 @@ UnsignedEnum 15:12 APA3
0b0011 PAuth2
0b0100 FPAC
0b0101 FPACCOMBINE
+ 0b0110 PAuth_LR
EndEnum
UnsignedEnum 11:8 GPA3
0b0000 NI
@@ -1392,6 +1491,23 @@ UnsignedEnum 3:0 WFxT
EndEnum
EndSysreg
+Sysreg ID_AA64ISAR3_EL1 3 0 0 6 3
+Res0 63:12
+UnsignedEnum 11:8 TLBIW
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 7:4 FAMINMAX
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 3:0 CPA
+ 0b0000 NI
+ 0b0001 IMP
+ 0b0010 CPA2
+EndEnum
+EndSysreg
+
Sysreg ID_AA64MMFR0_EL1 3 0 0 7 0
UnsignedEnum 63:60 ECV
0b0000 NI
@@ -1680,7 +1796,8 @@ Field 63 TIDCP
Field 62 SPINTMASK
Field 61 NMI
Field 60 EnTP2
-Res0 59:58
+Field 59 TCSO
+Field 58 TCSO0
Field 57 EPAN
Field 56 EnALS
Field 55 EnAS0
@@ -1709,7 +1826,7 @@ EndEnum
Field 37 ITFSB
Field 36 BT1
Field 35 BT0
-Res0 34
+Field 34 EnFPM
Field 33 MSCEn
Field 32 CMOW
Field 31 EnIA
@@ -1747,7 +1864,8 @@ Field 0 M
EndSysreg
SysregFields CPACR_ELx
-Res0 63:29
+Res0 63:30
+Field 29 E0POE
Field 28 TTA
Res0 27:26
Field 25:24 SMEN
@@ -1790,6 +1908,41 @@ Sysreg SMCR_EL1 3 0 1 2 6
Fields SMCR_ELx
EndSysreg
+SysregFields GCSCR_ELx
+Res0 63:10
+Field 9 STREn
+Field 8 PUSHMEn
+Res0 7
+Field 6 EXLOCKEN
+Field 5 RVCHKEN
+Res0 4:1
+Field 0 PCRSEL
+EndSysregFields
+
+Sysreg GCSCR_EL1 3 0 2 5 0
+Fields GCSCR_ELx
+EndSysreg
+
+SysregFields GCSPR_ELx
+Field 63:3 PTR
+Res0 2:0
+EndSysregFields
+
+Sysreg GCSPR_EL1 3 0 2 5 1
+Fields GCSPR_ELx
+EndSysreg
+
+Sysreg GCSCRE0_EL1 3 0 2 5 2
+Res0 63:11
+Field 10 nTR
+Field 9 STREn
+Field 8 PUSHMEn
+Res0 7:6
+Field 5 RVCHKEN
+Res0 4:1
+Field 0 PCRSEL
+EndSysreg
+
Sysreg ALLINT 3 0 4 3 0
Res0 63:14
Field 13 ALLINT
@@ -1933,10 +2086,18 @@ Sysreg CONTEXTIDR_EL1 3 0 13 0 1
Fields CONTEXTIDR_ELx
EndSysreg
+Sysreg RCWSMASK_EL1 3 0 13 0 3
+Field 63:0 RCWSMASK
+EndSysreg
+
Sysreg TPIDR_EL1 3 0 13 0 4
Field 63:0 ThreadID
EndSysreg
+Sysreg RCWMASK_EL1 3 0 13 0 6
+Field 63:0 RCWMASK
+EndSysreg
+
Sysreg SCXTNUM_EL1 3 0 13 0 7
Field 63:0 SoftwareContextNumber
EndSysreg
@@ -2004,9 +2165,10 @@ Field 27:24 CWG
Field 23:20 ERG
Field 19:16 DminLine
Enum 15:14 L1Ip
- 0b00 VPIPT
+ # This was named as VPIPT in the ARM but now documented as reserved
+ 0b00 RESERVED_VPIPT
# This is named as AIVIVT in the ARM but documented as reserved
- 0b01 RESERVED
+ 0b01 RESERVED_AIVIVT
0b10 VIPT
0b11 PIPT
EndEnum
@@ -2020,12 +2182,39 @@ Field 4 DZP
Field 3:0 BS
EndSysreg
+Sysreg GCSPR_EL0 3 3 2 5 1
+Fields GCSPR_ELx
+EndSysreg
+
Sysreg SVCR 3 3 4 2 2
Res0 63:2
Field 1 ZA
Field 0 SM
EndSysreg
+Sysreg FPMR 3 3 4 4 2
+Res0 63:38
+Field 37:32 LSCALE2
+Field 31:24 NSCALE
+Res0 23
+Field 22:16 LSCALE
+Field 15 OSC
+Field 14 OSM
+Res0 13:9
+UnsignedEnum 8:6 F8D
+ 0b000 E5M2
+ 0b001 E4M3
+EndEnum
+UnsignedEnum 5:3 F8S2
+ 0b000 E5M2
+ 0b001 E4M3
+EndEnum
+UnsignedEnum 2:0 F8S1
+ 0b000 E5M2
+ 0b001 E4M3
+EndEnum
+EndSysreg
+
SysregFields HFGxTR_EL2
Field 63 nAMAIR2_EL1
Field 62 nMAIR2_EL1
@@ -2102,7 +2291,9 @@ Fields HFGxTR_EL2
EndSysreg
Sysreg HFGITR_EL2 3 4 1 1 6
-Res0 63:61
+Res0 63
+Field 62 ATS1E1A
+Res0 61
Field 60 COSPRCTX
Field 59 nGCSEPP
Field 58 nGCSSTR_EL1
@@ -2295,12 +2486,57 @@ Field 1 DBGBVRn_EL1
Field 0 DBGBCRn_EL1
EndSysreg
+Sysreg HAFGRTR_EL2 3 4 3 1 6
+Res0 63:50
+Field 49 AMEVTYPER115_EL0
+Field 48 AMEVCNTR115_EL0
+Field 47 AMEVTYPER114_EL0
+Field 46 AMEVCNTR114_EL0
+Field 45 AMEVTYPER113_EL0
+Field 44 AMEVCNTR113_EL0
+Field 43 AMEVTYPER112_EL0
+Field 42 AMEVCNTR112_EL0
+Field 41 AMEVTYPER111_EL0
+Field 40 AMEVCNTR111_EL0
+Field 39 AMEVTYPER110_EL0
+Field 38 AMEVCNTR110_EL0
+Field 37 AMEVTYPER19_EL0
+Field 36 AMEVCNTR19_EL0
+Field 35 AMEVTYPER18_EL0
+Field 34 AMEVCNTR18_EL0
+Field 33 AMEVTYPER17_EL0
+Field 32 AMEVCNTR17_EL0
+Field 31 AMEVTYPER16_EL0
+Field 30 AMEVCNTR16_EL0
+Field 29 AMEVTYPER15_EL0
+Field 28 AMEVCNTR15_EL0
+Field 27 AMEVTYPER14_EL0
+Field 26 AMEVCNTR14_EL0
+Field 25 AMEVTYPER13_EL0
+Field 24 AMEVCNTR13_EL0
+Field 23 AMEVTYPER12_EL0
+Field 22 AMEVCNTR12_EL0
+Field 21 AMEVTYPER11_EL0
+Field 20 AMEVCNTR11_EL0
+Field 19 AMEVTYPER10_EL0
+Field 18 AMEVCNTR10_EL0
+Field 17 AMCNTEN1
+Res0 16:5
+Field 4 AMEVCNTR03_EL0
+Field 3 AMEVCNTR02_EL0
+Field 2 AMEVCNTR01_EL0
+Field 1 AMEVCNTR00_EL0
+Field 0 AMCNTEN0
+EndSysreg
+
Sysreg ZCR_EL2 3 4 1 2 0
Fields ZCR_ELx
EndSysreg
Sysreg HCRX_EL2 3 4 1 2 2
-Res0 63:23
+Res0 63:25
+Field 24 PACMEn
+Field 23 EnFPM
Field 22 GCSEn
Field 21 EnIDCP128
Field 20 EnSDERR
@@ -2348,6 +2584,14 @@ Sysreg SMCR_EL2 3 4 1 2 6
Fields SMCR_ELx
EndSysreg
+Sysreg GCSCR_EL2 3 4 2 5 0
+Fields GCSCR_ELx
+EndSysreg
+
+Sysreg GCSPR_EL2 3 4 2 5 1
+Fields GCSPR_ELx
+EndSysreg
+
Sysreg DACR32_EL2 3 4 3 0 0
Res0 63:32
Field 31:30 D15
@@ -2407,6 +2651,14 @@ Sysreg SMCR_EL12 3 5 1 2 6
Fields SMCR_ELx
EndSysreg
+Sysreg GCSCR_EL12 3 5 2 5 0
+Fields GCSCR_ELx
+EndSysreg
+
+Sysreg GCSPR_EL12 3 5 2 5 1
+Fields GCSPR_ELx
+EndSysreg
+
Sysreg FAR_EL12 3 5 6 0 0
Field 63:0 ADDR
EndSysreg
@@ -2471,6 +2723,33 @@ Field 1 PIE
Field 0 PnCH
EndSysreg
+SysregFields MAIR2_ELx
+Field 63:56 Attr7
+Field 55:48 Attr6
+Field 47:40 Attr5
+Field 39:32 Attr4
+Field 31:24 Attr3
+Field 23:16 Attr2
+Field 15:8 Attr1
+Field 7:0 Attr0
+EndSysregFields
+
+Sysreg MAIR2_EL1 3 0 10 2 1
+Fields MAIR2_ELx
+EndSysreg
+
+Sysreg MAIR2_EL2 3 4 10 1 1
+Fields MAIR2_ELx
+EndSysreg
+
+Sysreg AMAIR2_EL1 3 0 10 3 1
+Field 63:0 ImpDef
+EndSysreg
+
+Sysreg AMAIR2_EL2 3 4 10 3 1
+Field 63:0 ImpDef
+EndSysreg
+
SysregFields PIRx_ELx
Field 63:60 Perm15
Field 59:56 Perm14
@@ -2510,6 +2789,26 @@ Sysreg PIR_EL2 3 4 10 2 3
Fields PIRx_ELx
EndSysreg
+Sysreg POR_EL0 3 3 10 2 4
+Fields PIRx_ELx
+EndSysreg
+
+Sysreg POR_EL1 3 0 10 2 4
+Fields PIRx_ELx
+EndSysreg
+
+Sysreg POR_EL12 3 5 10 2 4
+Fields PIRx_ELx
+EndSysreg
+
+Sysreg S2POR_EL1 3 0 10 2 5
+Fields PIRx_ELx
+EndSysreg
+
+Sysreg S2PIR_EL2 3 4 10 2 5
+Fields PIRx_ELx
+EndSysreg
+
Sysreg LORSA_EL1 3 0 10 4 0
Res0 63:52
Field 51:16 SA
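
Each "Sysreg NAME op0 op1 CRn CRm op2" header in this file (for example
"Sysreg ID_AA64FPFR0_EL1 3 0 0 4 7" above) supplies the encoding from which the
generated SYS_<NAME> definitions are built. A rough user-space helper showing one
plausible packing of those five numbers; the shift values are an assumption based on
the usual arm64 sys_reg() layout and are not taken from this patch.

#include <stdio.h>

static unsigned int sys_reg(unsigned int op0, unsigned int op1,
			    unsigned int crn, unsigned int crm,
			    unsigned int op2)
{
	/* assumed shifts: op0 at 19, op1 at 16, CRn at 12, CRm at 8, op2 at 5 */
	return (op0 << 19) | (op1 << 16) | (crn << 12) | (crm << 8) | (op2 << 5);
}

int main(void)
{
	/* ID_AA64FPFR0_EL1 is declared as "3 0 0 4 7" in the file above */
	printf("SYS_ID_AA64FPFR0_EL1 = 0x%x\n", sys_reg(3, 0, 0, 4, 7));
	return 0;
}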
diff --git a/arch/hexagon/include/asm/irq.h b/arch/hexagon/include/asm/irq.h
index 1f7f1292f701..a60d26754caa 100644
--- a/arch/hexagon/include/asm/irq.h
+++ b/arch/hexagon/include/asm/irq.h
@@ -20,4 +20,7 @@
#include <asm-generic/irq.h>
+struct pt_regs;
+void arch_do_IRQ(struct pt_regs *);
+
#endif
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index dd7f74ea2c20..2a77bfd75694 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -5,6 +5,7 @@
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*/
+#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
@@ -152,6 +153,7 @@ unsigned long __get_wchan(struct task_struct *p)
* Returns 0 if there's no need to re-check for more work.
*/
+int do_work_pending(struct pt_regs *regs, u32 thread_info_flags);
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
if (!(thread_info_flags & _TIF_WORK_MASK)) {
diff --git a/arch/hexagon/kernel/reset.c b/arch/hexagon/kernel/reset.c
index da36114d928f..efd70a8d2526 100644
--- a/arch/hexagon/kernel/reset.c
+++ b/arch/hexagon/kernel/reset.c
@@ -3,6 +3,7 @@
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
+#include <linux/reboot.h>
#include <linux/smp.h>
#include <asm/hexagon_vm.h>
diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c
index bcba31e9e0ae..d301f4621553 100644
--- a/arch/hexagon/kernel/signal.c
+++ b/arch/hexagon/kernel/signal.c
@@ -220,7 +220,7 @@ no_restart:
* Architecture-specific wrappers for signal-related system calls
*/
-asmlinkage int sys_rt_sigreturn(void)
+SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 4e8bee25b8c6..608884bc3396 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -79,7 +79,7 @@ void smp_vm_unmask_irq(void *info)
* Specifically, first arg is irq, second is the irq_desc.
*/
-irqreturn_t handle_ipi(int irq, void *desc)
+static irqreturn_t handle_ipi(int irq, void *desc)
{
int cpu = smp_processor_id();
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
@@ -124,7 +124,7 @@ void __init smp_prepare_boot_cpu(void)
* to point to current thread info
*/
-void start_secondary(void)
+static void start_secondary(void)
{
unsigned long thread_ptr;
unsigned int cpu, irq;
diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c
index febc95714d75..f0f207e2a694 100644
--- a/arch/hexagon/kernel/time.c
+++ b/arch/hexagon/kernel/time.c
@@ -17,7 +17,9 @@
#include <linux/of_irq.h>
#include <linux/module.h>
+#include <asm/delay.h>
#include <asm/hexagon_vm.h>
+#include <asm/time.h>
#define TIMER_ENABLE BIT(0)
@@ -160,7 +162,7 @@ static irqreturn_t timer_interrupt(int irq, void *devid)
* This runs just before the delay loop is calibrated, and
* is used for delay calibration.
*/
-void __init time_init_deferred(void)
+static void __init time_init_deferred(void)
{
struct resource *resource = NULL;
struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index 6447763ce5a9..75e062722d28 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -281,6 +281,7 @@ static void cache_error(struct pt_regs *regs)
/*
* General exception handler
*/
+void do_genex(struct pt_regs *regs);
void do_genex(struct pt_regs *regs)
{
/*
@@ -331,13 +332,7 @@ void do_genex(struct pt_regs *regs)
}
}
-/* Indirect system call dispatch */
-long sys_syscall(void)
-{
- printk(KERN_ERR "sys_syscall invoked!\n");
- return -ENOSYS;
-}
-
+void do_trap0(struct pt_regs *regs);
void do_trap0(struct pt_regs *regs)
{
syscall_fn syscall;
@@ -415,6 +410,7 @@ void do_trap0(struct pt_regs *regs)
/*
* Machine check exception handler
*/
+void do_machcheck(struct pt_regs *regs);
void do_machcheck(struct pt_regs *regs)
{
/* Halt and catch fire */
@@ -425,6 +421,7 @@ void do_machcheck(struct pt_regs *regs)
* Treat this like the old 0xdb trap.
*/
+void do_debug_exception(struct pt_regs *regs);
void do_debug_exception(struct pt_regs *regs)
{
regs->hvmer.vmest &= ~HVM_VMEST_CAUSE_MSK;
diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c
index b70970ac809f..2e4872d62124 100644
--- a/arch/hexagon/kernel/vdso.c
+++ b/arch/hexagon/kernel/vdso.c
@@ -10,6 +10,7 @@
#include <linux/vmalloc.h>
#include <linux/binfmts.h>
+#include <asm/elf.h>
#include <asm/vdso.h>
static struct page *vdso_page;
diff --git a/arch/hexagon/kernel/vm_events.c b/arch/hexagon/kernel/vm_events.c
index 59ef72e4a4e5..2b881a89b206 100644
--- a/arch/hexagon/kernel/vm_events.c
+++ b/arch/hexagon/kernel/vm_events.c
@@ -73,13 +73,6 @@ void show_regs(struct pt_regs *regs)
pt_psp(regs), pt_badva(regs), ints_enabled(regs));
}
-void dummy_handler(struct pt_regs *regs)
-{
- unsigned int elr = pt_elr(regs);
- printk(KERN_ERR "Unimplemented handler; ELR=0x%08x\n", elr);
-}
-
-
void arch_do_IRQ(struct pt_regs *regs)
{
int irq = pt_cause(regs);
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index 146115c9de61..3458f39ca2ac 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -12,6 +12,7 @@
#include <linux/highmem.h>
#include <asm/tlb.h>
#include <asm/sections.h>
+#include <asm/setup.h>
#include <asm/vm_mmu.h>
/*
@@ -86,7 +87,7 @@ void sync_icache_dcache(pte_t pte)
* In this mode, we only have one pg_data_t
* structure: contig_mem_data.
*/
-void __init paging_init(void)
+static void __init paging_init(void)
{
unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c
index 650bca92f0b7..3204e9ba6d6f 100644
--- a/arch/hexagon/mm/uaccess.c
+++ b/arch/hexagon/mm/uaccess.c
@@ -35,11 +35,3 @@ __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
return count;
}
-
-unsigned long clear_user_hexagon(void __user *dest, unsigned long count)
-{
- if (!access_ok(dest, count))
- return count;
- else
- return __clear_user_hexagon(dest, count);
-}
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index 7295ea3f8cc8..3771fb453898 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -12,6 +12,7 @@
*/
#include <asm/traps.h>
+#include <asm/vm_fault.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
@@ -33,7 +34,7 @@
/*
* Canonical page fault handler
*/
-void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
+static void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
diff --git a/arch/hexagon/mm/vm_tlb.c b/arch/hexagon/mm/vm_tlb.c
index 53482f2a9ff9..8b6405e2234b 100644
--- a/arch/hexagon/mm/vm_tlb.c
+++ b/arch/hexagon/mm/vm_tlb.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/hexagon_vm.h>
+#include <asm/tlbflush.h>
/*
* Initial VM implementation has only one map active at a time, with
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 29d9b12298bc..8b5df1bbf9e9 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -523,6 +523,7 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd)
return pmd;
}
+#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index 6e65ff12d5c7..8fe21f868f72 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -226,32 +226,6 @@ static void __init node_mem_init(unsigned int node)
#ifdef CONFIG_ACPI_NUMA
-/*
- * Sanity check to catch more bad NUMA configurations (they are amazingly
- * common). Make sure the nodes cover all memory.
- */
-static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
-{
- int i;
- u64 numaram, biosram;
-
- numaram = 0;
- for (i = 0; i < mi->nr_blks; i++) {
- u64 s = mi->blk[i].start >> PAGE_SHIFT;
- u64 e = mi->blk[i].end >> PAGE_SHIFT;
-
- numaram += e - s;
- numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
- if ((s64)numaram < 0)
- numaram = 0;
- }
- max_pfn = max_low_pfn;
- biosram = max_pfn - absent_pages_in_range(0, max_pfn);
-
- BUG_ON((s64)(biosram - numaram) >= (1 << (20 - PAGE_SHIFT)));
- return true;
-}
-
static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type)
{
static unsigned long num_physpages;
@@ -396,7 +370,7 @@ int __init init_numa_memory(void)
return -EINVAL;
init_node_memblock();
- if (numa_meminfo_cover_memory(&numa_meminfo) == false)
+ if (!memblock_validate_numa_coverage(SZ_1M))
return -EINVAL;
for_each_node_mask(node, node_possible_map) {
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index ad69b466a08b..9dcf245c9cbf 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -402,7 +402,7 @@ config ARCH_FORCE_MAX_ORDER
default "10"
help
The kernel page allocator limits the size of maximal physically
- contiguous allocations. The limit is called MAX_ORDER and it
+ contiguous allocations. The limit is called MAX_PAGE_ORDER and it
defines the maximal power of two of number of pages that can be
allocated as a single contiguous block. This option allows
overriding the default setting when ability to allocate very
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 7e6b74b6eecd..b4d71fea558f 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -453,6 +453,7 @@ CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS_FS=m
@@ -550,7 +551,6 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 0b403e2efcd5..682d8cd3dd3c 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -410,6 +410,7 @@ CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS_FS=m
@@ -507,7 +508,6 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 57aac3f4b001..15259ced8463 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -430,6 +430,7 @@ CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS_FS=m
@@ -527,7 +528,6 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 3c160636a2e9..7395c12caef6 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -402,6 +402,7 @@ CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS_FS=m
@@ -499,7 +500,6 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 23cf07c49d14..92506bc7f78d 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -412,6 +412,7 @@ CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS_FS=m
@@ -509,7 +510,6 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 619a0d93ce5b..144bc8c0d8b5 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -269,9 +269,6 @@ CONFIG_RDS_TCP=m
CONFIG_L2TP=m
CONFIG_BRIDGE=m
CONFIG_ATALK=m
-CONFIG_DEV_APPLETALK=m
-CONFIG_IPDDP=m
-CONFIG_IPDDP_ENCAP=y
CONFIG_6LOWPAN=m
CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
CONFIG_6LOWPAN_GHC_UDP=m
@@ -432,6 +429,7 @@ CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS_FS=m
@@ -529,7 +527,6 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index d9430bc2b2de..07594c729497 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -289,9 +289,6 @@ CONFIG_RDS_TCP=m
CONFIG_L2TP=m
CONFIG_BRIDGE=m
CONFIG_ATALK=m
-CONFIG_DEV_APPLETALK=m
-CONFIG_IPDDP=m
-CONFIG_IPDDP_ENCAP=y
CONFIG_6LOWPAN=m
CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
CONFIG_6LOWPAN_GHC_UDP=m
@@ -518,6 +515,7 @@ CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS_FS=m
@@ -615,7 +613,6 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index eb6132f29bf5..c34de6c1de20 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -401,6 +401,7 @@ CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS_FS=m
@@ -498,7 +499,6 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index d0bad674cbb7..83bc029d1f33 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -402,6 +402,7 @@ CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS_FS=m
@@ -499,7 +500,6 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index dad6bcfcaeed..4f551dac2ed7 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -419,6 +419,7 @@ CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS_FS=m
@@ -516,7 +517,6 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index eb1b489b3139..b1bf01182bd3 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -400,6 +400,7 @@ CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS_FS=m
@@ -497,7 +498,6 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 939589826546..5c9a3f71f036 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -400,6 +400,7 @@ CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS_FS=m
@@ -497,7 +498,6 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index 7a4b780e82cb..7fd43fd4c9f2 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -456,3 +456,8 @@
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 5b6a0b02b7de..b00ab2cabab9 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -462,3 +462,8 @@
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
index af2967bffb73..e2d623621a00 100644
--- a/arch/mips/Kbuild
+++ b/arch/mips/Kbuild
@@ -1,10 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-# Fail on warnings - also for files referenced in subdirs
-# -Werror can be disabled for specific files using:
-# CFLAGS_<file.o> := -Wno-error
-ifeq ($(W),)
-subdir-ccflags-y := -Werror
-endif
# platform specific definitions
include $(srctree)/arch/mips/Kbuild.platforms
diff --git a/arch/mips/boot/compressed/dbg.c b/arch/mips/boot/compressed/dbg.c
index f6728a8fd1c3..2f1ac38fe1cc 100644
--- a/arch/mips/boot/compressed/dbg.c
+++ b/arch/mips/boot/compressed/dbg.c
@@ -9,6 +9,8 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include "decompress.h"
+
void __weak putc(char c)
{
}
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index c5dd415254d3..adb6d5b0e6eb 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -19,6 +19,8 @@
#include <asm/unaligned.h>
#include <asm-generic/vmlinux.lds.h>
+#include "decompress.h"
+
/*
* These two variables specify the free mem region
* that can be used for temporary malloc area
@@ -26,20 +28,6 @@
unsigned long free_mem_ptr;
unsigned long free_mem_end_ptr;
-/* The linker tells us where the image is. */
-extern unsigned char __image_begin[], __image_end[];
-
-/* debug interfaces */
-#ifdef CONFIG_DEBUG_ZBOOT
-extern void puts(const char *s);
-extern void puthex(unsigned long long val);
-#else
-#define puts(s) do {} while (0)
-#define puthex(val) do {} while (0)
-#endif
-
-extern char __appended_dtb[];
-
void error(char *x)
{
puts("\n\n");
diff --git a/arch/mips/boot/compressed/decompress.h b/arch/mips/boot/compressed/decompress.h
new file mode 100644
index 000000000000..073b64593b3d
--- /dev/null
+++ b/arch/mips/boot/compressed/decompress.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _DECOMPRESSOR_H
+#define _DECOMPRESSOR_H
+
+/* The linker tells us where the image is. */
+extern unsigned char __image_begin[], __image_end[];
+
+/* debug interfaces */
+#ifdef CONFIG_DEBUG_ZBOOT
+extern void putc(char c);
+extern void puts(const char *s);
+extern void puthex(unsigned long long val);
+#else
+#define putc(s) do {} while (0)
+#define puts(s) do {} while (0)
+#define puthex(val) do {} while (0)
+#endif
+
+extern char __appended_dtb[];
+
+void error(char *x);
+void decompress_kernel(unsigned long boot_heap_start);
+
+#endif
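
The new decompress.h above gives every user of the boot-time debug helpers a single set of declarations, with no-op macro fallbacks when CONFIG_DEBUG_ZBOOT is off. A short sketch of a call site that relies on that; the function name is illustrative, the strings mirror what decompress.c already prints:

#include "decompress.h"

/* Builds identically with or without CONFIG_DEBUG_ZBOOT: the header either
 * declares the real puts()/puthex() or defines them away as empty statements. */
static void report_image_bounds(void)
{
	puts("zimage at:     ");
	puthex((unsigned long long)(unsigned long)__image_begin);
	puts(" ");
	puthex((unsigned long long)(unsigned long)__image_end);
	puts("\n");
}
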
diff --git a/arch/mips/boot/compressed/string.c b/arch/mips/boot/compressed/string.c
index 0b593b709228..f0eb251e44e5 100644
--- a/arch/mips/boot/compressed/string.c
+++ b/arch/mips/boot/compressed/string.c
@@ -7,6 +7,7 @@
#include <linux/compiler_attributes.h>
#include <linux/types.h>
+#include <asm/string.h>
void *memcpy(void *dest, const void *src, size_t n)
{
diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
index 3424a7908c0f..8b08db3fb17a 100644
--- a/arch/mips/include/asm/cache.h
+++ b/arch/mips/include/asm/cache.h
@@ -17,5 +17,11 @@
#define __read_mostly __section(".data..read_mostly")
extern void cache_noop(void);
+extern void r3k_cache_init(void);
+extern unsigned long r3k_cache_size(unsigned long);
+extern unsigned long r3k_cache_lsize(unsigned long);
+extern void r4k_cache_init(void);
+extern void octeon_cache_init(void);
+extern void au1x00_fixup_config_od(void);
#endif /* _ASM_CACHE_H */
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index c5c6864e64bc..081be98c71ef 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -15,6 +15,9 @@
#include <linux/types.h>
#include <asm/isa-rev.h>
+struct module;
+extern void jump_label_apply_nops(struct module *mod);
+
#define JUMP_LABEL_NOP_SIZE 4
#ifdef CONFIG_64BIT
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index ebb1deaa77b9..a3d65d37b8b5 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -18,7 +18,6 @@ extern struct pglist_data *__node_data[];
#define NODE_DATA(n) (__node_data[n])
-extern void setup_zero_pages(void);
extern void __init prom_init_numa_memory(void);
#endif /* _ASM_MACH_MMZONE_H */
diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
index 602a21aee9d4..14226ea42036 100644
--- a/arch/mips/include/asm/mmzone.h
+++ b/arch/mips/include/asm/mmzone.h
@@ -20,4 +20,6 @@
#define nid_to_addrbase(nid) 0
#endif
+extern void setup_zero_pages(void);
+
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 430b208c0130..e27a4c83c548 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -655,6 +655,7 @@ static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
return pmd;
}
+#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_MODIFIED);
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index ae2cd37a38f0..ca7662cc65a7 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -402,4 +402,6 @@ extern int mips_set_process_fp_mode(struct task_struct *task,
#define GET_FP_MODE(task) mips_get_process_fp_mode(task)
#define SET_FP_MODE(task,value) mips_set_process_fp_mode(task, value)
+void show_registers(struct pt_regs *regs);
+
#endif /* _ASM_PROCESSOR_H */
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 431a1c9d53fc..da1cd1bbdbc5 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -24,6 +24,10 @@
#include <asm/mmzone.h>
#include <asm/unroll.h>
+extern void r5k_sc_init(void);
+extern void rm7k_sc_init(void);
+extern int mips_sc_init(void);
+
extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index 8c56b862fd9c..4dce41138bad 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -27,5 +27,6 @@ extern unsigned long ebase;
extern unsigned int hwrena;
extern void per_cpu_trap_init(bool);
extern void cpu_cache_init(void);
+extern void tlb_init(void);
#endif /* __SETUP_H */
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h
index 23d6b8015c79..8de81ccef7ad 100644
--- a/arch/mips/include/asm/signal.h
+++ b/arch/mips/include/asm/signal.h
@@ -31,5 +31,6 @@ extern struct mips_abi mips_abi_32;
extern int protected_save_fp_context(void __user *sc);
extern int protected_restore_fp_context(void __user *sc);
+void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags);
#endif /* _ASM_SIGNAL_H */
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 901bc61fa7ae..695928637db4 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -63,6 +63,8 @@ extern asmlinkage void smp_bootstrap(void);
extern void calculate_cpu_foreign_map(void);
+asmlinkage void start_secondary(void);
+
/*
* this function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
diff --git a/arch/mips/include/asm/spram.h b/arch/mips/include/asm/spram.h
index 373f2a5d495d..9f6a2cb1943a 100644
--- a/arch/mips/include/asm/spram.h
+++ b/arch/mips/include/asm/spram.h
@@ -3,7 +3,7 @@
#define _MIPS_SPRAM_H
#if defined(CONFIG_MIPS_SPRAM)
-extern __init void spram_config(void);
+extern void spram_config(void);
#else
static inline void spram_config(void) { }
#endif /* CONFIG_MIPS_SPRAM */
diff --git a/arch/mips/include/asm/syscalls.h b/arch/mips/include/asm/syscalls.h
new file mode 100644
index 000000000000..59f9c0c9fa0a
--- /dev/null
+++ b/arch/mips/include/asm/syscalls.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_MIPS_SYSCALLS_H
+#define _ASM_MIPS_SYSCALLS_H
+
+#include <linux/linkage.h>
+#include <linux/compat.h>
+
+asmlinkage void sys_sigreturn(void);
+asmlinkage void sys_rt_sigreturn(void);
+asmlinkage int sysm_pipe(void);
+asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
+ unsigned long __user *user_mask_ptr);
+asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+ unsigned long __user *user_mask_ptr);
+asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
+ unsigned offset_a3, unsigned len_a4,
+ unsigned len_a5);
+asmlinkage long sys32_fadvise64_64(int fd, int __pad,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ int flags);
+asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,
+ size_t count);
+asmlinkage long sys32_sync_file_range(int fd, int __pad,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ int flags);
+asmlinkage void sys32_rt_sigreturn(void);
+asmlinkage void sys32_sigreturn(void);
+asmlinkage int sys32_sigsuspend(compat_sigset_t __user *uset);
+asmlinkage void sysn32_rt_sigreturn(void);
+
+#endif
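
Most of the MIPS hunks in this series follow the same pattern as the new asm/syscalls.h above: give each asmlinkage entry point a prototype in a shared header and include that header at the definition site, so W=1 builds stop tripping -Wmissing-prototypes. A hedged sketch of what that buys at a definition site (sys_example() is a hypothetical placeholder, not a syscall from this patch):

#include <asm/syscalls.h>	/* assume it declares: asmlinkage long sys_example(void); */

/* The prior prototype from the header keeps -Wmissing-prototypes quiet and
 * lets the compiler cross-check the definition against the declaration. */
asmlinkage long sys_example(void)
{
	return 0;
}
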
diff --git a/arch/mips/include/asm/tlbex.h b/arch/mips/include/asm/tlbex.h
index 6d97e23f30ab..24a2d06cc1c3 100644
--- a/arch/mips/include/asm/tlbex.h
+++ b/arch/mips/include/asm/tlbex.h
@@ -23,6 +23,7 @@ void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep);
void build_tlb_write_entry(u32 **p, struct uasm_label **l,
struct uasm_reloc **r,
enum tlb_write_entry wmode);
+void build_tlb_refill_handler(void);
extern void handle_tlbl(void);
extern char handle_tlbl_end[];
diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h
index 15cde638b407..2c2b26f1e464 100644
--- a/arch/mips/include/asm/traps.h
+++ b/arch/mips/include/asm/traps.h
@@ -39,4 +39,28 @@ extern char except_vec_nmi[];
register_nmi_notifier(&fn##_nb); \
})
+asmlinkage void do_ade(struct pt_regs *regs);
+asmlinkage void do_be(struct pt_regs *regs);
+asmlinkage void do_ov(struct pt_regs *regs);
+asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31);
+asmlinkage void do_bp(struct pt_regs *regs);
+asmlinkage void do_tr(struct pt_regs *regs);
+asmlinkage void do_ri(struct pt_regs *regs);
+asmlinkage void do_cpu(struct pt_regs *regs);
+asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr);
+asmlinkage void do_msa(struct pt_regs *regs);
+asmlinkage void do_mdmx(struct pt_regs *regs);
+asmlinkage void do_watch(struct pt_regs *regs);
+asmlinkage void do_mcheck(struct pt_regs *regs);
+asmlinkage void do_mt(struct pt_regs *regs);
+asmlinkage void do_dsp(struct pt_regs *regs);
+asmlinkage void do_reserved(struct pt_regs *regs);
+asmlinkage void do_ftlb(void);
+asmlinkage void do_gsexc(struct pt_regs *regs, u32 diag1);
+asmlinkage void do_daddi_ov(struct pt_regs *regs);
+
+asmlinkage void cache_parity_error(void);
+asmlinkage void ejtag_exception_handler(struct pt_regs *regs);
+asmlinkage void __noreturn nmi_exception_handler(struct pt_regs *regs);
+
#endif /* _ASM_TRAPS_H */
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 296bcf31abb5..b43bfd445252 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -193,9 +193,7 @@ struct uasm_label {
void uasm_build_label(struct uasm_label **lab, u32 *addr,
int lid);
-#ifdef CONFIG_64BIT
int uasm_in_compat_space_p(long addr);
-#endif
int uasm_rel_hi(long val);
int uasm_rel_lo(long val);
void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b406d8bfb15a..de7460c3a72e 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -179,7 +179,6 @@ void __init check_bugs32(void)
static inline int cpu_has_confreg(void)
{
#ifdef CONFIG_CPU_R3000
- extern unsigned long r3k_cache_size(unsigned long);
unsigned long size1, size2;
unsigned long cfg = read_c0_conf();
diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
index be93469c0e0e..0c826f729f75 100644
--- a/arch/mips/kernel/cpu-r3k-probe.c
+++ b/arch/mips/kernel/cpu-r3k-probe.c
@@ -42,7 +42,6 @@ void __init check_bugs32(void)
static inline int cpu_has_confreg(void)
{
#ifdef CONFIG_CPU_R3000
- extern unsigned long r3k_cache_size(unsigned long);
unsigned long size1, size2;
unsigned long cfg = read_c0_conf();
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 6b61be486303..a0c0a7a654e9 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -42,6 +42,7 @@
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/mman.h>
+#include <asm/syscalls.h>
#ifdef __MIPSEB__
#define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 432bfd3e7f22..4e3579bbd620 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -8,6 +8,7 @@
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>
+#include <linux/reboot.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 3f00788b0871..84b3affb9de8 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -201,7 +201,7 @@ phys_addr_t __mips_cm_phys_base(void)
phys_addr_t mips_cm_phys_base(void)
__attribute__((weak, alias("__mips_cm_phys_base")));
-phys_addr_t __mips_cm_l2sync_phys_base(void)
+static phys_addr_t __mips_cm_l2sync_phys_base(void)
{
u32 base_reg;
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 67e130d3f038..10172fc4f627 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -15,6 +15,7 @@
#include <linux/security.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+#include <asm/syscalls.h>
/*
* CPU mask used to set process affinity for MT VPEs/TCs with FPUs
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index f88b7919f11f..c07d64438b5b 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -19,6 +19,7 @@
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>
+#include <asm/mips_mt.h>
int vpelimit;
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 0c936cbf20c5..7b2fbaa9cac5 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -20,8 +20,7 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/jump_label.h>
-
-extern void jump_label_apply_nops(struct module *mod);
+#include <asm/jump_label.h>
struct mips_hi16 {
struct mips_hi16 *next;
diff --git a/arch/mips/kernel/r4k-bugs64.c b/arch/mips/kernel/r4k-bugs64.c
index 6ffefb2c6971..1e300330078d 100644
--- a/arch/mips/kernel/r4k-bugs64.c
+++ b/arch/mips/kernel/r4k-bugs64.c
@@ -14,6 +14,7 @@
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/setup.h>
+#include <asm/traps.h>
static char bug64hit[] __initdata =
"reliable operation impossible!\n%s";
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index f50d48435c68..136eb20ac024 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -40,4 +40,7 @@ _restore_fp_context(void __user *fpregs, void __user *csr);
extern asmlinkage int _save_msa_all_upper(void __user *buf);
extern asmlinkage int _restore_msa_all_upper(void __user *buf);
+extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
+extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
+
#endif /* __SIGNAL_COMMON_H */
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 479999b7f2de..ccbf580827f6 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -38,6 +38,7 @@
#include <asm/dsp.h>
#include <asm/inst.h>
#include <asm/msa.h>
+#include <asm/syscalls.h>
#include "signal-common.h"
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 59b8965433c2..73081d4ee8c1 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -18,6 +18,7 @@
#include <asm/compat-signal.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
+#include <asm/syscalls.h>
#include "signal-common.h"
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index cfc77b69420a..139d2596b0d4 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -24,6 +24,7 @@
#include <asm/ucontext.h>
#include <asm/fpu.h>
#include <asm/cpu-features.h>
+#include <asm/syscalls.h>
#include "signal-common.h"
@@ -32,9 +33,6 @@
*/
#define __NR_N32_restart_syscall 6214
-extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
-extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
-
struct ucontextn32 {
u32 uc_flags;
s32 uc_link;
diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c
index 299a7a28ca33..4f0458459650 100644
--- a/arch/mips/kernel/signal_o32.c
+++ b/arch/mips/kernel/signal_o32.c
@@ -19,6 +19,7 @@
#include <asm/dsp.h>
#include <asm/sim.h>
#include <asm/unistd.h>
+#include <asm/syscalls.h>
#include "signal-common.h"
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 82e2e051b416..0b53d35a116e 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
@@ -468,11 +469,13 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
return 0;
}
+#ifdef CONFIG_PROFILING
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
return 0;
}
+#endif
static void flush_tlb_all_ipi(void *info)
{
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index d5d96214cce5..71c7e5e27567 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -12,6 +12,7 @@
#include <asm/mipsregs.h>
#include <asm/r4kcache.h>
#include <asm/hazards.h>
+#include <asm/spram.h>
/*
* These definitions are correct for the 24K/34K/74K SPRAM sample
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index ae93a607ddf7..1bfc34a2e5b3 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -39,6 +39,7 @@
#include <asm/shmparam.h>
#include <asm/sync.h>
#include <asm/sysmips.h>
+#include <asm/syscalls.h>
#include <asm/switch_to.h>
/*
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index a842b41c8e06..83cfc9eb6b88 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -395,3 +395,8 @@
454 n32 futex_wake sys_futex_wake
455 n32 futex_wait sys_futex_wait
456 n32 futex_requeue sys_futex_requeue
+457 n32 statmount sys_statmount
+458 n32 listmount sys_listmount
+459 n32 lsm_get_self_attr sys_lsm_get_self_attr
+460 n32 lsm_set_self_attr sys_lsm_set_self_attr
+461 n32 lsm_list_modules sys_lsm_list_modules
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index 116ff501bf92..532b855df589 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -371,3 +371,8 @@
454 n64 futex_wake sys_futex_wake
455 n64 futex_wait sys_futex_wait
456 n64 futex_requeue sys_futex_requeue
+457 n64 statmount sys_statmount
+458 n64 listmount sys_listmount
+459 n64 lsm_get_self_attr sys_lsm_get_self_attr
+460 n64 lsm_set_self_attr sys_lsm_set_self_attr
+461 n64 lsm_list_modules sys_lsm_list_modules
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 525cc54bc63b..f45c9530ea93 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -444,3 +444,8 @@
454 o32 futex_wake sys_futex_wake
455 o32 futex_wait sys_futex_wait
456 o32 futex_requeue sys_futex_requeue
+457 o32 statmount sys_statmount
+458 o32 listmount sys_listmount
+459 o32 lsm_get_self_attr sys_lsm_get_self_attr
+460 o32 lsm_set_self_attr sys_lsm_set_self_attr
+461 o32 lsm_list_modules sys_lsm_list_modules
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 246c6a6b0261..c58c0c3c5b40 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2157,8 +2157,6 @@ void *set_vi_handler(int n, vi_handler_t addr)
return set_vi_srs_handler(n, addr, 0);
}
-extern void tlb_init(void);
-
/*
* Timer interrupt
*/
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index f4cf94e92ec3..db652c99b72e 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -91,6 +91,7 @@
#include <asm/inst.h>
#include <asm/unaligned-emul.h>
#include <asm/mmu_context.h>
+#include <asm/traps.h>
#include <linux/uaccess.h>
#include "access-helper.h"
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 187d1c16361c..b45bf026ee55 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1485,10 +1485,6 @@ static void loongson3_sc_init(void)
return;
}
-extern int r5k_sc_init(void);
-extern int rm7k_sc_init(void);
-extern int mips_sc_init(void);
-
static void setup_scache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -1828,7 +1824,7 @@ static struct notifier_block r4k_cache_pm_notifier_block = {
.notifier_call = r4k_cache_pm_notifier,
};
-int __init r4k_cache_init_pm(void)
+static int __init r4k_cache_init_pm(void)
{
return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
}
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 7f830634dbe7..df1ced4fc3b5 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -205,22 +205,13 @@ static inline void setup_protection_map(void)
void cpu_cache_init(void)
{
- if (cpu_has_3k_cache) {
- extern void __weak r3k_cache_init(void);
-
+ if (IS_ENABLED(CONFIG_CPU_R3000) && cpu_has_3k_cache)
r3k_cache_init();
- }
- if (cpu_has_4k_cache) {
- extern void __weak r4k_cache_init(void);
-
+ if (IS_ENABLED(CONFIG_CPU_R4K_CACHE_TLB) && cpu_has_4k_cache)
r4k_cache_init();
- }
-
- if (cpu_has_octeon_cache) {
- extern void __weak octeon_cache_init(void);
+ if (IS_ENABLED(CONFIG_CPU_CAVIUM_OCTEON) && cpu_has_octeon_cache)
octeon_cache_init();
- }
setup_protection_map();
}
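
The cpu_cache_init() rewrite above swaps function-local weak externs for compile-time IS_ENABLED() checks: the calls stay visible to the compiler, so the prototypes now collected in <asm/cache.h> are type-checked, and the optimizer discards them as dead code when the corresponding option is off. A generic sketch of the pattern with hypothetical names (CONFIG_EXAMPLE_CACHE and example_cache_init() are placeholders, not from the patch):

#include <linux/kconfig.h>

void example_cache_init(void);		/* real code keeps this in a header */

static void example_cpu_cache_init(void)
{
	/* Compiles in both configurations; the branch and the call are
	 * eliminated when CONFIG_EXAMPLE_CACHE is not set, which relies on
	 * the optimizer dropping the unreachable call. */
	if (IS_ENABLED(CONFIG_EXAMPLE_CACHE))
		example_cache_init();
}
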
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index d7878208bd3f..aaa9a242ebba 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -26,6 +26,7 @@
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/highmem.h> /* For VMALLOC_END */
+#include <asm/traps.h>
#include <linux/kdebug.h>
int show_unhandled_signals = 1;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 5dcb525a8995..c2e0e5aebe90 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -38,6 +38,7 @@
#include <asm/dma.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
+#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index c76d21f7dffb..1e544827dea9 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -89,6 +89,7 @@ void pud_init(void *addr)
}
#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
pmd_t pmd;
@@ -103,6 +104,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
{
*pmdp = pmd;
}
+#endif
void __init pagetable_init(void)
{
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 53dfa2b9316b..f6db65410c65 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -23,11 +23,11 @@
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
+#include <asm/setup.h>
+#include <asm/tlbex.h>
#undef DEBUG_TLB
-extern void build_tlb_refill_handler(void);
-
/* CP0 hazard avoidance. */
#define BARRIER \
__asm__ __volatile__( \
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 93c2d695588a..7e2a0011a6fb 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -22,9 +22,9 @@
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
+#include <asm/tlbex.h>
#include <asm/tlbmisc.h>
-
-extern void build_tlb_refill_handler(void);
+#include <asm/setup.h>
/*
* LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
@@ -458,6 +458,7 @@ EXPORT_SYMBOL(has_transparent_hugepage);
int temp_tlb_entry;
+#ifndef CONFIG_64BIT
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
@@ -496,6 +497,7 @@ out:
local_irq_restore(flags);
return ret;
}
+#endif
static int ntlb;
static int __init set_ntlb(char *str)
diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c
index a15e29dfc7b3..d8ef7778e535 100644
--- a/arch/mips/power/cpu.c
+++ b/arch/mips/power/cpu.c
@@ -6,6 +6,7 @@
* Author: Hu Hongbing <huhb@lemote.com>
* Wu Zhangjin <wuzhangjin@gmail.com>
*/
+#include <linux/suspend.h>
#include <asm/sections.h>
#include <asm/fpu.h>
#include <asm/dsp.h>
diff --git a/arch/mips/power/hibernate.c b/arch/mips/power/hibernate.c
index 94ab17c3c49d..192879e76c85 100644
--- a/arch/mips/power/hibernate.c
+++ b/arch/mips/power/hibernate.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/suspend.h>
#include <asm/tlbflush.h>
extern int restore_image(void);
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index d54464021a61..58d9565dc2c7 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -50,7 +50,7 @@ config ARCH_FORCE_MAX_ORDER
default "10"
help
The kernel page allocator limits the size of maximal physically
- contiguous allocations. The limit is called MAX_ORDER and it
+ contiguous allocations. The limit is called MAX_PAGE_ORDER and it
defines the maximal power of two of number of pages that can be
allocated as a single contiguous block. This option allows
overriding the default setting when ability to allocate very
diff --git a/arch/parisc/kernel/kexec_file.c b/arch/parisc/kernel/kexec_file.c
index 8c534204f0fd..3fc82130b6c3 100644
--- a/arch/parisc/kernel/kexec_file.c
+++ b/arch/parisc/kernel/kexec_file.c
@@ -38,8 +38,8 @@ static void *elf_load(struct kimage *image, char *kernel_buf,
for (i = 0; i < image->nr_segments; i++)
image->segment[i].mem = __pa(image->segment[i].mem);
- pr_debug("Loaded the kernel at 0x%lx, entry at 0x%lx\n",
- kernel_load_addr, image->start);
+ kexec_dprintk("Loaded the kernel at 0x%lx, entry at 0x%lx\n",
+ kernel_load_addr, image->start);
if (initrd != NULL) {
kbuf.buffer = initrd;
@@ -51,7 +51,7 @@ static void *elf_load(struct kimage *image, char *kernel_buf,
if (ret)
goto out;
- pr_debug("Loaded initrd at 0x%lx\n", kbuf.mem);
+ kexec_dprintk("Loaded initrd at 0x%lx\n", kbuf.mem);
image->arch.initrd_start = kbuf.mem;
image->arch.initrd_end = kbuf.mem + initrd_len;
}
@@ -68,7 +68,7 @@ static void *elf_load(struct kimage *image, char *kernel_buf,
if (ret)
goto out;
- pr_debug("Loaded cmdline at 0x%lx\n", kbuf.mem);
+ kexec_dprintk("Loaded cmdline at 0x%lx\n", kbuf.mem);
image->arch.cmdline = kbuf.mem;
}
out:
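
The parisc kexec hunks above move from pr_debug() to kexec_dprintk(), which ties the load-time messages to the kexec_file debug switch rather than to dynamic debug. A rough model of that gating, hedged because the real macro lives in <linux/kexec.h> and its exact printk level may differ:

#include <linux/printk.h>
#include <linux/types.h>

/* Hedged model only, not the kernel's definition. */
extern bool kexec_file_dbg_print;

#define kexec_dprintk_model(fmt, ...)				\
	do {							\
		if (kexec_file_dbg_print)			\
			printk(fmt, ##__VA_ARGS__);		\
	} while (0)
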
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index a47798fed54e..b236a84c4e12 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -455,3 +455,8 @@
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1f11a62809f2..414b978b8010 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -189,6 +189,7 @@ config PPC
select EDAC_ATOMIC_SCRUB
select EDAC_SUPPORT
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if ARCH_USING_PATCHABLE_FUNCTION_ENTRY
+ select FUNCTION_ALIGNMENT_4B
select GENERIC_ATOMIC64 if PPC32
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_CMOS_UPDATE
@@ -915,7 +916,7 @@ config ARCH_FORCE_MAX_ORDER
default "10"
help
The kernel page allocator limits the size of maximal physically
- contiguous allocations. The limit is called MAX_ORDER and it
+ contiguous allocations. The limit is called MAX_PAGE_ORDER and it
defines the maximal power of two of number of pages that can be
allocated as a single contiguous block. This option allows
overriding the default setting when ability to allocate very
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index ea4033abc07d..8c80b154e814 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -271,7 +271,6 @@ config PPC_EARLY_DEBUG_USBGECKO
config PPC_EARLY_DEBUG_PS3GELIC
bool "Early debugging through the PS3 Ethernet port"
depends on PPC_PS3
- select PS3GELIC_UDBG
help
Select this to enable early debugging for the PlayStation3 via
UDP broadcasts sent out through the Ethernet port.
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index f19dbaa1d541..051247027da0 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -10,15 +10,26 @@
# Rewritten by Cort Dougan and Paul Mackerras
#
+ifdef cross_compiling
+ ifeq ($(CROSS_COMPILE),)
+ # Auto detect cross compiler prefix.
+ # Look for: (powerpc(64(le)?)?)(-unknown)?-linux(-gnu)?-
+ CC_ARCHES := powerpc powerpc64 powerpc64le
+ CC_SUFFIXES := linux linux-gnu unknown-linux-gnu
+ CROSS_COMPILE := $(call cc-cross-prefix, $(foreach a,$(CC_ARCHES), \
+ $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
+ endif
+endif
+
HAS_BIARCH := $(call cc-option-yn, -m32)
# Set default 32 bits cross compilers for vdso and boot wrapper
CROSS32_COMPILE ?=
# If we're on a ppc/ppc64/ppc64le machine use that defconfig, otherwise just use
-# ppc64_defconfig because we have nothing better to go on.
+# ppc64le_defconfig because we have nothing better to go on.
uname := $(shell uname -m)
-KBUILD_DEFCONFIG := $(if $(filter ppc%,$(uname)),$(uname),ppc64)_defconfig
+KBUILD_DEFCONFIG := $(if $(filter ppc%,$(uname)),$(uname),ppc64le)_defconfig
new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
@@ -161,7 +172,7 @@ CFLAGS-y += $(CONFIG_TUNE_CPU)
asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
-KBUILD_CPPFLAGS += -I $(srctree)/arch/$(ARCH) $(asinstr)
+KBUILD_CPPFLAGS += -I $(srctree)/arch/powerpc $(asinstr)
KBUILD_AFLAGS += $(AFLAGS-y)
KBUILD_CFLAGS += $(call cc-option,-msoft-float)
KBUILD_CFLAGS += $(CFLAGS-y)
@@ -232,7 +243,7 @@ BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.% uImage.%
PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
-boot := arch/$(ARCH)/boot
+boot := arch/powerpc/boot
$(BOOT_TARGETS1): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
@@ -336,7 +347,7 @@ PHONY += $(generated_configs)
define archhelp
echo '* zImage - Build default images selected by kernel config'
- echo ' zImage.* - Compressed kernel image (arch/$(ARCH)/boot/zImage.*)'
+ echo ' zImage.* - Compressed kernel image (arch/powerpc/boot/zImage.*)'
echo ' uImage - U-Boot native image format'
echo ' cuImage.<dt> - Backwards compatible U-Boot image for older'
echo ' versions which do not support device trees'
@@ -347,12 +358,12 @@ define archhelp
echo ' (your) ~/bin/$(INSTALLKERNEL) or'
echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
echo ' install to $$(INSTALL_PATH) and run lilo'
- echo ' *_defconfig - Select default config from arch/$(ARCH)/configs'
+ echo ' *_defconfig - Select default config from arch/powerpc/configs'
echo ''
echo ' Targets with <dt> embed a device tree blob inside the image'
echo ' These targets support board with firmware that does not'
echo ' support passing a device tree directly. Replace <dt> with the'
- echo ' name of a dts file from the arch/$(ARCH)/boot/dts/ directory'
+ echo ' name of a dts file from the arch/powerpc/boot/dts/ directory'
echo ' (minus the .dts extension).'
echo
$(foreach cfg,$(generated_configs),
diff --git a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
index d552044c5afc..aa5152ca8120 100644
--- a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
@@ -367,45 +367,46 @@
reg = <0xf0000 0x1000>;
interrupts = <18 2 0 0>;
fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x30061>;
- fsl,tmu-calibration = <0x00000000 0x0000000f
- 0x00000001 0x00000017
- 0x00000002 0x0000001e
- 0x00000003 0x00000026
- 0x00000004 0x0000002e
- 0x00000005 0x00000035
- 0x00000006 0x0000003d
- 0x00000007 0x00000044
- 0x00000008 0x0000004c
- 0x00000009 0x00000053
- 0x0000000a 0x0000005b
- 0x0000000b 0x00000064
-
- 0x00010000 0x00000011
- 0x00010001 0x0000001c
- 0x00010002 0x00000024
- 0x00010003 0x0000002b
- 0x00010004 0x00000034
- 0x00010005 0x00000039
- 0x00010006 0x00000042
- 0x00010007 0x0000004c
- 0x00010008 0x00000051
- 0x00010009 0x0000005a
- 0x0001000a 0x00000063
-
- 0x00020000 0x00000013
- 0x00020001 0x00000019
- 0x00020002 0x00000024
- 0x00020003 0x0000002c
- 0x00020004 0x00000035
- 0x00020005 0x0000003d
- 0x00020006 0x00000046
- 0x00020007 0x00000050
- 0x00020008 0x00000059
-
- 0x00030000 0x00000002
- 0x00030001 0x0000000d
- 0x00030002 0x00000019
- 0x00030003 0x00000024>;
+ fsl,tmu-calibration =
+ <0x00000000 0x0000000f>,
+ <0x00000001 0x00000017>,
+ <0x00000002 0x0000001e>,
+ <0x00000003 0x00000026>,
+ <0x00000004 0x0000002e>,
+ <0x00000005 0x00000035>,
+ <0x00000006 0x0000003d>,
+ <0x00000007 0x00000044>,
+ <0x00000008 0x0000004c>,
+ <0x00000009 0x00000053>,
+ <0x0000000a 0x0000005b>,
+ <0x0000000b 0x00000064>,
+
+ <0x00010000 0x00000011>,
+ <0x00010001 0x0000001c>,
+ <0x00010002 0x00000024>,
+ <0x00010003 0x0000002b>,
+ <0x00010004 0x00000034>,
+ <0x00010005 0x00000039>,
+ <0x00010006 0x00000042>,
+ <0x00010007 0x0000004c>,
+ <0x00010008 0x00000051>,
+ <0x00010009 0x0000005a>,
+ <0x0001000a 0x00000063>,
+
+ <0x00020000 0x00000013>,
+ <0x00020001 0x00000019>,
+ <0x00020002 0x00000024>,
+ <0x00020003 0x0000002c>,
+ <0x00020004 0x00000035>,
+ <0x00020005 0x0000003d>,
+ <0x00020006 0x00000046>,
+ <0x00020007 0x00000050>,
+ <0x00020008 0x00000059>,
+
+ <0x00030000 0x00000002>,
+ <0x00030001 0x0000000d>,
+ <0x00030002 0x00000019>,
+ <0x00030003 0x00000024>;
#thermal-sensor-cells = <1>;
};
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index ad0ab33336b8..776788623204 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -447,41 +447,42 @@
reg = <0xf0000 0x1000>;
interrupts = <18 2 0 0>;
fsl,tmu-range = <0xa0000 0x90026 0x8004a 0x1006a>;
- fsl,tmu-calibration = <0x00000000 0x00000025
- 0x00000001 0x00000028
- 0x00000002 0x0000002d
- 0x00000003 0x00000031
- 0x00000004 0x00000036
- 0x00000005 0x0000003a
- 0x00000006 0x00000040
- 0x00000007 0x00000044
- 0x00000008 0x0000004a
- 0x00000009 0x0000004f
- 0x0000000a 0x00000054
-
- 0x00010000 0x0000000d
- 0x00010001 0x00000013
- 0x00010002 0x00000019
- 0x00010003 0x0000001f
- 0x00010004 0x00000025
- 0x00010005 0x0000002d
- 0x00010006 0x00000033
- 0x00010007 0x00000043
- 0x00010008 0x0000004b
- 0x00010009 0x00000053
-
- 0x00020000 0x00000010
- 0x00020001 0x00000017
- 0x00020002 0x0000001f
- 0x00020003 0x00000029
- 0x00020004 0x00000031
- 0x00020005 0x0000003c
- 0x00020006 0x00000042
- 0x00020007 0x0000004d
- 0x00020008 0x00000056
-
- 0x00030000 0x00000012
- 0x00030001 0x0000001d>;
+ fsl,tmu-calibration =
+ <0x00000000 0x00000025>,
+ <0x00000001 0x00000028>,
+ <0x00000002 0x0000002d>,
+ <0x00000003 0x00000031>,
+ <0x00000004 0x00000036>,
+ <0x00000005 0x0000003a>,
+ <0x00000006 0x00000040>,
+ <0x00000007 0x00000044>,
+ <0x00000008 0x0000004a>,
+ <0x00000009 0x0000004f>,
+ <0x0000000a 0x00000054>,
+
+ <0x00010000 0x0000000d>,
+ <0x00010001 0x00000013>,
+ <0x00010002 0x00000019>,
+ <0x00010003 0x0000001f>,
+ <0x00010004 0x00000025>,
+ <0x00010005 0x0000002d>,
+ <0x00010006 0x00000033>,
+ <0x00010007 0x00000043>,
+ <0x00010008 0x0000004b>,
+ <0x00010009 0x00000053>,
+
+ <0x00020000 0x00000010>,
+ <0x00020001 0x00000017>,
+ <0x00020002 0x0000001f>,
+ <0x00020003 0x00000029>,
+ <0x00020004 0x00000031>,
+ <0x00020005 0x0000003c>,
+ <0x00020006 0x00000042>,
+ <0x00020007 0x0000004d>,
+ <0x00020008 0x00000056>,
+
+ <0x00030000 0x00000012>,
+ <0x00030001 0x0000001d>;
#thermal-sensor-cells = <1>;
};
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 6e7b9e8fd225..544a65fda77b 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -92,6 +92,7 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZONE_DEVICE=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 2b175ddf82f0..aa8bb0208bcc 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -24,6 +24,7 @@ CONFIG_PS3_VRAM=m
CONFIG_PS3_LPM=m
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
CONFIG_KEXEC=y
+# CONFIG_PPC64_BIG_ENDIAN_ELF_ABI_V2 is not set
CONFIG_PPC_4K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_PM=y
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index cb77eddca54b..927d585652bc 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -17,12 +17,6 @@
#define _PAGE_EXEC 0x00001 /* execute permission */
#define _PAGE_WRITE 0x00002 /* write access allowed */
#define _PAGE_READ 0x00004 /* read access allowed */
-#define _PAGE_NA _PAGE_PRIVILEGED
-#define _PAGE_NAX _PAGE_EXEC
-#define _PAGE_RO _PAGE_READ
-#define _PAGE_ROX (_PAGE_READ | _PAGE_EXEC)
-#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
-#define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED 0x00008 /* kernel access only */
#define _PAGE_SAO 0x00010 /* Strong access order */
#define _PAGE_NON_IDEMPOTENT 0x00020 /* non idempotent memory */
@@ -532,8 +526,8 @@ static inline bool pte_user(pte_t pte)
static inline bool pte_access_permitted(pte_t pte, bool write)
{
/*
- * _PAGE_READ is needed for any access and will be
- * cleared for PROT_NONE
+ * _PAGE_READ is needed for any access and will be cleared for
+ * PROT_NONE. Execute-only mapping via PROT_EXEC also returns false.
*/
if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
return false;
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 1950c1b825b4..fd642b729775 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -158,11 +158,6 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
*/
}
-static inline bool __pte_protnone(unsigned long pte)
-{
- return (pte & (pgprot_val(PAGE_NONE) | _PAGE_RWX)) == pgprot_val(PAGE_NONE);
-}
-
static inline bool __pte_flags_need_flush(unsigned long oldval,
unsigned long newval)
{
@@ -179,8 +174,8 @@ static inline bool __pte_flags_need_flush(unsigned long oldval,
/*
* We do not expect kernel mappings or non-PTEs or not-present PTEs.
*/
- VM_WARN_ON_ONCE(!__pte_protnone(oldval) && oldval & _PAGE_PRIVILEGED);
- VM_WARN_ON_ONCE(!__pte_protnone(newval) && newval & _PAGE_PRIVILEGED);
+ VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
+ VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index 9e5a39b6a311..1ebd2ca97f12 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -25,7 +25,7 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
addr += MCOUNT_INSN_SIZE;
- return addr;
+ return addr;
}
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index ddb99e982917..a41e542ba94d 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -349,7 +349,16 @@
#define H_GET_ENERGY_SCALE_INFO 0x450
#define H_PKS_SIGNED_UPDATE 0x454
#define H_WATCHDOG 0x45C
-#define MAX_HCALL_OPCODE H_WATCHDOG
+#define H_GUEST_GET_CAPABILITIES 0x460
+#define H_GUEST_SET_CAPABILITIES 0x464
+#define H_GUEST_CREATE 0x470
+#define H_GUEST_CREATE_VCPU 0x474
+#define H_GUEST_GET_STATE 0x478
+#define H_GUEST_SET_STATE 0x47C
+#define H_GUEST_RUN_VCPU 0x480
+#define H_GUEST_COPY_MEMORY 0x484
+#define H_GUEST_DELETE 0x488
+#define MAX_HCALL_OPCODE H_GUEST_DELETE
/* Scope args for H_SCM_UNBIND_ALL */
#define H_UNBIND_SCOPE_ALL (0x1)
@@ -393,15 +402,6 @@
#define H_ENTER_NESTED 0xF804
#define H_TLB_INVALIDATE 0xF808
#define H_COPY_TOFROM_GUEST 0xF80C
-#define H_GUEST_GET_CAPABILITIES 0x460
-#define H_GUEST_SET_CAPABILITIES 0x464
-#define H_GUEST_CREATE 0x470
-#define H_GUEST_CREATE_VCPU 0x474
-#define H_GUEST_GET_STATE 0x478
-#define H_GUEST_SET_STATE 0x47C
-#define H_GUEST_RUN_VCPU 0x480
-#define H_GUEST_COPY_MEMORY 0x484
-#define H_GUEST_DELETE 0x488
/* Flags for H_SVM_PAGE_IN */
#define H_PAGE_IN_SHARED 0x1
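
The hvcall.h hunks above move the H_GUEST_* opcodes out of the platform-reserved block and raise MAX_HCALL_OPCODE to H_GUEST_DELETE, so the new nested-guest hcalls fall inside the architected range that generic dispatch code bounds-checks. A toy illustration of that kind of range test (hypothetical helper; the constants are copied from the hunk and the code is standalone, not kernel source):

#include <stdbool.h>

#define H_GUEST_DELETE		0x488
#define MAX_HCALL_OPCODE	H_GUEST_DELETE

/* Hypothetical first-stage filter: an opcode above the architected maximum
 * cannot be one of the hcalls handled by the generic table. */
static bool opcode_in_architected_range(unsigned long opcode)
{
	return opcode <= MAX_HCALL_OPCODE;
}
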
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4f527d09c92b..3e1e2a698c9e 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -302,6 +302,7 @@ void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
+void kvmhv_flush_lpid(u64 lpid);
void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
@@ -593,13 +594,17 @@ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
-KVMPPC_BOOK3S_VCORE_ACCESSOR(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)
+KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)
+
+static inline u64 kvmppc_get_tb_offset(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.vcore->tb_offset;
+}
static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
{
- WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0);
return vcpu->arch.dec_expires;
}
@@ -607,7 +612,6 @@ static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val)
{
vcpu->arch.dec_expires = val;
- WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB);
}
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 2477021bff54..d8729ec81ca0 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -682,6 +682,7 @@ void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *i
int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit);
int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1);
int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu);
+int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h
index b88d1d2cf304..b71b9582e754 100644
--- a/arch/powerpc/include/asm/linkage.h
+++ b/arch/powerpc/include/asm/linkage.h
@@ -4,9 +4,6 @@
#include <asm/types.h>
-#define __ALIGN .align 2
-#define __ALIGN_STR ".align 2"
-
#ifdef CONFIG_PPC64_ELF_ABI_V1
#define cond_syscall(x) \
asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 52cc25864a1b..d8b7e246a32f 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -412,5 +412,9 @@ extern void *abatron_pteptrs[2];
#include <asm/nohash/mmu.h>
#endif
+#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
+#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMU_H_ */
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index 4c6c6dbd182f..da827d2d0866 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -42,14 +42,6 @@ u64 memory_hotplug_max(void);
#else
#define memory_hotplug_max() memblock_end_of_DRAM()
#endif /* CONFIG_NUMA */
-#ifdef CONFIG_FA_DUMP
-#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
-#endif
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-extern int create_section_mapping(unsigned long start, unsigned long end,
- int nid, pgprot_t prot);
-#endif
#endif /* __KERNEL__ */
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/powerpc/include/asm/papr-sysparm.h b/arch/powerpc/include/asm/papr-sysparm.h
index f5fdbd8ae9db..0dbbff59101d 100644
--- a/arch/powerpc/include/asm/papr-sysparm.h
+++ b/arch/powerpc/include/asm/papr-sysparm.h
@@ -2,8 +2,10 @@
#ifndef _ASM_POWERPC_PAPR_SYSPARM_H
#define _ASM_POWERPC_PAPR_SYSPARM_H
+#include <uapi/asm/papr-sysparm.h>
+
typedef struct {
- const u32 token;
+ u32 token;
} papr_sysparm_t;
#define mk_papr_sysparm(x_) ((papr_sysparm_t){ .token = x_, })
@@ -20,11 +22,14 @@ typedef struct {
#define PAPR_SYSPARM_TLB_BLOCK_INVALIDATE_ATTRS mk_papr_sysparm(50)
#define PAPR_SYSPARM_LPAR_NAME mk_papr_sysparm(55)
-enum {
- PAPR_SYSPARM_MAX_INPUT = 1024,
- PAPR_SYSPARM_MAX_OUTPUT = 4000,
-};
-
+/**
+ * struct papr_sysparm_buf - RTAS work area layout for system parameter functions.
+ *
+ * This is the memory layout of the buffers passed to/from
+ * ibm,get-system-parameter and ibm,set-system-parameter. It is
+ * distinct from the papr_sysparm_io_block structure that is passed
+ * between user space and the kernel.
+ */
struct papr_sysparm_buf {
__be16 len;
char val[PAPR_SYSPARM_MAX_OUTPUT];
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
index ac4279208d63..b78b82d66057 100644
--- a/arch/powerpc/include/asm/paravirt.h
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -76,6 +76,17 @@ static inline bool is_vcpu_idle(int vcpu)
{
return lppaca_of(vcpu).idle;
}
+
+static inline bool vcpu_is_dispatched(int vcpu)
+{
+ /*
+ * This is the yield_count. An "odd" value (low bit on) means that
+ * the processor is yielded (either because of an OS yield or a
+ * hypervisor preempt). An even value implies that the processor is
+ * currently executing.
+ */
+ return (!(yield_count_of(vcpu) & 1));
+}
#else
static inline bool is_shared_processor(void)
{
@@ -109,6 +120,10 @@ static inline bool is_vcpu_idle(int vcpu)
{
return false;
}
+static inline bool vcpu_is_dispatched(int vcpu)
+{
+ return true;
+}
#endif
#define vcpu_is_preempted vcpu_is_preempted
@@ -134,12 +149,12 @@ static inline bool vcpu_is_preempted(int cpu)
* If the hypervisor has dispatched the target CPU on a physical
* processor, then the target CPU is definitely not preempted.
*/
- if (!(yield_count_of(cpu) & 1))
+ if (vcpu_is_dispatched(cpu))
return false;
/*
- * If the target CPU has yielded to Hypervisor but OS has not
- * requested idle then the target CPU is definitely preempted.
+ * if the target CPU is not dispatched and the guest OS
+ * has not marked the CPU idle, then it is hypervisor preempted.
*/
if (!is_vcpu_idle(cpu))
return true;
@@ -166,7 +181,7 @@ static inline bool vcpu_is_preempted(int cpu)
/*
* The PowerVM hypervisor dispatches VMs on a whole core
- * basis. So we know that a thread sibling of the local CPU
+ * basis. So we know that a thread sibling of the executing CPU
* cannot have been preempted by the hypervisor, even if it
* has called H_CONFER, which will set the yield bit.
*/
@@ -174,15 +189,17 @@ static inline bool vcpu_is_preempted(int cpu)
return false;
/*
- * If any of the threads of the target CPU's core are not
- * preempted or ceded, then consider target CPU to be
- * non-preempted.
+ * The specific target CPU was marked by guest OS as idle, but
+ * then also check all other cpus in the core for PowerVM
+ * because it does core scheduling and one of the vcpu
+ * of the core getting preempted by hypervisor implies
+ * other vcpus can also be considered preempted.
*/
first_cpu = cpu_first_thread_sibling(cpu);
for (i = first_cpu; i < first_cpu + threads_per_core; i++) {
if (i == cpu)
continue;
- if (!(yield_count_of(i) & 1))
+ if (vcpu_is_dispatched(i))
return false;
if (!is_vcpu_idle(i))
return true;
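
The paravirt.h changes above factor the yield-count test into vcpu_is_dispatched() and reuse it in vcpu_is_preempted(). The convention, per the new comment in the hunk: the hypervisor bumps the count on each dispatch and each yield or preemption, so an odd value means the vCPU is currently off a physical processor. A standalone toy model of that check:

#include <stdbool.h>
#include <stdint.h>

/* Toy model of the yield_count convention described in the hunk above. */
static inline bool vcpu_is_dispatched_model(uint32_t yield_count)
{
	return (yield_count & 1) == 0;	/* even: currently executing */
}
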
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index d9fcff575027..ce2b1b5eebdd 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -35,6 +35,9 @@ extern void init_pci_config_tokens (void);
extern unsigned long get_phb_buid (struct device_node *);
extern int rtas_setup_phb(struct pci_controller *phb);
+int rtas_pci_dn_read_config(struct pci_dn *pdn, int where, int size, u32 *val);
+int rtas_pci_dn_write_config(struct pci_dn *pdn, int where, int size, u32 val);
+
#ifdef CONFIG_EEH
void eeh_addr_cache_insert_dev(struct pci_dev *dev);
@@ -44,8 +47,6 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity);
int eeh_pci_enable(struct eeh_pe *pe, int function);
int eeh_pe_reset_full(struct eeh_pe *pe, bool include_passed);
void eeh_save_bars(struct eeh_dev *edev);
-int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
-int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
void eeh_pe_state_mark(struct eeh_pe *pe, int state);
void eeh_pe_mark_isolated(struct eeh_pe *pe);
void eeh_pe_state_clear(struct eeh_pe *pe, int state, bool include_passed);
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index a5f36546a052..d13d8fdc3411 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -514,4 +514,10 @@ u64 ps3_get_spe_id(void *arg);
void ps3_early_mm_init(void);
+#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC
+void udbg_shutdown_ps3gelic(void);
+#else
+static inline void udbg_shutdown_ps3gelic(void) {}
+#endif
+
#endif
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 4ae4ab9090a2..7fd09f25452d 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1361,6 +1361,7 @@
#define PVR_POWER8E 0x004B
#define PVR_POWER8NVL 0x004C
#define PVR_POWER8 0x004D
+#define PVR_HX_C2000 0x0066
#define PVR_POWER9 0x004E
#define PVR_POWER10 0x0080
#define PVR_BE 0x0070
diff --git a/arch/powerpc/include/asm/reg_a2.h b/arch/powerpc/include/asm/reg_a2.h
deleted file mode 100644
index 74fba29e9491..000000000000
--- a/arch/powerpc/include/asm/reg_a2.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Register definitions specific to the A2 core
- *
- * Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
- */
-
-#ifndef __ASM_POWERPC_REG_A2_H__
-#define __ASM_POWERPC_REG_A2_H__
-
-#include <asm/asm-const.h>
-
-#define SPRN_TENSR 0x1b5
-#define SPRN_TENS 0x1b6 /* Thread ENable Set */
-#define SPRN_TENC 0x1b7 /* Thread ENable Clear */
-
-#define SPRN_A2_CCR0 0x3f0 /* Core Configuration Register 0 */
-#define SPRN_A2_CCR1 0x3f1 /* Core Configuration Register 1 */
-#define SPRN_A2_CCR2 0x3f2 /* Core Configuration Register 2 */
-#define SPRN_MMUCR0 0x3fc /* MMU Control Register 0 */
-#define SPRN_MMUCR1 0x3fd /* MMU Control Register 1 */
-#define SPRN_MMUCR2 0x3fe /* MMU Control Register 2 */
-#define SPRN_MMUCR3 0x3ff /* MMU Control Register 3 */
-
-#define SPRN_IAR 0x372
-
-#define SPRN_IUCR0 0x3f3
-#define IUCR0_ICBI_ACK 0x1000
-
-#define SPRN_XUCR0 0x3f6 /* Execution Unit Config Register 0 */
-
-#define A2_IERAT_SIZE 16
-#define A2_DERAT_SIZE 32
-
-/* A2 MMUCR0 bits */
-#define MMUCR0_ECL 0x80000000 /* Extended Class for TLB fills */
-#define MMUCR0_TID_NZ 0x40000000 /* TID is non-zero */
-#define MMUCR0_TS 0x10000000 /* Translation space for TLB fills */
-#define MMUCR0_TGS 0x20000000 /* Guest space for TLB fills */
-#define MMUCR0_TLBSEL 0x0c000000 /* TLB or ERAT target for TLB fills */
-#define MMUCR0_TLBSEL_U 0x00000000 /* TLBSEL = UTLB */
-#define MMUCR0_TLBSEL_I 0x08000000 /* TLBSEL = I-ERAT */
-#define MMUCR0_TLBSEL_D 0x0c000000 /* TLBSEL = D-ERAT */
-#define MMUCR0_LOCKSRSH 0x02000000 /* Use TLB lock on tlbsx. */
-#define MMUCR0_TID_MASK 0x000000ff /* TID field */
-
-/* A2 MMUCR1 bits */
-#define MMUCR1_IRRE 0x80000000 /* I-ERAT round robin enable */
-#define MMUCR1_DRRE 0x40000000 /* D-ERAT round robin enable */
-#define MMUCR1_REE 0x20000000 /* Reference Exception Enable*/
-#define MMUCR1_CEE 0x10000000 /* Change exception enable */
-#define MMUCR1_CSINV_ALL 0x00000000 /* Inval ERAT on all CS evts */
-#define MMUCR1_CSINV_NISYNC 0x04000000 /* Inval ERAT on all ex isync*/
-#define MMUCR1_CSINV_NEVER 0x0c000000 /* Don't inval ERAT on CS */
-#define MMUCR1_ICTID 0x00080000 /* IERAT class field as TID */
-#define MMUCR1_ITTID 0x00040000 /* IERAT thdid field as TID */
-#define MMUCR1_DCTID 0x00020000 /* DERAT class field as TID */
-#define MMUCR1_DTTID 0x00010000 /* DERAT thdid field as TID */
-#define MMUCR1_DCCD 0x00008000 /* DERAT class ignore */
-#define MMUCR1_TLBWE_BINV 0x00004000 /* back invalidate on tlbwe */
-
-/* A2 MMUCR2 bits */
-#define MMUCR2_PSSEL_SHIFT 4
-
-/* A2 MMUCR3 bits */
-#define MMUCR3_THID 0x0000000f /* Thread ID */
-
-/* *** ERAT TLB bits definitions */
-#define TLB0_EPN_MASK ASM_CONST(0xfffffffffffff000)
-#define TLB0_CLASS_MASK ASM_CONST(0x0000000000000c00)
-#define TLB0_CLASS_00 ASM_CONST(0x0000000000000000)
-#define TLB0_CLASS_01 ASM_CONST(0x0000000000000400)
-#define TLB0_CLASS_10 ASM_CONST(0x0000000000000800)
-#define TLB0_CLASS_11 ASM_CONST(0x0000000000000c00)
-#define TLB0_V ASM_CONST(0x0000000000000200)
-#define TLB0_X ASM_CONST(0x0000000000000100)
-#define TLB0_SIZE_MASK ASM_CONST(0x00000000000000f0)
-#define TLB0_SIZE_4K ASM_CONST(0x0000000000000010)
-#define TLB0_SIZE_64K ASM_CONST(0x0000000000000030)
-#define TLB0_SIZE_1M ASM_CONST(0x0000000000000050)
-#define TLB0_SIZE_16M ASM_CONST(0x0000000000000070)
-#define TLB0_SIZE_1G ASM_CONST(0x00000000000000a0)
-#define TLB0_THDID_MASK ASM_CONST(0x000000000000000f)
-#define TLB0_THDID_0 ASM_CONST(0x0000000000000001)
-#define TLB0_THDID_1 ASM_CONST(0x0000000000000002)
-#define TLB0_THDID_2 ASM_CONST(0x0000000000000004)
-#define TLB0_THDID_3 ASM_CONST(0x0000000000000008)
-#define TLB0_THDID_ALL ASM_CONST(0x000000000000000f)
-
-#define TLB1_RESVATTR ASM_CONST(0x00f0000000000000)
-#define TLB1_U0 ASM_CONST(0x0008000000000000)
-#define TLB1_U1 ASM_CONST(0x0004000000000000)
-#define TLB1_U2 ASM_CONST(0x0002000000000000)
-#define TLB1_U3 ASM_CONST(0x0001000000000000)
-#define TLB1_R ASM_CONST(0x0000800000000000)
-#define TLB1_C ASM_CONST(0x0000400000000000)
-#define TLB1_RPN_MASK ASM_CONST(0x000003fffffff000)
-#define TLB1_W ASM_CONST(0x0000000000000800)
-#define TLB1_I ASM_CONST(0x0000000000000400)
-#define TLB1_M ASM_CONST(0x0000000000000200)
-#define TLB1_G ASM_CONST(0x0000000000000100)
-#define TLB1_E ASM_CONST(0x0000000000000080)
-#define TLB1_VF ASM_CONST(0x0000000000000040)
-#define TLB1_UX ASM_CONST(0x0000000000000020)
-#define TLB1_SX ASM_CONST(0x0000000000000010)
-#define TLB1_UW ASM_CONST(0x0000000000000008)
-#define TLB1_SW ASM_CONST(0x0000000000000004)
-#define TLB1_UR ASM_CONST(0x0000000000000002)
-#define TLB1_SR ASM_CONST(0x0000000000000001)
-
-/* A2 erativax attributes definitions */
-#define ERATIVAX_RS_IS_ALL 0x000
-#define ERATIVAX_RS_IS_TID 0x040
-#define ERATIVAX_RS_IS_CLASS 0x080
-#define ERATIVAX_RS_IS_FULLMATCH 0x0c0
-#define ERATIVAX_CLASS_00 0x000
-#define ERATIVAX_CLASS_01 0x010
-#define ERATIVAX_CLASS_10 0x020
-#define ERATIVAX_CLASS_11 0x030
-#define ERATIVAX_PSIZE_4K (TLB_PSIZE_4K >> 1)
-#define ERATIVAX_PSIZE_64K (TLB_PSIZE_64K >> 1)
-#define ERATIVAX_PSIZE_1M (TLB_PSIZE_1M >> 1)
-#define ERATIVAX_PSIZE_16M (TLB_PSIZE_16M >> 1)
-#define ERATIVAX_PSIZE_1G (TLB_PSIZE_1G >> 1)
-
-/* A2 eratilx attributes definitions */
-#define ERATILX_T_ALL 0
-#define ERATILX_T_TID 1
-#define ERATILX_T_TGS 2
-#define ERATILX_T_FULLMATCH 3
-#define ERATILX_T_CLASS0 4
-#define ERATILX_T_CLASS1 5
-#define ERATILX_T_CLASS2 6
-#define ERATILX_T_CLASS3 7
-
-/* XUCR0 bits */
-#define XUCR0_TRACE_UM_T0 0x40000000 /* Thread 0 */
-#define XUCR0_TRACE_UM_T1 0x20000000 /* Thread 1 */
-#define XUCR0_TRACE_UM_T2 0x10000000 /* Thread 2 */
-#define XUCR0_TRACE_UM_T3 0x08000000 /* Thread 3 */
-
-/* A2 CCR0 register */
-#define A2_CCR0_PME_DISABLED 0x00000000
-#define A2_CCR0_PME_SLEEP 0x40000000
-#define A2_CCR0_PME_RVW 0x80000000
-#define A2_CCR0_PME_DISABLED2 0xc0000000
-
-/* A2 CCR2 register */
-#define A2_CCR2_ERAT_ONLY_MODE 0x00000001
-#define A2_CCR2_ENABLE_ICSWX 0x00000002
-#define A2_CCR2_ENABLE_PC 0x20000000
-#define A2_CCR2_ENABLE_TRACE 0x40000000
-
-#endif /* __ASM_POWERPC_REG_A2_H__ */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index c697c3c74694..9bb2210c8d44 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -3,6 +3,7 @@
#define _POWERPC_RTAS_H
#ifdef __KERNEL__
+#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/rtas-types.h>
@@ -201,12 +202,25 @@ typedef struct {
/* Memory set aside for sys_rtas to use with calls that need a work area. */
#define RTAS_USER_REGION_SIZE (64 * 1024)
-/* RTAS return status codes */
-#define RTAS_HARDWARE_ERROR -1 /* Hardware Error */
-#define RTAS_BUSY -2 /* RTAS Busy */
-#define RTAS_INVALID_PARAMETER -3 /* Invalid indicator/domain/sensor etc. */
-#define RTAS_EXTENDED_DELAY_MIN 9900
-#define RTAS_EXTENDED_DELAY_MAX 9905
+/*
+ * Common RTAS function return values, derived from the table "RTAS
+ * Status Word Values" in PAPR+ v2.13 7.2.8: "Return Codes". If a
+ * function can return a value in this table then generally it has the
+ * meaning listed here. More extended commentary in the documentation
+ * for rtas_call().
+ *
+ * RTAS functions may use negative and positive numbers not in this
+ * set for function-specific error and success conditions,
+ * respectively.
+ */
+#define RTAS_SUCCESS 0 /* Success. */
+#define RTAS_HARDWARE_ERROR -1 /* Hardware or other unspecified error. */
+#define RTAS_BUSY -2 /* Retry immediately. */
+#define RTAS_INVALID_PARAMETER -3 /* Invalid indicator/domain/sensor etc. */
+#define RTAS_UNEXPECTED_STATE_CHANGE -7 /* Seems limited to EEH and slot reset. */
+#define RTAS_EXTENDED_DELAY_MIN 9900 /* Retry after delaying for ~1ms. */
+#define RTAS_EXTENDED_DELAY_MAX 9905 /* Retry after delaying for ~100s. */
+#define RTAS_ML_ISOLATION_ERROR -9000 /* Multi-level isolation error. */
/* statuses specific to ibm,suspend-me */
#define RTAS_SUSPEND_ABORTED 9000 /* Suspension aborted */
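The retry-oriented statuses above are normally consumed by a delay-and-retry loop; a minimal sketch of that pattern, using the existing rtas_call()/rtas_busy_delay() helpers (token acquisition omitted, 0-in/1-out call as for ibm,activate-firmware), is:

	int fwrc;

	do {
		fwrc = rtas_call(token, 0, 1, NULL);
	} while (rtas_busy_delay(fwrc));	/* sleeps for 9900..9905, retries on RTAS_BUSY */

	if (fwrc != RTAS_SUCCESS)
		pr_err("rtas call failed: %d\n", fwrc);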
@@ -268,7 +282,7 @@ typedef struct {
#define RTAS_TYPE_DEALLOC 0xE3
#define RTAS_TYPE_DUMP 0xE4
#define RTAS_TYPE_HOTPLUG 0xE5
-/* I don't add PowerMGM events right now, this is a different topic */
+/* I don't add PowerMGM events right now, this is a different topic */
#define RTAS_TYPE_PMGM_POWER_SW_ON 0x60
#define RTAS_TYPE_PMGM_POWER_SW_OFF 0x61
#define RTAS_TYPE_PMGM_LID_OPEN 0x62
@@ -408,44 +422,41 @@ static inline bool rtas_function_implemented(const rtas_fn_handle_t handle)
{
return rtas_function_token(handle) != RTAS_UNKNOWN_SERVICE;
}
-extern int rtas_token(const char *service);
-extern int rtas_service_present(const char *service);
-extern int rtas_call(int token, int, int, int *, ...);
+int rtas_token(const char *service);
+int rtas_call(int token, int nargs, int nret, int *outputs, ...);
void rtas_call_unlocked(struct rtas_args *args, int token, int nargs,
int nret, ...);
-extern void __noreturn rtas_restart(char *cmd);
-extern void rtas_power_off(void);
-extern void __noreturn rtas_halt(void);
-extern void rtas_os_term(char *str);
+void __noreturn rtas_restart(char *cmd);
+void rtas_power_off(void);
+void __noreturn rtas_halt(void);
+void rtas_os_term(char *str);
void rtas_activate_firmware(void);
-extern int rtas_get_sensor(int sensor, int index, int *state);
-extern int rtas_get_sensor_fast(int sensor, int index, int *state);
-extern int rtas_get_power_level(int powerdomain, int *level);
-extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
-extern bool rtas_indicator_present(int token, int *maxindex);
-extern int rtas_set_indicator(int indicator, int index, int new_value);
-extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
-extern void rtas_progress(char *s, unsigned short hex);
+int rtas_get_sensor(int sensor, int index, int *state);
+int rtas_get_sensor_fast(int sensor, int index, int *state);
+int rtas_get_power_level(int powerdomain, int *level);
+int rtas_set_power_level(int powerdomain, int level, int *setlevel);
+bool rtas_indicator_present(int token, int *maxindex);
+int rtas_set_indicator(int indicator, int index, int new_value);
+int rtas_set_indicator_fast(int indicator, int index, int new_value);
+void rtas_progress(char *s, unsigned short hex);
int rtas_ibm_suspend_me(int *fw_status);
int rtas_error_rc(int rtas_rc);
struct rtc_time;
-extern time64_t rtas_get_boot_time(void);
-extern void rtas_get_rtc_time(struct rtc_time *rtc_time);
-extern int rtas_set_rtc_time(struct rtc_time *rtc_time);
+time64_t rtas_get_boot_time(void);
+void rtas_get_rtc_time(struct rtc_time *rtc_time);
+int rtas_set_rtc_time(struct rtc_time *rtc_time);
-extern unsigned int rtas_busy_delay_time(int status);
+unsigned int rtas_busy_delay_time(int status);
bool rtas_busy_delay(int status);
-extern int early_init_dt_scan_rtas(unsigned long node,
- const char *uname, int depth, void *data);
+int early_init_dt_scan_rtas(unsigned long node, const char *uname, int depth, void *data);
-extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
+void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
#ifdef CONFIG_PPC_PSERIES
extern time64_t last_rtas_event;
-extern int clobbering_unread_rtas_event(void);
-extern void post_mobility_fixup(void);
+int clobbering_unread_rtas_event(void);
int rtas_syscall_dispatch_ibm_suspend_me(u64 handle);
#else
static inline int clobbering_unread_rtas_event(void) { return 0; }
@@ -456,14 +467,14 @@ static inline int rtas_syscall_dispatch_ibm_suspend_me(u64 handle)
#endif
#ifdef CONFIG_PPC_RTAS_DAEMON
-extern void rtas_cancel_event_scan(void);
+void rtas_cancel_event_scan(void);
#else
static inline void rtas_cancel_event_scan(void) { }
#endif
/* Error types logged. */
#define ERR_FLAG_ALREADY_LOGGED 0x0
-#define ERR_FLAG_BOOT 0x1 /* log was pulled from NVRAM on boot */
+#define ERR_FLAG_BOOT 0x1 /* log was pulled from NVRAM on boot */
#define ERR_TYPE_RTAS_LOG 0x2 /* from rtas event-scan */
#define ERR_TYPE_KERNEL_PANIC 0x4 /* from die()/panic() */
#define ERR_TYPE_KERNEL_PANIC_GZ 0x8 /* ditto, compressed */
@@ -473,7 +484,7 @@ static inline void rtas_cancel_event_scan(void) { }
(ERR_TYPE_RTAS_LOG | ERR_TYPE_KERNEL_PANIC | ERR_TYPE_KERNEL_PANIC_GZ)
#define RTAS_DEBUG KERN_DEBUG "RTAS: "
-
+
#define RTAS_ERROR_LOG_MAX 2048
/*
@@ -481,7 +492,7 @@ static inline void rtas_cancel_event_scan(void) { }
* for all rtas calls that require an error buffer argument.
* This includes 'check-exception' and 'rtas-last-error'.
*/
-extern int rtas_get_error_log_max(void);
+int rtas_get_error_log_max(void);
/* Event Scan Parameters */
#define EVENT_SCAN_ALL_EVENTS 0xf0000000
@@ -502,6 +513,8 @@ extern char rtas_data_buf[RTAS_DATA_BUF_SIZE];
/* RMO buffer reserved for user-space RTAS use */
extern unsigned long rtas_rmo_buf;
+extern struct mutex rtas_ibm_get_vpd_lock;
+
#define GLOBAL_INTERRUPT_QUEUE 9005
/**
@@ -520,8 +533,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg)
(devfn << 8) | (reg & 0xff);
}
-extern void rtas_give_timebase(void);
-extern void rtas_take_timebase(void);
+void rtas_give_timebase(void);
+void rtas_take_timebase(void);
#ifdef CONFIG_PPC_RTAS
static inline int page_is_rtas_user_buf(unsigned long pfn)
@@ -534,7 +547,7 @@ static inline int page_is_rtas_user_buf(unsigned long pfn)
/* Not the best place to put pSeries_coalesce_init, will be fixed when we
* move some of the rtas suspend-me stuff to pseries */
-extern void pSeries_coalesce_init(void);
+void pSeries_coalesce_init(void);
void rtas_initialize(void);
#else
static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;}
@@ -542,8 +555,6 @@ static inline void pSeries_coalesce_init(void) { }
static inline void rtas_initialize(void) { }
#endif
-extern int call_rtas(const char *, int, int, unsigned long *, ...);
-
#ifdef CONFIG_HV_PERF_CTRS
void read_24x7_sys_info(void);
#else
diff --git a/arch/powerpc/include/uapi/asm/papr-miscdev.h b/arch/powerpc/include/uapi/asm/papr-miscdev.h
new file mode 100644
index 000000000000..49a2a270b7f3
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/papr-miscdev.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_PAPR_MISCDEV_H_
+#define _UAPI_PAPR_MISCDEV_H_
+
+enum {
+ PAPR_MISCDEV_IOC_ID = 0xb2,
+};
+
+#endif /* _UAPI_PAPR_MISCDEV_H_ */
diff --git a/arch/powerpc/include/uapi/asm/papr-sysparm.h b/arch/powerpc/include/uapi/asm/papr-sysparm.h
new file mode 100644
index 000000000000..9f9a0f267ea5
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/papr-sysparm.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_PAPR_SYSPARM_H_
+#define _UAPI_PAPR_SYSPARM_H_
+
+#include <linux/types.h>
+#include <asm/ioctl.h>
+#include <asm/papr-miscdev.h>
+
+enum {
+ PAPR_SYSPARM_MAX_INPUT = 1024,
+ PAPR_SYSPARM_MAX_OUTPUT = 4000,
+};
+
+struct papr_sysparm_io_block {
+ __u32 parameter;
+ __u16 length;
+ char data[PAPR_SYSPARM_MAX_OUTPUT];
+};
+
+/**
+ * PAPR_SYSPARM_IOC_GET - Retrieve the value of a PAPR system parameter.
+ *
+ * Uses _IOWR because of one corner case: Retrieving the value of the
+ * "OS Service Entitlement Status" parameter (60) requires the caller
+ * to supply input data (a date string) in the buffer passed to
+ * firmware. So the @length and @data of the incoming
+ * papr_sysparm_io_block are always used to initialize the work area
+ * supplied to ibm,get-system-parameter. No other parameters are known
+ * to parameterize the result this way, and callers are encouraged
+ * (but not required) to zero-initialize @length and @data in the
+ * common case.
+ *
+ * On error the contents of the ioblock are indeterminate.
+ *
+ * Return:
+ * 0: Success; @length is the length of valid data in @data, not to exceed @PAPR_SYSPARM_MAX_OUTPUT.
+ * -EIO: Platform error. (-1)
+ * -EINVAL: Incorrect data length or format. (-9999)
+ * -EPERM: The calling partition is not allowed to access this parameter. (-9002)
+ * -EOPNOTSUPP: Parameter not supported on this platform. (-3)
+ */
+#define PAPR_SYSPARM_IOC_GET _IOWR(PAPR_MISCDEV_IOC_ID, 1, struct papr_sysparm_io_block)
+
+/**
+ * PAPR_SYSPARM_IOC_SET - Update the value of a PAPR system parameter.
+ *
+ * The contents of the ioblock are unchanged regardless of success.
+ *
+ * Return:
+ * 0: Success; the parameter has been updated.
+ * -EIO: Platform error. (-1)
+ * -EINVAL: Incorrect data length or format. (-9999)
+ * -EPERM: The calling partition is not allowed to access this parameter. (-9002)
+ * -EOPNOTSUPP: Parameter not supported on this platform. (-3)
+ */
+#define PAPR_SYSPARM_IOC_SET _IOW(PAPR_MISCDEV_IOC_ID, 2, struct papr_sysparm_io_block)
+
+#endif /* _UAPI_PAPR_SYSPARM_H_ */
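As a rough user-space illustration of the GET ioctl documented above (the character device path and the parameter number are assumptions for the sketch; the header only defines the ioctl numbers and the io block):

	#include <err.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <asm/papr-sysparm.h>

	static void get_sysparm_example(void)
	{
		struct papr_sysparm_io_block ioblock = { .parameter = 20 };	/* illustrative only */
		int fd = open("/dev/papr-sysparm", O_RDWR);	/* device path assumed */

		if (fd < 0 || ioctl(fd, PAPR_SYSPARM_IOC_GET, &ioblock) == -1)
			err(1, "PAPR_SYSPARM_IOC_GET");
		printf("%u bytes of parameter data returned\n", ioblock.length);
	}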
diff --git a/arch/powerpc/include/uapi/asm/papr-vpd.h b/arch/powerpc/include/uapi/asm/papr-vpd.h
new file mode 100644
index 000000000000..1c88e87cb420
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/papr-vpd.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_PAPR_VPD_H_
+#define _UAPI_PAPR_VPD_H_
+
+#include <asm/ioctl.h>
+#include <asm/papr-miscdev.h>
+
+struct papr_location_code {
+ /*
+ * PAPR+ v2.13 12.3.2.4 Converged Location Code Rules - Length
+ * Restrictions. 79 characters plus nul.
+ */
+ char str[80];
+};
+
+/*
+ * ioctl for /dev/papr-vpd. Returns a VPD handle fd corresponding to
+ * the location code.
+ */
+#define PAPR_VPD_IOC_CREATE_HANDLE _IOW(PAPR_MISCDEV_IOC_ID, 0, struct papr_location_code)
+
+#endif /* _UAPI_PAPR_VPD_H_ */
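A corresponding sketch for the VPD handle flow; the device path, the use of an empty location code to request all VPD, and the read loop are assumptions about intended usage, not requirements stated in this header:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <asm/papr-vpd.h>

	static void dump_vpd_example(void)
	{
		struct papr_location_code lc = { .str = "" };	/* empty string assumed to mean "all VPD" */
		int dev_fd = open("/dev/papr-vpd", O_RDONLY);	/* device path assumed */
		int vpd_fd = ioctl(dev_fd, PAPR_VPD_IOC_CREATE_HANDLE, &lc);
		char buf[4096];
		ssize_t n;

		if (dev_fd < 0 || vpd_fd < 0)
			return;
		while ((n = read(vpd_fd, buf, sizeof(buf))) > 0)
			;	/* consume VPD bytes */
		close(vpd_fd);
		close(dev_fd);
	}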
diff --git a/arch/powerpc/kernel/cpu_specs_book3s_64.h b/arch/powerpc/kernel/cpu_specs_book3s_64.h
index c370c1b804a9..3ff9757df4c0 100644
--- a/arch/powerpc/kernel/cpu_specs_book3s_64.h
+++ b/arch/powerpc/kernel/cpu_specs_book3s_64.h
@@ -238,6 +238,21 @@ static struct cpu_spec cpu_specs[] __initdata = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
+ { /* 2.07-compliant processor, HeXin C2000 processor */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00660000,
+ .cpu_name = "HX-C2000",
+ .cpu_features = CPU_FTRS_POWER8,
+ .cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
+ .mmu_features = MMU_FTRS_POWER8,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power8,
+ .cpu_restore = __restore_cpu_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
+ .platform = "power8",
+ },
{ /* 3.00-compliant processor, i.e. Power9 "architected" mode */
.pvr_mask = 0xffffffff,
.pvr_value = 0x0f000005,
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index e97a0fd0ae90..6f6801da9dc1 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -20,9 +20,9 @@
#include <asm/setup.h>
#include <asm/cpu_setup.h>
-static struct cpu_spec the_cpu_spec __read_mostly;
+static struct cpu_spec the_cpu_spec __ro_after_init;
-struct cpu_spec* cur_cpu_spec __read_mostly = NULL;
+struct cpu_spec *cur_cpu_spec __ro_after_init = NULL;
EXPORT_SYMBOL(cur_cpu_spec);
/* The platform string corresponding to the real PVR */
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 7ab4c8c0f1ab..dcf0591ad3c2 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -14,7 +14,6 @@
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/thread_info.h>
-#include <asm/reg_a2.h>
#include <asm/exception-64e.h>
#include <asm/bug.h>
#include <asm/irqflags.h>
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index eddc031c4b95..7e793b503e29 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/reboot.h>
@@ -70,14 +71,33 @@ struct rtas_filter {
* ppc64le, and we want to keep it that way. It does
* not make sense for this to be set when @filter
* is NULL.
+ * @lock: Pointer to an optional dedicated per-function mutex. This
+ * should be set for functions that require multiple calls in
+ * sequence to complete a single operation, where such sequences
+ * would disrupt each other if allowed to interleave. Users of
+ * this function are required to hold the associated lock for
+ * the duration of the call sequence. Add an explanatory
+ * comment to the function table entry if setting this member.
*/
struct rtas_function {
s32 token;
const bool banned_for_syscall_on_le:1;
const char * const name;
const struct rtas_filter *filter;
+ struct mutex *lock;
};
+/*
+ * Per-function locks for sequence-based RTAS functions.
+ */
+static DEFINE_MUTEX(rtas_ibm_activate_firmware_lock);
+static DEFINE_MUTEX(rtas_ibm_get_dynamic_sensor_state_lock);
+static DEFINE_MUTEX(rtas_ibm_get_indices_lock);
+static DEFINE_MUTEX(rtas_ibm_lpar_perftools_lock);
+static DEFINE_MUTEX(rtas_ibm_physical_attestation_lock);
+static DEFINE_MUTEX(rtas_ibm_set_dynamic_indicator_lock);
+DEFINE_MUTEX(rtas_ibm_get_vpd_lock);
+
static struct rtas_function rtas_function_table[] __ro_after_init = {
[RTAS_FNIDX__CHECK_EXCEPTION] = {
.name = "check-exception",
@@ -125,6 +145,13 @@ static struct rtas_function rtas_function_table[] __ro_after_init = {
.buf_idx1 = -1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
+ /*
+ * PAPR+ as of v2.13 doesn't explicitly impose any
+ * restriction, but this typically requires multiple
+ * calls before success, and there's no reason to
+ * allow sequences to interleave.
+ */
+ .lock = &rtas_ibm_activate_firmware_lock,
},
[RTAS_FNIDX__IBM_CBE_START_PTCAL] = {
.name = "ibm,cbe-start-ptcal",
@@ -196,6 +223,13 @@ static struct rtas_function rtas_function_table[] __ro_after_init = {
.buf_idx1 = 1, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
+ /*
+ * PAPR+ v2.13 R1–7.3.19–3 is explicit that the OS
+ * must not call ibm,get-dynamic-sensor-state with
+ * different inputs until a non-retry status has been
+ * returned.
+ */
+ .lock = &rtas_ibm_get_dynamic_sensor_state_lock,
},
[RTAS_FNIDX__IBM_GET_INDICES] = {
.name = "ibm,get-indices",
@@ -203,6 +237,12 @@ static struct rtas_function rtas_function_table[] __ro_after_init = {
.buf_idx1 = 2, .size_idx1 = 3,
.buf_idx2 = -1, .size_idx2 = -1,
},
+ /*
+ * PAPR+ v2.13 R1–7.3.17–2 says that the OS must not
+ * interleave ibm,get-indices call sequences with
+ * different inputs.
+ */
+ .lock = &rtas_ibm_get_indices_lock,
},
[RTAS_FNIDX__IBM_GET_RIO_TOPOLOGY] = {
.name = "ibm,get-rio-topology",
@@ -220,6 +260,11 @@ static struct rtas_function rtas_function_table[] __ro_after_init = {
.buf_idx1 = 0, .size_idx1 = -1,
.buf_idx2 = 1, .size_idx2 = 2,
},
+ /*
+ * PAPR+ v2.13 R1–7.3.20–4 indicates that sequences
+ * should not be allowed to interleave.
+ */
+ .lock = &rtas_ibm_get_vpd_lock,
},
[RTAS_FNIDX__IBM_GET_XIVE] = {
.name = "ibm,get-xive",
@@ -239,6 +284,11 @@ static struct rtas_function rtas_function_table[] __ro_after_init = {
.buf_idx1 = 2, .size_idx1 = 3,
.buf_idx2 = -1, .size_idx2 = -1,
},
+ /*
+ * PAPR+ v2.13 R1–7.3.26–6 says the OS should allow
+ * only one call sequence in progress at a time.
+ */
+ .lock = &rtas_ibm_lpar_perftools_lock,
},
[RTAS_FNIDX__IBM_MANAGE_FLASH_IMAGE] = {
.name = "ibm,manage-flash-image",
@@ -277,6 +327,14 @@ static struct rtas_function rtas_function_table[] __ro_after_init = {
.buf_idx1 = 0, .size_idx1 = 1,
.buf_idx2 = -1, .size_idx2 = -1,
},
+ /*
+ * This follows a sequence-based pattern similar to
+ * ibm,get-vpd et al. Since PAPR+ restricts
+ * interleaving call sequences for other functions of
+ * this style, assume the restriction applies here,
+ * even though it's not explicit in the spec.
+ */
+ .lock = &rtas_ibm_physical_attestation_lock,
},
[RTAS_FNIDX__IBM_PLATFORM_DUMP] = {
.name = "ibm,platform-dump",
@@ -284,6 +342,13 @@ static struct rtas_function rtas_function_table[] __ro_after_init = {
.buf_idx1 = 4, .size_idx1 = 5,
.buf_idx2 = -1, .size_idx2 = -1,
},
+ /*
+ * PAPR+ v2.13 7.3.3.4.1 indicates that concurrent
+ * sequences of ibm,platform-dump are allowed if they
+ * are operating on different dump tags. So leave the
+ * lock pointer unset for now. This may need
+ * reconsideration if kernel-internal users appear.
+ */
},
[RTAS_FNIDX__IBM_POWER_OFF_UPS] = {
.name = "ibm,power-off-ups",
@@ -326,6 +391,12 @@ static struct rtas_function rtas_function_table[] __ro_after_init = {
.buf_idx1 = 2, .size_idx1 = -1,
.buf_idx2 = -1, .size_idx2 = -1,
},
+ /*
+ * PAPR+ v2.13 R1–7.3.18–3 says the OS must not call
+ * this function with different inputs until a
+ * non-retry status has been returned.
+ */
+ .lock = &rtas_ibm_set_dynamic_indicator_lock,
},
[RTAS_FNIDX__IBM_SET_EEH_OPTION] = {
.name = "ibm,set-eeh-option",
@@ -454,6 +525,11 @@ static struct rtas_function rtas_function_table[] __ro_after_init = {
},
};
+#define for_each_rtas_function(funcp) \
+ for (funcp = &rtas_function_table[0]; \
+ funcp < &rtas_function_table[ARRAY_SIZE(rtas_function_table)]; \
+ ++funcp)
+
/*
* Nearly all RTAS calls need to be serialized. All uses of the
* default rtas_args block must hold rtas_lock.
@@ -525,10 +601,10 @@ static DEFINE_XARRAY(rtas_token_to_function_xarray);
static int __init rtas_token_to_function_xarray_init(void)
{
+ const struct rtas_function *func;
int err = 0;
- for (size_t i = 0; i < ARRAY_SIZE(rtas_function_table); ++i) {
- const struct rtas_function *func = &rtas_function_table[i];
+ for_each_rtas_function(func) {
const s32 token = func->token;
if (token == RTAS_UNKNOWN_SERVICE)
@@ -544,6 +620,21 @@ static int __init rtas_token_to_function_xarray_init(void)
}
arch_initcall(rtas_token_to_function_xarray_init);
+/*
+ * For use by sys_rtas(), where the token value is provided by user
+ * space and we don't want to warn on failed lookups.
+ */
+static const struct rtas_function *rtas_token_to_function_untrusted(s32 token)
+{
+ return xa_load(&rtas_token_to_function_xarray, token);
+}
+
+/*
+ * Reverse lookup for deriving the function descriptor from a
+ * known-good token value in contexts where the descriptor is not
+ * already available. @token must be valid, e.g. derived from the result of a
+ * prior lookup against the function table.
+ */
static const struct rtas_function *rtas_token_to_function(s32 token)
{
const struct rtas_function *func;
@@ -551,12 +642,22 @@ static const struct rtas_function *rtas_token_to_function(s32 token)
if (WARN_ONCE(token < 0, "invalid token %d", token))
return NULL;
- func = xa_load(&rtas_token_to_function_xarray, token);
-
- if (WARN_ONCE(!func, "unexpected failed lookup for token %d", token))
- return NULL;
+ func = rtas_token_to_function_untrusted(token);
+ if (func)
+ return func;
+ /*
+ * Fall back to linear scan in case the reverse mapping hasn't
+ * been initialized yet.
+ */
+ if (xa_empty(&rtas_token_to_function_xarray)) {
+ for_each_rtas_function(func) {
+ if (func->token == token)
+ return func;
+ }
+ }
- return func;
+ WARN_ONCE(true, "unexpected failed lookup for token %d", token);
+ return NULL;
}
/* This is here deliberately so it's only used in this file */
@@ -570,28 +671,25 @@ static void __do_enter_rtas(struct rtas_args *args)
static void __do_enter_rtas_trace(struct rtas_args *args)
{
- const char *name = NULL;
+ const struct rtas_function *func = rtas_token_to_function(be32_to_cpu(args->token));
- if (args == &rtas_args)
- lockdep_assert_held(&rtas_lock);
/*
- * If the tracepoints that consume the function name aren't
- * active, avoid the lookup.
+ * If there is a per-function lock, it must be held by the
+ * caller.
*/
- if ((trace_rtas_input_enabled() || trace_rtas_output_enabled())) {
- const s32 token = be32_to_cpu(args->token);
- const struct rtas_function *func = rtas_token_to_function(token);
+ if (func->lock)
+ lockdep_assert_held(func->lock);
- name = func->name;
- }
+ if (args == &rtas_args)
+ lockdep_assert_held(&rtas_lock);
- trace_rtas_input(args, name);
+ trace_rtas_input(args, func->name);
trace_rtas_ll_entry(args);
__do_enter_rtas(args);
trace_rtas_ll_exit(args);
- trace_rtas_output(args, name);
+ trace_rtas_output(args, func->name);
}
static void do_enter_rtas(struct rtas_args *args)
@@ -670,7 +768,7 @@ static void call_rtas_display_status_delay(char c)
static int pending_newline = 0; /* did last write end with unprinted newline? */
static int width = 16;
- if (c == '\n') {
+ if (c == '\n') {
while (width-- > 0)
call_rtas_display_status(' ');
width = 16;
@@ -680,7 +778,7 @@ static void call_rtas_display_status_delay(char c)
if (pending_newline) {
call_rtas_display_status('\r');
call_rtas_display_status('\n');
- }
+ }
pending_newline = 0;
if (width--) {
call_rtas_display_status(c);
@@ -820,7 +918,7 @@ void rtas_progress(char *s, unsigned short hex)
else
rtas_call(display_character, 1, 1, NULL, '\r');
}
-
+
if (row_width)
width = row_width[current_line];
else
@@ -840,9 +938,9 @@ void rtas_progress(char *s, unsigned short hex)
spin_unlock(&progress_lock);
return;
}
-
+
/* RTAS wants CR-LF, not just LF */
-
+
if (*os == '\n') {
rtas_call(display_character, 1, 1, NULL, '\r');
rtas_call(display_character, 1, 1, NULL, '\n');
@@ -852,7 +950,7 @@ void rtas_progress(char *s, unsigned short hex)
*/
rtas_call(display_character, 1, 1, NULL, *os);
}
-
+
if (row_width)
width = row_width[current_line];
else
@@ -861,15 +959,15 @@ void rtas_progress(char *s, unsigned short hex)
width--;
rtas_call(display_character, 1, 1, NULL, *os);
}
-
+
os++;
-
+
/* if we overwrite the screen length */
if (width <= 0)
while ((*os != 0) && (*os != '\n') && (*os != '\r'))
os++;
}
-
+
spin_unlock(&progress_lock);
}
EXPORT_SYMBOL_GPL(rtas_progress); /* needed by rtas_flash module */
@@ -900,11 +998,6 @@ int rtas_token(const char *service)
}
EXPORT_SYMBOL_GPL(rtas_token);
-int rtas_service_present(const char *service)
-{
- return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
-}
-
#ifdef CONFIG_RTAS_ERROR_LOGGING
static u32 rtas_error_log_max __ro_after_init = RTAS_ERROR_LOG_MAX;
@@ -1638,10 +1731,14 @@ void rtas_activate_firmware(void)
return;
}
+ mutex_lock(&rtas_ibm_activate_firmware_lock);
+
do {
fwrc = rtas_call(token, 0, 1, NULL);
} while (rtas_busy_delay(fwrc));
+ mutex_unlock(&rtas_ibm_activate_firmware_lock);
+
if (fwrc)
pr_err("ibm,activate-firmware failed (%i)\n", fwrc);
}
@@ -1713,24 +1810,18 @@ static bool in_rmo_buf(u32 base, u32 end)
end < (rtas_rmo_buf + RTAS_USER_REGION_SIZE);
}
-static bool block_rtas_call(int token, int nargs,
+static bool block_rtas_call(const struct rtas_function *func, int nargs,
struct rtas_args *args)
{
- const struct rtas_function *func;
const struct rtas_filter *f;
- const bool is_platform_dump = token == rtas_function_token(RTAS_FN_IBM_PLATFORM_DUMP);
- const bool is_config_conn = token == rtas_function_token(RTAS_FN_IBM_CONFIGURE_CONNECTOR);
+ const bool is_platform_dump =
+ func == &rtas_function_table[RTAS_FNIDX__IBM_PLATFORM_DUMP];
+ const bool is_config_conn =
+ func == &rtas_function_table[RTAS_FNIDX__IBM_CONFIGURE_CONNECTOR];
u32 base, size, end;
/*
- * If this token doesn't correspond to a function the kernel
- * understands, you're not allowed to call it.
- */
- func = rtas_token_to_function(token);
- if (!func)
- goto err;
- /*
- * And only functions with filters attached are allowed.
+ * Only functions with filters attached are allowed.
*/
f = func->filter;
if (!f)
@@ -1787,14 +1878,15 @@ static bool block_rtas_call(int token, int nargs,
return false;
err:
pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n");
- pr_err_ratelimited("sys_rtas: token=0x%x, nargs=%d (called by %s)\n",
- token, nargs, current->comm);
+ pr_err_ratelimited("sys_rtas: %s nargs=%d (called by %s)\n",
+ func->name, nargs, current->comm);
return true;
}
/* We assume to be passed big endian arguments */
SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
{
+ const struct rtas_function *func;
struct pin_cookie cookie;
struct rtas_args args;
unsigned long flags;
@@ -1824,13 +1916,18 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
nargs * sizeof(rtas_arg_t)) != 0)
return -EFAULT;
- if (token == RTAS_UNKNOWN_SERVICE)
+ /*
+ * If this token doesn't correspond to a function the kernel
+ * understands, you're not allowed to call it.
+ */
+ func = rtas_token_to_function_untrusted(token);
+ if (!func)
return -EINVAL;
args.rets = &args.args[nargs];
memset(args.rets, 0, nret * sizeof(rtas_arg_t));
- if (block_rtas_call(token, nargs, &args))
+ if (block_rtas_call(func, nargs, &args))
return -EINVAL;
if (token_is_restricted_errinjct(token)) {
@@ -1863,6 +1960,15 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
buff_copy = get_errorlog_buffer();
+ /*
+ * If this function has a mutex assigned to it, we must
+ * acquire it to avoid interleaving with any kernel-based uses
+ * of the same function. Kernel-based sequences acquire the
+ * appropriate mutex explicitly.
+ */
+ if (func->lock)
+ mutex_lock(func->lock);
+
raw_spin_lock_irqsave(&rtas_lock, flags);
cookie = lockdep_pin_lock(&rtas_lock);
@@ -1878,6 +1984,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
lockdep_unpin_lock(&rtas_lock, cookie);
raw_spin_unlock_irqrestore(&rtas_lock, flags);
+ if (func->lock)
+ mutex_unlock(func->lock);
+
if (buff_copy) {
if (errbuf)
log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
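As implied by the rtas_function::lock documentation earlier in this file, kernel-internal users of a sequence-based function are expected to hold the matching mutex across the whole call sequence; a hedged sketch of that pattern (the argument and return counts are placeholders, not the real ibm,get-vpd calling convention):

	mutex_lock(&rtas_ibm_get_vpd_lock);
	do {
		/* One step of the multi-call sequence; arg/ret counts are hypothetical. */
		status = rtas_call(token, 4, 3, rets, arg0, arg1, arg2, arg3);
	} while (rtas_busy_delay(status));
	mutex_unlock(&rtas_ibm_get_vpd_lock);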
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index e1fdc7473b72..fccf96e897f6 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -43,7 +43,7 @@ static inline int config_access_valid(struct pci_dn *dn, int where)
return 0;
}
-int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
+int rtas_pci_dn_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
int returnval = -1;
unsigned long buid, addr;
@@ -87,7 +87,7 @@ static int rtas_pci_read_config(struct pci_bus *bus,
pdn = pci_get_pdn_by_devfn(bus, devfn);
/* Validity of pdn is checked in here */
- ret = rtas_read_config(pdn, where, size, val);
+ ret = rtas_pci_dn_read_config(pdn, where, size, val);
if (*val == EEH_IO_ERROR_VALUE(size) &&
eeh_dev_check_failure(pdn_to_eeh_dev(pdn)))
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -95,7 +95,7 @@ static int rtas_pci_read_config(struct pci_bus *bus,
return ret;
}
-int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val)
+int rtas_pci_dn_write_config(struct pci_dn *pdn, int where, int size, u32 val)
{
unsigned long buid, addr;
int ret;
@@ -134,7 +134,7 @@ static int rtas_pci_write_config(struct pci_bus *bus,
pdn = pci_get_pdn_by_devfn(bus, devfn);
/* Validity of pdn is checked in here. */
- return rtas_write_config(pdn, where, size, val);
+ return rtas_pci_dn_write_config(pdn, where, size, val);
}
static struct pci_ops rtas_pci_ops = {
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ab691c89d787..693334c20d07 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -77,10 +77,10 @@ static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif
struct task_struct *secondary_current;
-bool has_big_cores;
-bool coregroup_enabled;
-bool thread_group_shares_l2;
-bool thread_group_shares_l3;
+bool has_big_cores __ro_after_init;
+bool coregroup_enabled __ro_after_init;
+bool thread_group_shares_l2 __ro_after_init;
+bool thread_group_shares_l3 __ro_after_init;
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
@@ -93,15 +93,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);
-enum {
-#ifdef CONFIG_SCHED_SMT
- smt_idx,
-#endif
- cache_idx,
- mc_idx,
- die_idx,
-};
-
#define MAX_THREAD_LIST_SIZE 8
#define THREAD_GROUP_SHARE_L1 1
#define THREAD_GROUP_SHARE_L2_L3 2
@@ -987,7 +978,7 @@ static int __init init_thread_group_cache_map(int cpu, int cache_property)
return 0;
}
-static bool shared_caches;
+static bool shared_caches __ro_after_init;
#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
@@ -1004,6 +995,13 @@ static int powerpc_smt_flags(void)
#endif
/*
+ * On shared processor LPARs scheduled on a big core (which has two or more
+ * independent thread groups per core), prefer lower numbered CPUs, so
+ * that the workload consolidates onto fewer cores.
+ */
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(splpar_asym_pack);
+
+/*
* P9 has a slightly odd architecture where pairs of cores share an L2 cache.
* This topology makes it *much* cheaper to migrate tasks between adjacent cores
* since the migrated task remains cache hot. We want to take advantage of this
@@ -1011,9 +1009,20 @@ static int powerpc_smt_flags(void)
*/
static int powerpc_shared_cache_flags(void)
{
+ if (static_branch_unlikely(&splpar_asym_pack))
+ return SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING;
+
return SD_SHARE_PKG_RESOURCES;
}
+static int powerpc_shared_proc_flags(void)
+{
+ if (static_branch_unlikely(&splpar_asym_pack))
+ return SD_ASYM_PACKING;
+
+ return 0;
+}
+
/*
* We can't just pass cpu_l2_cache_mask() directly because
* returns a non-const pointer and the compiler barfs on that.
@@ -1037,6 +1046,10 @@ static struct cpumask *cpu_coregroup_mask(int cpu)
static bool has_coregroup_support(void)
{
+ /* Coregroup identification not available on shared systems */
+ if (is_shared_processor())
+ return false;
+
return coregroup_enabled;
}
@@ -1045,16 +1058,6 @@ static const struct cpumask *cpu_mc_mask(int cpu)
return cpu_coregroup_mask(cpu);
}
-static struct sched_domain_topology_level powerpc_topology[] = {
-#ifdef CONFIG_SCHED_SMT
- { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
-#endif
- { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
- { cpu_mc_mask, SD_INIT_NAME(MC) },
- { cpu_cpu_mask, SD_INIT_NAME(PKG) },
- { NULL, },
-};
-
static int __init init_big_cores(void)
{
int cpu;
@@ -1682,43 +1685,45 @@ void start_secondary(void *unused)
BUG();
}
-static void __init fixup_topology(void)
+static struct sched_domain_topology_level powerpc_topology[6];
+
+static void __init build_sched_topology(void)
{
- int i;
+ int i = 0;
+
+ if (is_shared_processor() && has_big_cores)
+ static_branch_enable(&splpar_asym_pack);
#ifdef CONFIG_SCHED_SMT
if (has_big_cores) {
pr_info("Big cores detected but using small core scheduling\n");
- powerpc_topology[smt_idx].mask = smallcore_smt_mask;
+ powerpc_topology[i++] = (struct sched_domain_topology_level){
+ smallcore_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
+ };
+ } else {
+ powerpc_topology[i++] = (struct sched_domain_topology_level){
+ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
+ };
}
#endif
+ if (shared_caches) {
+ powerpc_topology[i++] = (struct sched_domain_topology_level){
+ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE)
+ };
+ }
+ if (has_coregroup_support()) {
+ powerpc_topology[i++] = (struct sched_domain_topology_level){
+ cpu_mc_mask, powerpc_shared_proc_flags, SD_INIT_NAME(MC)
+ };
+ }
+ powerpc_topology[i++] = (struct sched_domain_topology_level){
+ cpu_cpu_mask, powerpc_shared_proc_flags, SD_INIT_NAME(PKG)
+ };
- if (!has_coregroup_support())
- powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
-
- /*
- * Try to consolidate topology levels here instead of
- * allowing scheduler to degenerate.
- * - Dont consolidate if masks are different.
- * - Dont consolidate if sd_flags exists and are different.
- */
- for (i = 1; i <= die_idx; i++) {
- if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
- continue;
-
- if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
- powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
- continue;
-
- if (!powerpc_topology[i - 1].sd_flags)
- powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;
+ /* There must be one trailing NULL entry left. */
+ BUG_ON(i >= ARRAY_SIZE(powerpc_topology) - 1);
- powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
- powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
-#ifdef CONFIG_SCHED_DEBUG
- powerpc_topology[i].name = powerpc_topology[i + 1].name;
-#endif
- }
+ set_sched_topology(powerpc_topology);
}
void __init smp_cpus_done(unsigned int max_cpus)
@@ -1733,9 +1738,20 @@ void __init smp_cpus_done(unsigned int max_cpus)
smp_ops->bringup_done();
dump_numa_cpu_topology();
+ build_sched_topology();
+}
- fixup_topology();
- set_sched_topology(powerpc_topology);
+/*
+ * For asym packing, the lower numbered CPU has higher priority by default.
+ * On shared processors, pack to the lower numbered core, but avoid moving
+ * tasks between thread_groups within the same core.
+ */
+int arch_asym_cpu_priority(int cpu)
+{
+ if (static_branch_unlikely(&splpar_asym_pack))
+ return -cpu / threads_per_core;
+
+ return -cpu;
}
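For example, with threads_per_core = 8 on a shared-processor LPAR this maps CPUs 0-7 to priority 0 and CPUs 8-15 to priority -1, so asym packing fills the thread groups of one core before spilling work onto the next; on dedicated systems every CPU keeps a distinct priority of -cpu.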
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/powerpc/kernel/swsusp_64.c b/arch/powerpc/kernel/swsusp_64.c
index 16ee3baaf09a..50fa8fc9ef95 100644
--- a/arch/powerpc/kernel/swsusp_64.c
+++ b/arch/powerpc/kernel/swsusp_64.c
@@ -11,6 +11,8 @@
#include <linux/interrupt.h>
#include <linux/nmi.h>
+void do_after_copyback(void);
+
void do_after_copyback(void)
{
iommu_restore();
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 7fab411378f2..17173b82ca21 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -543,3 +543,8 @@
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
diff --git a/arch/powerpc/kernel/trace/ftrace_entry.S b/arch/powerpc/kernel/trace/ftrace_entry.S
index 40677416d7b2..76dbe9fd2c0f 100644
--- a/arch/powerpc/kernel/trace/ftrace_entry.S
+++ b/arch/powerpc/kernel/trace/ftrace_entry.S
@@ -162,7 +162,6 @@ _GLOBAL(ftrace_regs_caller)
.globl ftrace_regs_call
ftrace_regs_call:
bl ftrace_stub
- nop
ftrace_regs_exit 1
_GLOBAL(ftrace_caller)
@@ -171,7 +170,6 @@ _GLOBAL(ftrace_caller)
.globl ftrace_call
ftrace_call:
bl ftrace_stub
- nop
ftrace_regs_exit 0
_GLOBAL(ftrace_stub)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 5ea2014aff90..11e062b47d3f 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1439,10 +1439,12 @@ static int emulate_instruction(struct pt_regs *regs)
return -EINVAL;
}
+#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
return is_kernel_addr(addr);
}
+#endif
#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 74ddf836f7a2..a0467e528b70 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -7,7 +7,6 @@
#include <linux/types.h>
#include <asm/udbg.h>
#include <asm/io.h>
-#include <asm/reg_a2.h>
#include <asm/early_ioremap.h>
extern u8 real_readb(volatile u8 __iomem *addr);
diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile
index 0c7d82c270c3..1b93655c2857 100644
--- a/arch/powerpc/kernel/vdso/Makefile
+++ b/arch/powerpc/kernel/vdso/Makefile
@@ -71,7 +71,7 @@ AS64FLAGS := -D__VDSO64__
targets += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -Upowerpc
targets += vdso64.lds
-CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
+CPPFLAGS_vdso64.lds += -P -C
# link rule for the .so file, .lds has to be first
$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday-32.o FORCE
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 85846cadb9b5..27fa9098a5b7 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -75,6 +75,7 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_OFFSET(mmu_psize_def, shift);
#endif
VMCOREINFO_SYMBOL(cur_cpu_spec);
+ VMCOREINFO_OFFSET(cpu_spec, cpu_features);
VMCOREINFO_OFFSET(cpu_spec, mmu_features);
vmcoreinfo_append_str("NUMBER(RADIX_MMU)=%d\n", early_radix_enabled());
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index 0bee7ca9a77c..762e4d09aacf 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -283,8 +283,7 @@ static void kexec_prepare_cpus(void)
* We could use a smaller stack if we don't care about anything using
* current, but that audit has not been performed.
*/
-static union thread_union kexec_stack __init_task_data =
- { };
+static union thread_union kexec_stack = { };
/*
* For similar reasons to the stack above, the kexecing CPU needs to be on a
diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c
index eeb258002d1e..904016cf89ea 100644
--- a/arch/powerpc/kexec/elf_64.c
+++ b/arch/powerpc/kexec/elf_64.c
@@ -59,7 +59,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
if (ret)
goto out;
- pr_debug("Loaded the kernel at 0x%lx\n", kernel_load_addr);
+ kexec_dprintk("Loaded the kernel at 0x%lx\n", kernel_load_addr);
ret = kexec_load_purgatory(image, &pbuf);
if (ret) {
@@ -67,7 +67,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
goto out;
}
- pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);
+ kexec_dprintk("Loaded purgatory at 0x%lx\n", pbuf.mem);
/* Load additional segments needed for panic kernel */
if (image->type == KEXEC_TYPE_CRASH) {
@@ -99,7 +99,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
goto out;
initrd_load_addr = kbuf.mem;
- pr_debug("Loaded initrd at 0x%lx\n", initrd_load_addr);
+ kexec_dprintk("Loaded initrd at 0x%lx\n", initrd_load_addr);
}
fdt = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
@@ -132,7 +132,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
fdt_load_addr = kbuf.mem;
- pr_debug("Loaded device tree at 0x%lx\n", fdt_load_addr);
+ kexec_dprintk("Loaded device tree at 0x%lx\n", fdt_load_addr);
slave_code = elf_info.buffer + elf_info.proghdrs[0].p_offset;
ret = setup_purgatory_ppc64(image, slave_code, fdt, kernel_load_addr,
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index 961a6dd67365..5b4c5cb23354 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -577,7 +577,7 @@ static int add_usable_mem_property(void *fdt, struct device_node *dn,
NODE_PATH_LEN, dn);
return -EOVERFLOW;
}
- pr_debug("Memory node path: %s\n", path);
+ kexec_dprintk("Memory node path: %s\n", path);
/* Now that we know the path, find its offset in kdump kernel's fdt */
node = fdt_path_offset(fdt, path);
@@ -590,8 +590,8 @@ static int add_usable_mem_property(void *fdt, struct device_node *dn,
/* Get the address & size cells */
n_mem_addr_cells = of_n_addr_cells(dn);
n_mem_size_cells = of_n_size_cells(dn);
- pr_debug("address cells: %d, size cells: %d\n", n_mem_addr_cells,
- n_mem_size_cells);
+ kexec_dprintk("address cells: %d, size cells: %d\n", n_mem_addr_cells,
+ n_mem_size_cells);
um_info->idx = 0;
if (!check_realloc_usable_mem(um_info, 2)) {
@@ -664,7 +664,7 @@ static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
if (node == -FDT_ERR_NOTFOUND)
- pr_debug("No dynamic reconfiguration memory found\n");
+ kexec_dprintk("No dynamic reconfiguration memory found\n");
else if (node < 0) {
pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n");
return -EINVAL;
@@ -776,8 +776,8 @@ static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr)
for (i = 0; i < ehdr->e_phnum; i++) {
if (phdr->p_paddr == BACKUP_SRC_START) {
phdr->p_offset = image->arch.backup_start;
- pr_debug("Backup region offset updated to 0x%lx\n",
- image->arch.backup_start);
+ kexec_dprintk("Backup region offset updated to 0x%lx\n",
+ image->arch.backup_start);
return;
}
}
@@ -850,7 +850,7 @@ int load_crashdump_segments_ppc64(struct kimage *image,
pr_err("Failed to load backup segment\n");
return ret;
}
- pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem);
+ kexec_dprintk("Loaded the backup region at 0x%lx\n", kbuf->mem);
/* Load elfcorehdr segment - to export crashing kernel's vmcore */
ret = load_elfcorehdr_segment(image, kbuf);
@@ -858,8 +858,8 @@ int load_crashdump_segments_ppc64(struct kimage *image,
pr_err("Failed to load elfcorehdr segment\n");
return ret;
}
- pr_debug("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
- image->elf_load_addr, kbuf->bufsz, kbuf->memsz);
+ kexec_dprintk("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
+ image->elf_load_addr, kbuf->bufsz, kbuf->memsz);
return 0;
}
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 6cd20ab9e94e..8acec144120e 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -302,11 +302,11 @@ static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
switch (priority) {
case BOOK3S_IRQPRIO_DECREMENTER:
- deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
+ deliver = !kvmhv_is_nestedv2() && (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
vec = BOOK3S_INTERRUPT_DECREMENTER;
break;
case BOOK3S_IRQPRIO_EXTERNAL:
- deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
+ deliver = !kvmhv_is_nestedv2() && (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
vec = BOOK3S_INTERRUPT_EXTERNAL;
break;
case BOOK3S_IRQPRIO_SYSTEM_RESET:
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 175a8eb2681f..4a1abb9f7c05 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -40,6 +40,9 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
unsigned long quadrant, ret = n;
bool is_load = !!to;
+ if (kvmhv_is_nestedv2())
+ return H_UNSUPPORTED;
+
/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
if (kvmhv_on_pseries())
return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
@@ -97,7 +100,7 @@ static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
void *to, void *from, unsigned long n)
{
int lpid = vcpu->kvm->arch.lpid;
- int pid = kvmppc_get_pid(vcpu);
+ int pid;
/* This would cause a data segment intr so don't allow the access */
if (eaddr & (0x3FFUL << 52))
@@ -110,6 +113,8 @@ static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
/* If accessing quadrant 3 then pid is expected to be 0 */
if (((eaddr >> 62) & 0x3) == 0x3)
pid = 0;
+ else
+ pid = kvmppc_get_pid(vcpu);
eaddr &= ~(0xFFFUL << 52);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 1ed6ec140701..e48126a59ba7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -650,7 +650,8 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
return err;
}
-static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
+static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap,
+ struct kvmppc_vpa *old_vpap)
{
struct kvm *kvm = vcpu->kvm;
void *va;
@@ -690,9 +691,8 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
kvmppc_unpin_guest_page(kvm, va, gpa, false);
va = NULL;
}
- if (vpap->pinned_addr)
- kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
- vpap->dirty);
+ *old_vpap = *vpap;
+
vpap->gpa = gpa;
vpap->pinned_addr = va;
vpap->dirty = false;
@@ -702,6 +702,9 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvmppc_vpa old_vpa = { 0 };
+
if (!(vcpu->arch.vpa.update_pending ||
vcpu->arch.slb_shadow.update_pending ||
vcpu->arch.dtl.update_pending))
@@ -709,17 +712,34 @@ static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
spin_lock(&vcpu->arch.vpa_update_lock);
if (vcpu->arch.vpa.update_pending) {
- kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
- if (vcpu->arch.vpa.pinned_addr)
+ kvmppc_update_vpa(vcpu, &vcpu->arch.vpa, &old_vpa);
+ if (old_vpa.pinned_addr) {
+ if (kvmhv_is_nestedv2())
+ kvmhv_nestedv2_set_vpa(vcpu, ~0ull);
+ kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa,
+ old_vpa.dirty);
+ }
+ if (vcpu->arch.vpa.pinned_addr) {
init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
+ if (kvmhv_is_nestedv2())
+ kvmhv_nestedv2_set_vpa(vcpu, __pa(vcpu->arch.vpa.pinned_addr));
+ }
}
if (vcpu->arch.dtl.update_pending) {
- kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
+ kvmppc_update_vpa(vcpu, &vcpu->arch.dtl, &old_vpa);
+ if (old_vpa.pinned_addr)
+ kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa,
+ old_vpa.dirty);
vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
vcpu->arch.dtl_index = 0;
}
- if (vcpu->arch.slb_shadow.update_pending)
- kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
+ if (vcpu->arch.slb_shadow.update_pending) {
+ kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow, &old_vpa);
+ if (old_vpa.pinned_addr)
+ kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa,
+ old_vpa.dirty);
+ }
+
spin_unlock(&vcpu->arch.vpa_update_lock);
}
@@ -1597,7 +1617,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* That can happen due to a bug, or due to a machine check
* occurring at just the wrong time.
*/
- if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
+ if (!kvmhv_is_nestedv2() && (__kvmppc_get_msr_hv(vcpu) & MSR_HV)) {
printk(KERN_EMERG "KVM trap in HV mode!\n");
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
@@ -1688,7 +1708,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
{
int i;
- if (unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
+ if (!kvmhv_is_nestedv2() && unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
/*
* Guest userspace executed sc 1. This can only be
* reached by the P9 path because the old path
@@ -4084,6 +4104,8 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
if (rc < 0)
return -EINVAL;
+ kvmppc_gse_put_u64(io->vcpu_run_input, KVMPPC_GSID_LPCR, lpcr);
+
accumulate_time(vcpu, &vcpu->arch.in_guest);
rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id,
&trap, &i);
@@ -4736,13 +4758,19 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
if (!nested) {
kvmppc_core_prepare_to_enter(vcpu);
- if (__kvmppc_get_msr_hv(vcpu) & MSR_EE) {
- if (xive_interrupt_pending(vcpu))
+ if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
+ &vcpu->arch.pending_exceptions) ||
+ xive_interrupt_pending(vcpu)) {
+ /*
+ * For nested HV, don't synthesize but always pass MER,
+ * the L0 will be able to optimise that more
+ * effectively than manipulating registers directly.
+ */
+ if (!kvmhv_on_pseries() && (__kvmppc_get_msr_hv(vcpu) & MSR_EE))
kvmppc_inject_interrupt_hv(vcpu,
- BOOK3S_INTERRUPT_EXTERNAL, 0);
- } else if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
- &vcpu->arch.pending_exceptions)) {
- lpcr |= LPCR_MER;
+ BOOK3S_INTERRUPT_EXTERNAL, 0);
+ else
+ lpcr |= LPCR_MER;
}
} else if (vcpu->arch.pending_exceptions ||
vcpu->arch.doorbell_request ||
@@ -4806,7 +4834,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
* entering a nested guest in which case the decrementer is now owned
* by L2 and the L1 decrementer is provided in hdec_expires
*/
- if (kvmppc_core_pending_dec(vcpu) &&
+ if (!kvmhv_is_nestedv2() && kvmppc_core_pending_dec(vcpu) &&
((tb < kvmppc_dec_expires_host_tb(vcpu)) ||
(trap == BOOK3S_INTERRUPT_SYSCALL &&
kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
@@ -4949,7 +4977,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
accumulate_time(vcpu, &vcpu->arch.hcall);
- if (WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
+ if (!kvmhv_is_nestedv2() && WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
/*
* These should have been caught reflected
* into the guest by now. Final sanity check:
@@ -5691,10 +5719,12 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
}
- if (kvmhv_is_nestedv2())
+ if (kvmhv_is_nestedv2()) {
+ kvmhv_flush_lpid(kvm->arch.lpid);
plpar_guest_delete(0, kvm->arch.lpid);
- else
+ } else {
kvmppc_free_lpid(kvm->arch.lpid);
+ }
kvmppc_free_pimap(kvm);
}
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 3b658b8696bc..5c375ec1a3c6 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -503,7 +503,7 @@ void kvmhv_nested_exit(void)
}
}
-static void kvmhv_flush_lpid(u64 lpid)
+void kvmhv_flush_lpid(u64 lpid)
{
long rc;
diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c
index fd3c4f2d9480..5378eb40b162 100644
--- a/arch/powerpc/kvm/book3s_hv_nestedv2.c
+++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c
@@ -856,6 +856,35 @@ free_gsb:
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry);
/**
+ * kvmhv_nestedv2_set_vpa() - register L2 VPA with L0
+ * @vcpu: vcpu
+ * @vpa: L1 logical real address
+ */
+int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_buff *gsb;
+ int rc = 0;
+
+ io = &vcpu->arch.nestedv2_io;
+ gsb = io->vcpu_run_input;
+
+ kvmppc_gsb_reset(gsb);
+ rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_VPA, vpa);
+ if (rc < 0)
+ goto out;
+
+ rc = kvmppc_gsb_send(gsb, 0);
+ if (rc < 0)
+ pr_err("KVM-NESTEDv2: couldn't register the L2 VPA (rc=%d)\n", rc);
+
+out:
+ kvmppc_gsb_reset(gsb);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_vpa);
+
+/**
* kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output
* @vcpu: vcpu
*
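
For illustration only, a hypothetical call site for the new kvmhv_nestedv2_set_vpa() helper could look like the sketch below; the surrounding function and the pinned-address field are assumptions, not shown in this diff:

	/* Hypothetical caller: once the guest has registered its VPA, pass the
	 * L1 real address of the pinned buffer to the L0 hypervisor when
	 * running as a nestedv2 (pseries) guest hypervisor. */
	if (kvmhv_is_nestedv2())
		kvmhv_nestedv2_set_vpa(vcpu, __pa(vcpu->arch.vpa.pinned_addr));
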
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 9118242063fb..5b92619a05fd 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -604,6 +604,7 @@ static void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
case PVR_POWER8:
case PVR_POWER8E:
case PVR_POWER8NVL:
+ case PVR_HX_C2000:
case PVR_POWER9:
vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
BOOK3S_HFLAG_NEW_TLBIE;
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 077fd88a0b68..ec60c7979718 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -93,7 +93,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
emulated = EMULATE_FAIL;
vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
- kvmhv_nestedv2_reload_ptregs(vcpu, &vcpu->arch.regs);
if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
int type = op.type & INSTR_TYPE_MASK;
int size = GETSIZE(op.type);
@@ -112,7 +111,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
op.reg, size, !instr_byte_swap);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
- kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
+ kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
break;
}
@@ -132,7 +131,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
KVM_MMIO_REG_FPR|op.reg, size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
- kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
+ kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
break;
#endif
@@ -224,16 +223,17 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
break;
}
#endif
- case STORE:
- /* if need byte reverse, op.val has been reversed by
- * analyse_instr().
- */
- emulated = kvmppc_handle_store(vcpu, op.val, size, 1);
+ case STORE: {
+ int instr_byte_swap = op.type & BYTEREV;
+
+ emulated = kvmppc_handle_store(vcpu, kvmppc_get_gpr(vcpu, op.reg),
+ size, !instr_byte_swap);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
- kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
+ kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
break;
+ }
#ifdef CONFIG_PPC_FPU
case STORE_FP:
if (kvmppc_check_fp_disabled(vcpu))
@@ -254,7 +254,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
kvmppc_get_fpr(vcpu, op.reg), size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
- kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
+ kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
break;
#endif
@@ -358,7 +358,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
}
trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);
- kvmhv_nestedv2_mark_dirty_ptregs(vcpu, &vcpu->arch.regs);
/* Advance past emulated instruction. */
if (emulated != EMULATE_FAIL)
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 51ad0397c17a..6eac63e79a89 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -45,7 +45,7 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
# so it is only needed for modules, and only for older linkers which
# do not support --save-restore-funcs
ifndef CONFIG_LD_IS_BFD
-extra-$(CONFIG_PPC64) += crtsavres.o
+always-$(CONFIG_PPC64) += crtsavres.o
endif
obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index a4ab8625061a..5766180f5380 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -586,6 +586,8 @@ static int do_fp_load(struct instruction_op *op, unsigned long ea,
} u;
nb = GETSIZE(op->type);
+ if (nb > sizeof(u))
+ return -EINVAL;
if (!address_ok(regs, ea, nb))
return -EFAULT;
rn = op->reg;
@@ -636,6 +638,8 @@ static int do_fp_store(struct instruction_op *op, unsigned long ea,
} u;
nb = GETSIZE(op->type);
+ if (nb > sizeof(u))
+ return -EINVAL;
if (!address_ok(regs, ea, nb))
return -EFAULT;
rn = op->reg;
@@ -680,6 +684,9 @@ static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
u8 b[sizeof(__vector128)];
} u = {};
+ if (size > sizeof(u))
+ return -EINVAL;
+
if (!address_ok(regs, ea & ~0xfUL, 16))
return -EFAULT;
/* align to multiple of size */
@@ -688,7 +695,7 @@ static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
if (err)
return err;
if (unlikely(cross_endian))
- do_byte_reverse(&u.b[ea & 0xf], size);
+ do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u)));
preempt_disable();
if (regs->msr & MSR_VEC)
put_vr(rn, &u.v);
@@ -707,6 +714,9 @@ static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
u8 b[sizeof(__vector128)];
} u;
+ if (size > sizeof(u))
+ return -EINVAL;
+
if (!address_ok(regs, ea & ~0xfUL, 16))
return -EFAULT;
/* align to multiple of size */
@@ -719,7 +729,7 @@ static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
u.v = current->thread.vr_state.vr[rn];
preempt_enable();
if (unlikely(cross_endian))
- do_byte_reverse(&u.b[ea & 0xf], size);
+ do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u)));
return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
}
#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index ad2afa08e62e..0626a25b0d72 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -310,9 +310,16 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags, unsigned long flags
else
rflags |= 0x3;
}
+ VM_WARN_ONCE(!(pteflags & _PAGE_RWX), "no-access mapping request");
} else {
if (pteflags & _PAGE_RWX)
rflags |= 0x2;
+ /*
+ * We should never hit this in normal fault handling because
+ * a permission check (check_pte_access()) will bubble this
+ * to higher level linux handler even for PAGE_NONE.
+ */
+ VM_WARN_ONCE(!(pteflags & _PAGE_RWX), "no-access mapping request");
if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
rflags |= 0x1;
}
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index d19fb1f3007d..c0e8d597e4cb 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -97,7 +97,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
}
mmap_read_lock(mm);
- chunk = (1UL << (PAGE_SHIFT + MAX_ORDER)) /
+ chunk = (1UL << (PAGE_SHIFT + MAX_PAGE_ORDER)) /
sizeof(struct vm_area_struct *);
chunk = min(chunk, entries);
for (entry = 0; entry < entries; entry += chunk) {
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index be229290a6a7..3438ab72c346 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -542,6 +542,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
set_pte_at(vma->vm_mm, addr, ptep, pte);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* For hash translation mode, we use the deposited table to store hash slot
* information and they are stored at PTRS_PER_PMD offset from related pmd
@@ -563,6 +564,7 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
return true;
}
+#endif
/*
* Does the CPU support tlbie?
diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
index 125733962033..a974baf8f327 100644
--- a/arch/powerpc/mm/book3s64/pkeys.c
+++ b/arch/powerpc/mm/book3s64/pkeys.c
@@ -89,7 +89,8 @@ static int __init scan_pkey_feature(void)
unsigned long pvr = mfspr(SPRN_PVR);
if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E ||
- PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9)
+ PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9 ||
+ PVR_VER(pvr) == PVR_HX_C2000)
pkeys_total = 32;
}
}
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 9e49ede2bc1c..53335ae21a40 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -497,6 +497,8 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ if (fault & VM_FAULT_MAJOR)
+ flags |= FAULT_FLAG_TRIED;
if (fault_signal_pending(fault, regs))
return user_mode(regs) ? 0 : SIGBUS;
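
The same two-line change recurs for riscv and s390 further below. A simplified sketch of the per-VMA-lock fault path it adjusts is shown here; the rationale (not double-accounting a major fault when the handler retries under mmap_lock) is inferred from the change itself:

	/* Simplified: the attempt under the VMA lock returned VM_FAULT_RETRY,
	 * so the handler falls back to mmap_lock. */
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	if (fault & VM_FAULT_MAJOR)	/* new in this series */
		flags |= FAULT_FLAG_TRIED;	/* avoid counting the major fault again */
	if (fault_signal_pending(fault, regs))
		return user_mode(regs) ? 0 : SIGBUS;
	/* ... retake mmap_lock and retry handle_mm_fault() with `flags` ... */
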
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f7c683b672c1..0a540b37aab6 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -615,7 +615,7 @@ void __init gigantic_hugetlb_cma_reserve(void)
order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
if (order) {
- VM_WARN_ON(order <= MAX_ORDER);
+ VM_WARN_ON(order <= MAX_PAGE_ORDER);
hugetlb_cma_reserve(order);
}
}
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index 119ef491f797..d3a7726ecf51 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -126,7 +126,7 @@ void pgtable_cache_add(unsigned int shift)
* as to leave enough 0 bits in the address to contain it. */
unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
HUGEPD_SHIFT_MASK + 1);
- struct kmem_cache *new;
+ struct kmem_cache *new = NULL;
/* It would be nice if this was a BUILD_BUG_ON(), but at the
* moment, gcc doesn't seem to recognize is_power_of_2 as a
@@ -139,7 +139,8 @@ void pgtable_cache_add(unsigned int shift)
align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
- new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
+ if (name)
+ new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
if (!new)
panic("Could not allocate pgtable cache for order %d", shift);
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 7f9ff0640124..72341b9fb552 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -181,3 +181,8 @@ static inline bool debug_pagealloc_enabled_or_kfence(void)
{
return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled();
}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int create_section_mapping(unsigned long start, unsigned long end,
+ int nid, pgprot_t prot);
+#endif
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 10b946e9c6e7..b7ff680cde96 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2312,7 +2312,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
struct cpu_hw_events *cpuhw;
cpuhw = this_cpu_ptr(&cpu_hw_events);
power_pmu_bhrb_read(event, cpuhw);
- perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack);
+ perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack, NULL);
}
if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 39dbe6b348df..27f18119fda1 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -534,6 +534,9 @@ static ssize_t affinity_domain_via_partition_show(struct device *dev, struct dev
if (!ret)
goto parse_result;
+ if (ret && (ret != H_PARAMETER))
+ goto out;
+
/*
* ret value as 'H_PARAMETER' implies that the current buffer size
* can't accommodate all the information, and a partial buffer
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 5d12ca386c1f..8664a7d297ad 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -299,6 +299,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
attr_group->attrs = attrs;
do {
ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
+ if (!ev_val_str)
+ continue;
dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
if (!dev_str)
continue;
@@ -306,6 +308,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
attrs[j++] = dev_str;
if (pmu->events[i].scale) {
ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
+ if (!ev_scale_str)
+ continue;
dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
if (!dev_str)
continue;
@@ -315,6 +319,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
if (pmu->events[i].unit) {
ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
+ if (!ev_unit_str)
+ continue;
dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
if (!dev_str)
continue;
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 1624ebf95497..35a1f4b9f827 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -173,6 +173,7 @@ config ISS4xx
config CURRITUCK
bool "IBM Currituck (476fpe) Support"
depends on PPC_47x
+ select I2C
select SWIOTLB
select 476FPE
select FORCE_PCI
diff --git a/arch/powerpc/platforms/44x/idle.c b/arch/powerpc/platforms/44x/idle.c
index f533b495e7db..e2eeef8dff78 100644
--- a/arch/powerpc/platforms/44x/idle.c
+++ b/arch/powerpc/platforms/44x/idle.c
@@ -27,7 +27,7 @@ static void ppc44x_idle(void)
isync();
}
-int __init ppc44x_idle_init(void)
+static int __init ppc44x_idle_init(void)
{
if (!mode_spin) {
/* If we are not setting spin mode
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index 6f08d07aee3b..e995eb30bf09 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -17,6 +17,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include "mpc5121_ads.h"
+
static struct device_node *cpld_pic_node;
static struct irq_domain *cpld_pic_host;
diff --git a/arch/powerpc/platforms/512x/pdm360ng.c b/arch/powerpc/platforms/512x/pdm360ng.c
index ce51cfeeb066..8bbbf78bb42b 100644
--- a/arch/powerpc/platforms/512x/pdm360ng.c
+++ b/arch/powerpc/platforms/512x/pdm360ng.c
@@ -101,7 +101,7 @@ static inline void __init pdm360ng_touchscreen_init(void)
}
#endif /* CONFIG_TOUCHSCREEN_ADS7846 */
-void __init pdm360ng_init(void)
+static void __init pdm360ng_init(void)
{
mpc512x_init();
pdm360ng_touchscreen_init();
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 9833c36bda83..c9664e46b03d 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -261,9 +261,10 @@ static int mpc83xx_suspend_begin(suspend_state_t state)
static int agent_thread_fn(void *data)
{
+ set_freezable();
+
while (1) {
- wait_event_interruptible(agent_wq, pci_pm_state >= 2);
- try_to_freeze();
+ wait_event_freezable(agent_wq, pci_pm_state >= 2);
if (signal_pending(current) || pci_pm_state < 2)
continue;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index ec9f60fbebc7..e0cec670d8db 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -76,7 +76,7 @@ static void __init mpc85xx_rdb_setup_arch(void)
/* P1025 has pins muxed for QE and other functions. To
* enable QE UEC mode, we need to set bit QE0 for UCC1
* in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9
- * and QE12 for QE MII management singals in PMUXCR
+ * and QE12 for QE MII management signals in PMUXCR
* register.
*/
setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) |
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 67467cd6f34c..06b1e5c49d6f 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -52,10 +52,3 @@ config MPC8641
select MPIC
default y if GEF_SBC610 || GEF_SBC310 || GEF_PPC9A \
|| MVME7100
-
-config MPC8610
- bool
- select HAVE_PCI
- select FSL_PCI if PCI
- select PPC_UDBG_16550
- select MPIC
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index ef985ba2bf21..0761d98e5be3 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -64,7 +64,7 @@ static void __noreturn pas_restart(char *cmd)
}
#ifdef CONFIG_PPC_PASEMI_NEMO
-void pas_shutdown(void)
+static void pas_shutdown(void)
{
/* Set the PLD bit that makes the SB600 think the power button is being pressed */
void __iomem *pld_map = ioremap(0xf5000000,4096);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index c83d1e14077e..15644be31990 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -413,7 +413,7 @@ static void __init smp_psurge_setup_cpu(int cpu_nr)
printk(KERN_ERR "Couldn't get primary IPI interrupt");
}
-void __init smp_psurge_take_timebase(void)
+static void __init smp_psurge_take_timebase(void)
{
if (psurge_type != PSURGE_DUAL)
return;
@@ -429,7 +429,7 @@ void __init smp_psurge_take_timebase(void)
set_dec(tb_ticks_per_jiffy/2);
}
-void __init smp_psurge_give_timebase(void)
+static void __init smp_psurge_give_timebase(void)
{
/* Nothing to do here */
}
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index f9a7001dacb7..56a1f7ce78d2 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -275,6 +275,8 @@ int __init opal_event_init(void)
else
name = kasprintf(GFP_KERNEL, "opal");
+ if (!name)
+ continue;
/* Install interrupt handler */
rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK,
name, NULL);
diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c
index 7bfe4cbeb35a..ea917266aa17 100644
--- a/arch/powerpc/platforms/powernv/opal-powercap.c
+++ b/arch/powerpc/platforms/powernv/opal-powercap.c
@@ -196,6 +196,12 @@ void __init opal_powercap_init(void)
j = 0;
pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node);
+ if (!pcaps[i].pg.name) {
+ kfree(pcaps[i].pattrs);
+ kfree(pcaps[i].pg.attrs);
+ goto out_pcaps_pattrs;
+ }
+
if (has_min) {
powercap_add_attr(min, "powercap-min",
&pcaps[i].pattrs[j]);
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
index 327e2f76905d..b66b06efcef1 100644
--- a/arch/powerpc/platforms/powernv/opal-prd.c
+++ b/arch/powerpc/platforms/powernv/opal-prd.c
@@ -66,6 +66,8 @@ static bool opal_prd_range_is_valid(uint64_t addr, uint64_t size)
const char *label;
addrp = of_get_address(node, 0, &range_size, NULL);
+ if (!addrp)
+ continue;
range_addr = of_read_number(addrp, 2);
range_end = range_addr + range_size;
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index 262cd6fac907..748c2b97fa53 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -165,6 +165,11 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
ent->chip = chip;
snprintf(ent->name, 16, "%08x", chip);
ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn);
+ if (!ent->path.data) {
+ kfree(ent);
+ return -ENOMEM;
+ }
+
ent->path.size = strlen((char *)ent->path.data);
dir = debugfs_create_dir(ent->name, root);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 28fac4770073..23f5b5093ec1 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1389,7 +1389,7 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
* DMA window can be larger than available memory, which will
* cause errors later.
*/
- const u64 maxblock = 1UL << (PAGE_SHIFT + MAX_ORDER);
+ const u64 maxblock = 1UL << (PAGE_SHIFT + MAX_PAGE_ORDER);
/*
* We create the default window as big as we can. The constraint is
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 191424468f10..393e747541fb 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -425,7 +425,8 @@ static int subcore_init(void)
if (pvr_ver != PVR_POWER8 &&
pvr_ver != PVR_POWER8E &&
- pvr_ver != PVR_POWER8NVL)
+ pvr_ver != PVR_POWER8NVL &&
+ pvr_ver != PVR_HX_C2000)
return 0;
/*
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index a44869e5ea70..e9c1087dd42e 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -167,16 +167,4 @@ config PS3_LPM
profiling support of the Cell processor with programs like
perfmon2, then say Y or M, otherwise say N.
-config PS3GELIC_UDBG
- bool "PS3 udbg output via UDP broadcasts on Ethernet"
- depends on PPC_PS3
- help
- Enables udbg early debugging output by sending broadcast UDP
- via the Ethernet port (UDP port number 18194).
-
- This driver uses a trivial implementation and is independent
- from the main PS3 gelic network driver.
-
- If in doubt, say N here.
-
endmenu
diff --git a/arch/powerpc/platforms/ps3/Makefile b/arch/powerpc/platforms/ps3/Makefile
index 86bf2967a8d4..bc79bb124d1e 100644
--- a/arch/powerpc/platforms/ps3/Makefile
+++ b/arch/powerpc/platforms/ps3/Makefile
@@ -3,7 +3,7 @@ obj-y += setup.o mm.o time.o hvcall.o htab.o repository.o
obj-y += interrupt.o exports.o os-area.o
obj-y += system-bus.o
-obj-$(CONFIG_PS3GELIC_UDBG) += gelic_udbg.o
+obj-$(CONFIG_PPC_EARLY_DEBUG_PS3GELIC) += gelic_udbg.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SPU_BASE) += spu.o
obj-y += device-init.o
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index e87360a0fb40..878bc160246e 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -827,6 +827,7 @@ static int ps3_probe_thread(void *data)
if (res)
goto fail_free_irq;
+ set_freezable();
/* Loop here processing the requested notification events. */
do {
try_to_freeze();
diff --git a/arch/powerpc/platforms/ps3/gelic_udbg.c b/arch/powerpc/platforms/ps3/gelic_udbg.c
index 6b298010fd84..a5202c18c236 100644
--- a/arch/powerpc/platforms/ps3/gelic_udbg.c
+++ b/arch/powerpc/platforms/ps3/gelic_udbg.c
@@ -14,6 +14,7 @@
#include <linux/ip.h>
#include <linux/udp.h>
+#include <asm/ps3.h>
#include <asm/io.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 1476c5e4433c..f936962a2946 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -4,6 +4,7 @@ ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG
obj-y := lpar.o hvCall.o nvram.o reconfig.o \
of_helpers.o rtas-work-area.o papr-sysparm.o \
+ papr-vpd.o \
setup.o iommu.o event_sources.o ras.o \
firmware.o power.o dlpar.o mobility.o rng.o \
pci.o pci_dlpar.o eeh_pseries.o msi.o \
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index def184da51cf..b1ae0c0d1187 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -252,7 +252,7 @@ static int pseries_eeh_cap_start(struct pci_dn *pdn)
if (!pdn)
return 0;
- rtas_read_config(pdn, PCI_STATUS, 2, &status);
+ rtas_pci_dn_read_config(pdn, PCI_STATUS, 2, &status);
if (!(status & PCI_STATUS_CAP_LIST))
return 0;
@@ -270,11 +270,11 @@ static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
return 0;
while (cnt--) {
- rtas_read_config(pdn, pos, 1, &pos);
+ rtas_pci_dn_read_config(pdn, pos, 1, &pos);
if (pos < 0x40)
break;
pos &= ~3;
- rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
+ rtas_pci_dn_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
if (id == 0xff)
break;
if (id == cap)
@@ -294,7 +294,7 @@ static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
if (!edev || !edev->pcie_cap)
return 0;
- if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
+ if (rtas_pci_dn_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
return 0;
else if (!header)
return 0;
@@ -307,7 +307,7 @@ static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
if (pos < 256)
break;
- if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
+ if (rtas_pci_dn_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
break;
}
@@ -412,8 +412,8 @@ static void pseries_eeh_init_edev(struct pci_dn *pdn)
if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
edev->mode |= EEH_DEV_BRIDGE;
if (edev->pcie_cap) {
- rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
- 2, &pcie_flags);
+ rtas_pci_dn_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
+ 2, &pcie_flags);
pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
edev->mode |= EEH_DEV_ROOT_PORT;
@@ -676,7 +676,7 @@ static int pseries_eeh_read_config(struct eeh_dev *edev, int where, int size, u3
{
struct pci_dn *pdn = eeh_dev_to_pdn(edev);
- return rtas_read_config(pdn, where, size, val);
+ return rtas_pci_dn_read_config(pdn, where, size, val);
}
/**
@@ -692,7 +692,7 @@ static int pseries_eeh_write_config(struct eeh_dev *edev, int where, int size, u
{
struct pci_dn *pdn = eeh_dev_to_pdn(edev);
- return rtas_write_config(pdn, where, size, val);
+ return rtas_pci_dn_write_config(pdn, where, size, val);
}
#ifdef CONFIG_PCI_IOV
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index a43bfb01720a..3fe3ddb30c04 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -208,8 +208,10 @@ static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
int rc;
mem_block = lmb_to_memblock(lmb);
- if (!mem_block)
+ if (!mem_block) {
+ pr_err("Failed memory block lookup for LMB 0x%x\n", lmb->drc_index);
return -EINVAL;
+ }
if (online && mem_block->dev.offline)
rc = device_online(&mem_block->dev);
@@ -436,14 +438,15 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
}
}
- if (!lmb_found)
+ if (!lmb_found) {
+ pr_debug("Failed to look up LMB for drc index %x\n", drc_index);
rc = -EINVAL;
-
- if (rc)
+ } else if (rc) {
pr_debug("Failed to hot-remove memory at %llx\n",
lmb->base_addr);
- else
+ } else {
pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
+ }
return rc;
}
@@ -575,6 +578,7 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
rc = update_lmb_associativity_index(lmb);
if (rc) {
dlpar_release_drc(lmb->drc_index);
+ pr_err("Failed to configure LMB 0x%x\n", lmb->drc_index);
return rc;
}
@@ -588,12 +592,14 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
/* Add the memory */
rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_MEMMAP_ON_MEMORY);
if (rc) {
+ pr_err("Failed to add LMB 0x%x to node %u", lmb->drc_index, nid);
invalidate_lmb_associativity_index(lmb);
return rc;
}
rc = dlpar_online_lmb(lmb);
if (rc) {
+ pr_err("Failed to online LMB 0x%x on node %u\n", lmb->drc_index, nid);
__remove_memory(lmb->base_addr, block_sz);
invalidate_lmb_associativity_index(lmb);
} else {
diff --git a/arch/powerpc/platforms/pseries/papr-sysparm.c b/arch/powerpc/platforms/pseries/papr-sysparm.c
index fedc61599e6c..7063ce8884e4 100644
--- a/arch/powerpc/platforms/pseries/papr-sysparm.c
+++ b/arch/powerpc/platforms/pseries/papr-sysparm.c
@@ -2,14 +2,20 @@
#define pr_fmt(fmt) "papr-sysparm: " fmt
+#include <linux/anon_inodes.h>
#include <linux/bug.h>
+#include <linux/file.h>
+#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/miscdevice.h>
#include <linux/printk.h>
#include <linux/slab.h>
-#include <asm/rtas.h>
+#include <linux/uaccess.h>
+#include <asm/machdep.h>
#include <asm/papr-sysparm.h>
#include <asm/rtas-work-area.h>
+#include <asm/rtas.h>
struct papr_sysparm_buf *papr_sysparm_buf_alloc(void)
{
@@ -23,6 +29,46 @@ void papr_sysparm_buf_free(struct papr_sysparm_buf *buf)
kfree(buf);
}
+static size_t papr_sysparm_buf_get_length(const struct papr_sysparm_buf *buf)
+{
+ return be16_to_cpu(buf->len);
+}
+
+static void papr_sysparm_buf_set_length(struct papr_sysparm_buf *buf, size_t length)
+{
+ WARN_ONCE(length > sizeof(buf->val),
+ "bogus length %zu, clamping to safe value", length);
+ length = min(sizeof(buf->val), length);
+ buf->len = cpu_to_be16(length);
+}
+
+/*
+ * For use on buffers returned from ibm,get-system-parameter before
+ * returning them to callers. Ensures the encoded length of valid data
+ * cannot overrun buf->val[].
+ */
+static void papr_sysparm_buf_clamp_length(struct papr_sysparm_buf *buf)
+{
+ papr_sysparm_buf_set_length(buf, papr_sysparm_buf_get_length(buf));
+}
+
+/*
+ * Perform some basic diligence on the system parameter buffer before
+ * submitting it to RTAS.
+ */
+static bool papr_sysparm_buf_can_submit(const struct papr_sysparm_buf *buf)
+{
+ /*
+ * Firmware ought to reject buffer lengths that exceed the
+ * maximum specified in PAPR, but there's no reason for the
+ * kernel to allow them either.
+ */
+ if (papr_sysparm_buf_get_length(buf) > sizeof(buf->val))
+ return false;
+
+ return true;
+}
+
/**
* papr_sysparm_get() - Retrieve the value of a PAPR system parameter.
* @param: PAPR system parameter token as described in
@@ -47,7 +93,6 @@ void papr_sysparm_buf_free(struct papr_sysparm_buf *buf)
*
* Return: 0 on success, -errno otherwise. @buf is unmodified on error.
*/
-
int papr_sysparm_get(papr_sysparm_t param, struct papr_sysparm_buf *buf)
{
const s32 token = rtas_function_token(RTAS_FN_IBM_GET_SYSTEM_PARAMETER);
@@ -63,6 +108,9 @@ int papr_sysparm_get(papr_sysparm_t param, struct papr_sysparm_buf *buf)
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
+ if (!papr_sysparm_buf_can_submit(buf))
+ return -EINVAL;
+
work_area = rtas_work_area_alloc(sizeof(*buf));
memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf));
@@ -77,6 +125,7 @@ int papr_sysparm_get(papr_sysparm_t param, struct papr_sysparm_buf *buf)
case 0:
ret = 0;
memcpy(buf, rtas_work_area_raw_buf(work_area), sizeof(*buf));
+ papr_sysparm_buf_clamp_length(buf);
break;
case -3: /* parameter not implemented */
ret = -EOPNOTSUPP;
@@ -115,6 +164,9 @@ int papr_sysparm_set(papr_sysparm_t param, const struct papr_sysparm_buf *buf)
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
+ if (!papr_sysparm_buf_can_submit(buf))
+ return -EINVAL;
+
work_area = rtas_work_area_alloc(sizeof(*buf));
memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf));
@@ -149,3 +201,152 @@ int papr_sysparm_set(papr_sysparm_t param, const struct papr_sysparm_buf *buf)
return ret;
}
+
+static struct papr_sysparm_buf *
+papr_sysparm_buf_from_user(const struct papr_sysparm_io_block __user *user_iob)
+{
+ struct papr_sysparm_buf *kern_spbuf;
+ long err;
+ u16 len;
+
+ /*
+ * The length of valid data that userspace claims to be in
+ * user_iob->data[].
+ */
+ if (get_user(len, &user_iob->length))
+ return ERR_PTR(-EFAULT);
+
+ static_assert(sizeof(user_iob->data) >= PAPR_SYSPARM_MAX_INPUT);
+ static_assert(sizeof(kern_spbuf->val) >= PAPR_SYSPARM_MAX_INPUT);
+
+ if (len > PAPR_SYSPARM_MAX_INPUT)
+ return ERR_PTR(-EINVAL);
+
+ kern_spbuf = papr_sysparm_buf_alloc();
+ if (!kern_spbuf)
+ return ERR_PTR(-ENOMEM);
+
+ papr_sysparm_buf_set_length(kern_spbuf, len);
+
+ if (len > 0 && copy_from_user(kern_spbuf->val, user_iob->data, len)) {
+ err = -EFAULT;
+ goto free_sysparm_buf;
+ }
+
+ return kern_spbuf;
+
+free_sysparm_buf:
+ papr_sysparm_buf_free(kern_spbuf);
+ return ERR_PTR(err);
+}
+
+static int papr_sysparm_buf_to_user(const struct papr_sysparm_buf *kern_spbuf,
+ struct papr_sysparm_io_block __user *user_iob)
+{
+ u16 len_out = papr_sysparm_buf_get_length(kern_spbuf);
+
+ if (put_user(len_out, &user_iob->length))
+ return -EFAULT;
+
+ static_assert(sizeof(user_iob->data) >= PAPR_SYSPARM_MAX_OUTPUT);
+ static_assert(sizeof(kern_spbuf->val) >= PAPR_SYSPARM_MAX_OUTPUT);
+
+ if (copy_to_user(user_iob->data, kern_spbuf->val, PAPR_SYSPARM_MAX_OUTPUT))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long papr_sysparm_ioctl_get(struct papr_sysparm_io_block __user *user_iob)
+{
+ struct papr_sysparm_buf *kern_spbuf;
+ papr_sysparm_t param;
+ long ret;
+
+ if (get_user(param.token, &user_iob->parameter))
+ return -EFAULT;
+
+ kern_spbuf = papr_sysparm_buf_from_user(user_iob);
+ if (IS_ERR(kern_spbuf))
+ return PTR_ERR(kern_spbuf);
+
+ ret = papr_sysparm_get(param, kern_spbuf);
+ if (ret)
+ goto free_sysparm_buf;
+
+ ret = papr_sysparm_buf_to_user(kern_spbuf, user_iob);
+ if (ret)
+ goto free_sysparm_buf;
+
+ ret = 0;
+
+free_sysparm_buf:
+ papr_sysparm_buf_free(kern_spbuf);
+ return ret;
+}
+
+
+static long papr_sysparm_ioctl_set(struct papr_sysparm_io_block __user *user_iob)
+{
+ struct papr_sysparm_buf *kern_spbuf;
+ papr_sysparm_t param;
+ long ret;
+
+ if (get_user(param.token, &user_iob->parameter))
+ return -EFAULT;
+
+ kern_spbuf = papr_sysparm_buf_from_user(user_iob);
+ if (IS_ERR(kern_spbuf))
+ return PTR_ERR(kern_spbuf);
+
+ ret = papr_sysparm_set(param, kern_spbuf);
+ if (ret)
+ goto free_sysparm_buf;
+
+ ret = 0;
+
+free_sysparm_buf:
+ papr_sysparm_buf_free(kern_spbuf);
+ return ret;
+}
+
+static long papr_sysparm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (__force void __user *)arg;
+ long ret;
+
+ switch (ioctl) {
+ case PAPR_SYSPARM_IOC_GET:
+ ret = papr_sysparm_ioctl_get(argp);
+ break;
+ case PAPR_SYSPARM_IOC_SET:
+ if (filp->f_mode & FMODE_WRITE)
+ ret = papr_sysparm_ioctl_set(argp);
+ else
+ ret = -EBADF;
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations papr_sysparm_ops = {
+ .unlocked_ioctl = papr_sysparm_ioctl,
+};
+
+static struct miscdevice papr_sysparm_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "papr-sysparm",
+ .fops = &papr_sysparm_ops,
+};
+
+static __init int papr_sysparm_init(void)
+{
+ if (!rtas_function_implemented(RTAS_FN_IBM_GET_SYSTEM_PARAMETER))
+ return -ENODEV;
+
+ return misc_register(&papr_sysparm_dev);
+}
+machine_device_initcall(pseries, papr_sysparm_init);
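
A minimal user-space sketch of the new /dev/papr-sysparm interface, assuming the uapi header is available as <asm/papr-sysparm.h> and using parameter token 20 (SPLPAR characteristics) purely as an example value:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <asm/papr-sysparm.h>

	int main(void)
	{
		struct papr_sysparm_io_block io = { .parameter = 20 };
		int fd = open("/dev/papr-sysparm", O_RDONLY);

		/* PAPR_SYSPARM_IOC_GET fills io.length and io.data on success;
		 * PAPR_SYSPARM_IOC_SET additionally requires a writable fd. */
		if (fd < 0 || ioctl(fd, PAPR_SYSPARM_IOC_GET, &io) < 0) {
			perror("papr-sysparm");
			return 1;
		}
		printf("parameter %u: %u bytes returned\n",
		       (unsigned int)io.parameter, (unsigned int)io.length);
		close(fd);
		return 0;
	}
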
diff --git a/arch/powerpc/platforms/pseries/papr-vpd.c b/arch/powerpc/platforms/pseries/papr-vpd.c
new file mode 100644
index 000000000000..c29e85db5f35
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/papr-vpd.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "papr-vpd: " fmt
+
+#include <linux/anon_inodes.h>
+#include <linux/build_bug.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/lockdep.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+#include <linux/uaccess.h>
+#include <asm/machdep.h>
+#include <asm/papr-vpd.h>
+#include <asm/rtas-work-area.h>
+#include <asm/rtas.h>
+#include <uapi/asm/papr-vpd.h>
+
+/*
+ * Function-specific return values for ibm,get-vpd, derived from PAPR+
+ * v2.13 7.3.20 "ibm,get-vpd RTAS Call".
+ */
+#define RTAS_IBM_GET_VPD_COMPLETE 0 /* All VPD has been retrieved. */
+#define RTAS_IBM_GET_VPD_MORE_DATA 1 /* More VPD is available. */
+#define RTAS_IBM_GET_VPD_START_OVER -4 /* VPD changed, restart call sequence. */
+
+/**
+ * struct rtas_ibm_get_vpd_params - Parameters (in and out) for ibm,get-vpd.
+ * @loc_code: In: Caller-provided location code buffer. Must be RTAS-addressable.
+ * @work_area: In: Caller-provided work area buffer for results.
+ * @sequence: In: Sequence number. Out: Next sequence number.
+ * @written: Out: Bytes written by ibm,get-vpd to @work_area.
+ * @status: Out: RTAS call status.
+ */
+struct rtas_ibm_get_vpd_params {
+ const struct papr_location_code *loc_code;
+ struct rtas_work_area *work_area;
+ u32 sequence;
+ u32 written;
+ s32 status;
+};
+
+/**
+ * rtas_ibm_get_vpd() - Call ibm,get-vpd to fill a work area buffer.
+ * @params: See &struct rtas_ibm_get_vpd_params.
+ *
+ * Calls ibm,get-vpd until it errors or successfully deposits data
+ * into the supplied work area. Handles RTAS retry statuses. Maps RTAS
+ * error statuses to reasonable errno values.
+ *
+ * The caller is expected to invoke rtas_ibm_get_vpd() multiple times
+ * to retrieve all the VPD for the provided location code. Only one
+ * sequence should be in progress at any time; starting a new sequence
+ * will disrupt any sequence already in progress. Serialization of VPD
+ * retrieval sequences is the responsibility of the caller.
+ *
+ * The caller should inspect @params.status to determine whether more
+ * calls are needed to complete the sequence.
+ *
+ * Context: May sleep.
+ * Return: -ve on error, 0 otherwise.
+ */
+static int rtas_ibm_get_vpd(struct rtas_ibm_get_vpd_params *params)
+{
+ const struct papr_location_code *loc_code = params->loc_code;
+ struct rtas_work_area *work_area = params->work_area;
+ u32 rets[2];
+ s32 fwrc;
+ int ret;
+
+ lockdep_assert_held(&rtas_ibm_get_vpd_lock);
+
+ do {
+ fwrc = rtas_call(rtas_function_token(RTAS_FN_IBM_GET_VPD), 4, 3,
+ rets,
+ __pa(loc_code),
+ rtas_work_area_phys(work_area),
+ rtas_work_area_size(work_area),
+ params->sequence);
+ } while (rtas_busy_delay(fwrc));
+
+ switch (fwrc) {
+ case RTAS_HARDWARE_ERROR:
+ ret = -EIO;
+ break;
+ case RTAS_INVALID_PARAMETER:
+ ret = -EINVAL;
+ break;
+ case RTAS_IBM_GET_VPD_START_OVER:
+ ret = -EAGAIN;
+ break;
+ case RTAS_IBM_GET_VPD_MORE_DATA:
+ params->sequence = rets[0];
+ fallthrough;
+ case RTAS_IBM_GET_VPD_COMPLETE:
+ params->written = rets[1];
+ /*
+ * Kernel or firmware bug, do not continue.
+ */
+ if (WARN(params->written > rtas_work_area_size(work_area),
+ "possible write beyond end of work area"))
+ ret = -EFAULT;
+ else
+ ret = 0;
+ break;
+ default:
+ ret = -EIO;
+ pr_err_ratelimited("unexpected ibm,get-vpd status %d\n", fwrc);
+ break;
+ }
+
+ params->status = fwrc;
+ return ret;
+}
+
+/*
+ * Internal VPD "blob" APIs for accumulating ibm,get-vpd results into
+ * an immutable buffer to be attached to a file descriptor.
+ */
+struct vpd_blob {
+ const char *data;
+ size_t len;
+};
+
+static bool vpd_blob_has_data(const struct vpd_blob *blob)
+{
+ return blob->data && blob->len;
+}
+
+static void vpd_blob_free(const struct vpd_blob *blob)
+{
+ if (blob) {
+ kvfree(blob->data);
+ kfree(blob);
+ }
+}
+
+/**
+ * vpd_blob_extend() - Append data to a &struct vpd_blob.
+ * @blob: The blob to extend.
+ * @data: The new data to append to @blob.
+ * @len: The length of @data.
+ *
+ * Context: May sleep.
+ * Return: -ENOMEM on allocation failure, 0 otherwise.
+ */
+static int vpd_blob_extend(struct vpd_blob *blob, const char *data, size_t len)
+{
+ const size_t new_len = blob->len + len;
+ const size_t old_len = blob->len;
+ const char *old_ptr = blob->data;
+ char *new_ptr;
+
+ new_ptr = old_ptr ?
+ kvrealloc(old_ptr, old_len, new_len, GFP_KERNEL_ACCOUNT) :
+ kvmalloc(len, GFP_KERNEL_ACCOUNT);
+
+ if (!new_ptr)
+ return -ENOMEM;
+
+ memcpy(&new_ptr[old_len], data, len);
+ blob->data = new_ptr;
+ blob->len = new_len;
+ return 0;
+}
+
+/**
+ * vpd_blob_generate() - Construct a new &struct vpd_blob.
+ * @generator: Function that supplies the blob data.
+ * @arg: Context pointer supplied by caller, passed to @generator.
+ *
+ * The @generator callback is invoked until it returns NULL. @arg is
+ * passed to @generator in its first argument on each call. When
+ * @generator returns data, it should store the data length in its
+ * second argument.
+ *
+ * Context: May sleep.
+ * Return: A completely populated &struct vpd_blob, or NULL on error.
+ */
+static const struct vpd_blob *
+vpd_blob_generate(const char * (*generator)(void *, size_t *), void *arg)
+{
+ struct vpd_blob *blob;
+ const char *buf;
+ size_t len;
+ int err = 0;
+
+ blob = kzalloc(sizeof(*blob), GFP_KERNEL_ACCOUNT);
+ if (!blob)
+ return NULL;
+
+ while (err == 0 && (buf = generator(arg, &len)))
+ err = vpd_blob_extend(blob, buf, len);
+
+ if (err != 0 || !vpd_blob_has_data(blob))
+ goto free_blob;
+
+ return blob;
+free_blob:
+ vpd_blob_free(blob);
+ return NULL;
+}
+
+/*
+ * Internal VPD sequence APIs. A VPD sequence is a series of calls to
+ * ibm,get-vpd for a given location code. The sequence ends when an
+ * error is encountered or all VPD for the location code has been
+ * returned.
+ */
+
+/**
+ * struct vpd_sequence - State for managing a VPD sequence.
+ * @error: Shall be zero as long as the sequence has not encountered an error,
+ * -ve errno otherwise. Use vpd_sequence_set_err() to update this.
+ * @params: Parameter block to pass to rtas_ibm_get_vpd().
+ */
+struct vpd_sequence {
+ int error;
+ struct rtas_ibm_get_vpd_params params;
+};
+
+/**
+ * vpd_sequence_begin() - Begin a VPD retrieval sequence.
+ * @seq: Uninitialized sequence state.
+ * @loc_code: Location code that defines the scope of the VPD to return.
+ *
+ * Initializes @seq with the resources necessary to carry out a VPD
+ * sequence. Callers must pass @seq to vpd_sequence_end() regardless
+ * of whether the sequence succeeds.
+ *
+ * Context: May sleep.
+ */
+static void vpd_sequence_begin(struct vpd_sequence *seq,
+ const struct papr_location_code *loc_code)
+{
+ /*
+ * Use a static data structure for the location code passed to
+ * RTAS to ensure it's in the RMA and avoid a separate work
+ * area allocation. Guarded by the function lock.
+ */
+ static struct papr_location_code static_loc_code;
+
+ /*
+ * We could allocate the work area before acquiring the
+ * function lock, but that would allow concurrent requests to
+ * exhaust the limited work area pool for no benefit. So
+ * allocate the work area under the lock.
+ */
+ mutex_lock(&rtas_ibm_get_vpd_lock);
+ static_loc_code = *loc_code;
+ *seq = (struct vpd_sequence) {
+ .params = {
+ .work_area = rtas_work_area_alloc(SZ_4K),
+ .loc_code = &static_loc_code,
+ .sequence = 1,
+ },
+ };
+}
+
+/**
+ * vpd_sequence_end() - Finalize a VPD retrieval sequence.
+ * @seq: Sequence state.
+ *
+ * Releases resources obtained by vpd_sequence_begin().
+ */
+static void vpd_sequence_end(struct vpd_sequence *seq)
+{
+ rtas_work_area_free(seq->params.work_area);
+ mutex_unlock(&rtas_ibm_get_vpd_lock);
+}
+
+/**
+ * vpd_sequence_should_stop() - Determine whether a VPD retrieval sequence
+ * should continue.
+ * @seq: VPD sequence state.
+ *
+ * Examines the sequence error state and outputs of the last call to
+ * ibm,get-vpd to determine whether the sequence in progress should
+ * continue or stop.
+ *
+ * Return: True if the sequence has encountered an error or if all VPD for
+ * this sequence has been retrieved. False otherwise.
+ */
+static bool vpd_sequence_should_stop(const struct vpd_sequence *seq)
+{
+ bool done;
+
+ if (seq->error)
+ return true;
+
+ switch (seq->params.status) {
+ case 0:
+ if (seq->params.written == 0)
+ done = false; /* Initial state. */
+ else
+ done = true; /* All data consumed. */
+ break;
+ case 1:
+ done = false; /* More data available. */
+ break;
+ default:
+ done = true; /* Error encountered. */
+ break;
+ }
+
+ return done;
+}
+
+static int vpd_sequence_set_err(struct vpd_sequence *seq, int err)
+{
+ /* Preserve the first error recorded. */
+ if (seq->error == 0)
+ seq->error = err;
+
+ return seq->error;
+}
+
+/*
+ * Generator function to be passed to vpd_blob_generate().
+ */
+static const char *vpd_sequence_fill_work_area(void *arg, size_t *len)
+{
+ struct vpd_sequence *seq = arg;
+ struct rtas_ibm_get_vpd_params *p = &seq->params;
+
+ if (vpd_sequence_should_stop(seq))
+ return NULL;
+ if (vpd_sequence_set_err(seq, rtas_ibm_get_vpd(p)))
+ return NULL;
+ *len = p->written;
+ return rtas_work_area_raw_buf(p->work_area);
+}
+
+/*
+ * Higher-level VPD retrieval code below. These functions use the
+ * vpd_blob_* and vpd_sequence_* APIs defined above to create fd-based
+ * VPD handles for consumption by user space.
+ */
+
+/**
+ * papr_vpd_run_sequence() - Run a single VPD retrieval sequence.
+ * @loc_code: Location code that defines the scope of VPD to return.
+ *
+ * Context: May sleep. Holds a mutex and an RTAS work area for its
+ * duration. Typically performs multiple sleepable slab
+ * allocations.
+ *
+ * Return: A populated &struct vpd_blob on success. Encoded error
+ * pointer otherwise.
+ */
+static const struct vpd_blob *papr_vpd_run_sequence(const struct papr_location_code *loc_code)
+{
+ const struct vpd_blob *blob;
+ struct vpd_sequence seq;
+
+ vpd_sequence_begin(&seq, loc_code);
+ blob = vpd_blob_generate(vpd_sequence_fill_work_area, &seq);
+ if (!blob)
+ vpd_sequence_set_err(&seq, -ENOMEM);
+ vpd_sequence_end(&seq);
+
+ if (seq.error) {
+ vpd_blob_free(blob);
+ return ERR_PTR(seq.error);
+ }
+
+ return blob;
+}
+
+/**
+ * papr_vpd_retrieve() - Return the VPD for a location code.
+ * @loc_code: Location code that defines the scope of VPD to return.
+ *
+ * Run VPD sequences against @loc_code until a blob is successfully
+ * instantiated, or a hard error is encountered, or a fatal signal is
+ * pending.
+ *
+ * Context: May sleep.
+ * Return: A fully populated VPD blob when successful. Encoded error
+ * pointer otherwise.
+ */
+static const struct vpd_blob *papr_vpd_retrieve(const struct papr_location_code *loc_code)
+{
+ const struct vpd_blob *blob;
+
+ /*
+ * EAGAIN means the sequence errored with a -4 (VPD changed)
+ * status from ibm,get-vpd, and we should attempt a new
+ * sequence. PAPR+ v2.13 R1–7.3.20–5 indicates that this
+ * should be a transient condition, not something that happens
+ * continuously. But we'll stop trying on a fatal signal.
+ */
+ do {
+ blob = papr_vpd_run_sequence(loc_code);
+ if (!IS_ERR(blob)) /* Success. */
+ break;
+ if (PTR_ERR(blob) != -EAGAIN) /* Hard error. */
+ break;
+ pr_info_ratelimited("VPD changed during retrieval, retrying\n");
+ cond_resched();
+ } while (!fatal_signal_pending(current));
+
+ return blob;
+}
+
+static ssize_t papr_vpd_handle_read(struct file *file, char __user *buf, size_t size, loff_t *off)
+{
+ const struct vpd_blob *blob = file->private_data;
+
+ /* bug: we should not instantiate a handle without any data attached. */
+ if (!vpd_blob_has_data(blob)) {
+ pr_err_once("handle without data\n");
+ return -EIO;
+ }
+
+ return simple_read_from_buffer(buf, size, off, blob->data, blob->len);
+}
+
+static int papr_vpd_handle_release(struct inode *inode, struct file *file)
+{
+ const struct vpd_blob *blob = file->private_data;
+
+ vpd_blob_free(blob);
+
+ return 0;
+}
+
+static loff_t papr_vpd_handle_seek(struct file *file, loff_t off, int whence)
+{
+ const struct vpd_blob *blob = file->private_data;
+
+ return fixed_size_llseek(file, off, whence, blob->len);
+}
+
+
+static const struct file_operations papr_vpd_handle_ops = {
+ .read = papr_vpd_handle_read,
+ .llseek = papr_vpd_handle_seek,
+ .release = papr_vpd_handle_release,
+};
+
+/**
+ * papr_vpd_create_handle() - Create a fd-based handle for reading VPD.
+ * @ulc: Location code in user memory; defines the scope of the VPD to
+ * retrieve.
+ *
+ * Handler for PAPR_VPD_IOC_CREATE_HANDLE ioctl command. Validates
+ * @ulc and instantiates an immutable VPD "blob" for it. The blob is
+ * attached to a file descriptor for reading by user space. The memory
+ * backing the blob is freed when the file is released.
+ *
+ * The entire requested VPD is retrieved by this call and all
+ * necessary RTAS interactions are performed before returning the fd
+ * to user space. This keeps the read handler simple and ensures that
+ * the kernel can prevent interleaving of ibm,get-vpd call sequences.
+ *
+ * Return: The installed fd number if successful, -ve errno otherwise.
+ */
+static long papr_vpd_create_handle(struct papr_location_code __user *ulc)
+{
+ struct papr_location_code klc;
+ const struct vpd_blob *blob;
+ struct file *file;
+ long err;
+ int fd;
+
+ if (copy_from_user(&klc, ulc, sizeof(klc)))
+ return -EFAULT;
+
+ if (!string_is_terminated(klc.str, ARRAY_SIZE(klc.str)))
+ return -EINVAL;
+
+ blob = papr_vpd_retrieve(&klc);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
+
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ err = fd;
+ goto free_blob;
+ }
+
+ file = anon_inode_getfile("[papr-vpd]", &papr_vpd_handle_ops,
+ (void *)blob, O_RDONLY);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ goto put_fd;
+ }
+
+ file->f_mode |= FMODE_LSEEK | FMODE_PREAD;
+ fd_install(fd, file);
+ return fd;
+put_fd:
+ put_unused_fd(fd);
+free_blob:
+ vpd_blob_free(blob);
+ return err;
+}
+
+/*
+ * Top-level ioctl handler for /dev/papr-vpd.
+ */
+static long papr_vpd_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (__force void __user *)arg;
+ long ret;
+
+ switch (ioctl) {
+ case PAPR_VPD_IOC_CREATE_HANDLE:
+ ret = papr_vpd_create_handle(argp);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations papr_vpd_ops = {
+ .unlocked_ioctl = papr_vpd_dev_ioctl,
+};
+
+static struct miscdevice papr_vpd_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "papr-vpd",
+ .fops = &papr_vpd_ops,
+};
+
+static __init int papr_vpd_init(void)
+{
+ if (!rtas_function_implemented(RTAS_FN_IBM_GET_VPD))
+ return -ENODEV;
+
+ return misc_register(&papr_vpd_dev);
+}
+machine_device_initcall(pseries, papr_vpd_init);
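
A minimal user-space sketch of the new /dev/papr-vpd interface; the uapi header path is assumed to be <asm/papr-vpd.h>, and the empty location code is assumed here to request all VPD:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <asm/papr-vpd.h>

	int main(void)
	{
		struct papr_location_code lc = { .str = "" };
		char buf[4096];
		ssize_t n;
		int devfd, vpdfd;

		devfd = open("/dev/papr-vpd", O_RDONLY);
		if (devfd < 0)
			return 1;

		/* The ioctl runs the full ibm,get-vpd sequence in the kernel and
		 * returns a read-only, seekable fd backed by the resulting blob. */
		vpdfd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc);
		if (vpdfd < 0)
			return 1;

		while ((n = read(vpdfd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, (size_t)n, stdout);

		close(vpdfd);
		close(devfd);
		return 0;
	}
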
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 8376f03f932a..bba4ad192b0f 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -55,6 +55,7 @@ extern int dlpar_detach_node(struct device_node *);
extern int dlpar_acquire_drc(u32 drc_index);
extern int dlpar_release_drc(u32 drc_index);
extern int dlpar_unisolate_drc(u32 drc_index);
+extern void post_mobility_fixup(void);
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog);
int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_errlog);
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 5c43435472cc..382003dfdb9a 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -13,6 +13,7 @@
#include <asm/mmu.h>
#include <asm/rtas.h>
#include <asm/topology.h>
+#include "pseries.h"
static struct device suspend_dev;
diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c
index fd2f94a884f0..7dce8278b71e 100644
--- a/arch/powerpc/sysdev/grackle.c
+++ b/arch/powerpc/sysdev/grackle.c
@@ -18,24 +18,8 @@
#define GRACKLE_CFA(b, d, o) (0x80 | ((b) << 8) | ((d) << 16) \
| (((o) & ~3) << 24))
-#define GRACKLE_PICR1_STG 0x00000040
#define GRACKLE_PICR1_LOOPSNOOP 0x00000010
-/* N.B. this is called before bridges is initialized, so we can't
- use grackle_pcibios_{read,write}_config_dword. */
-static inline void grackle_set_stg(struct pci_controller* bp, int enable)
-{
- unsigned int val;
-
- out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
- val = in_le32(bp->cfg_data);
- val = enable? (val | GRACKLE_PICR1_STG) :
- (val & ~GRACKLE_PICR1_STG);
- out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
- out_le32(bp->cfg_data, val);
- (void)in_le32(bp->cfg_data);
-}
-
static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
{
unsigned int val;
@@ -56,7 +40,4 @@ void __init setup_grackle(struct pci_controller *hose)
pci_add_flags(PCI_REASSIGN_ALL_BUS);
if (of_machine_is_compatible("AAPL,PowerBook1998"))
grackle_set_loop_snoop(hose, 1);
-#if 0 /* Disabled for now, HW problems ??? */
- grackle_set_stg(hose, 1);
-#endif
}
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index f6ec6dba92dc..700b67476a7d 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -236,6 +236,8 @@ static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
cpu, hw_id);
+ if (!rname)
+ return -ENOMEM;
if (!request_mem_region(addr, size, rname)) {
pr_warn("icp_native: Could not reserve ICP MMIO for CPU %d, interrupt server #0x%x\n",
cpu, hw_id);
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index ab00235b018f..7b4287f36054 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -673,6 +673,7 @@ static inline int pmd_write(pmd_t pmd)
return pte_write(pmd_pte(pmd));
}
+#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
return pte_dirty(pmd_pte(pmd));
diff --git a/arch/riscv/include/asm/topology.h b/arch/riscv/include/asm/topology.h
index e316ab3b77f3..61183688bdd5 100644
--- a/arch/riscv/include/asm/topology.h
+++ b/arch/riscv/include/asm/topology.h
@@ -9,6 +9,7 @@
#define arch_set_freq_scale topology_set_freq_scale
#define arch_scale_freq_capacity topology_get_freq_scale
#define arch_scale_freq_invariant topology_scale_freq_invariant
+#define arch_scale_freq_ref topology_get_freq_ref
/* Replace task scheduler's default cpu-invariant accounting */
#define arch_scale_cpu_capacity topology_get_cpu_scale
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index fee22a3d1b53..82940b6a79a2 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -11,7 +11,7 @@ endif
CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
-ifdef CONFIG_KEXEC
+ifdef CONFIG_KEXEC_CORE
AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
endif
diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
index e60fbd8660c4..5bd1ec3341fe 100644
--- a/arch/riscv/kernel/elf_kexec.c
+++ b/arch/riscv/kernel/elf_kexec.c
@@ -216,7 +216,6 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
if (ret)
goto out;
kernel_start = image->start;
- pr_notice("The entry point of kernel at 0x%lx\n", image->start);
/* Add the kernel binary to the image */
ret = riscv_kexec_elf_load(image, &ehdr, &elf_info,
@@ -252,8 +251,8 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
image->elf_load_addr = kbuf.mem;
image->elf_headers_sz = headers_sz;
- pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
- image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
+ kexec_dprintk("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
/* Setup cmdline for kdump kernel case */
modified_cmdline = setup_kdump_cmdline(image, cmdline,
@@ -275,6 +274,8 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
pr_err("Error loading purgatory ret=%d\n", ret);
goto out;
}
+ kexec_dprintk("Loaded purgatory at 0x%lx\n", kbuf.mem);
+
ret = kexec_purgatory_get_set_symbol(image, "riscv_kernel_entry",
&kernel_start,
sizeof(kernel_start), 0);
@@ -293,7 +294,7 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
if (ret)
goto out;
initrd_pbase = kbuf.mem;
- pr_notice("Loaded initrd at 0x%lx\n", initrd_pbase);
+ kexec_dprintk("Loaded initrd at 0x%lx\n", initrd_pbase);
}
/* Add the DTB to the image */
@@ -318,7 +319,7 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
}
/* Cache the fdt buffer address for memory cleanup */
image->arch.fdt = fdt;
- pr_notice("Loaded device tree at 0x%lx\n", kbuf.mem);
+ kexec_dprintk("Loaded device tree at 0x%lx\n", kbuf.mem);
goto out;
out_free_fdt:
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
index 2d139b724bc8..ed9cad20c039 100644
--- a/arch/riscv/kernel/machine_kexec.c
+++ b/arch/riscv/kernel/machine_kexec.c
@@ -19,30 +19,6 @@
#include <linux/irq.h>
/*
- * kexec_image_info - Print received image details
- */
-static void
-kexec_image_info(const struct kimage *image)
-{
- unsigned long i;
-
- pr_debug("Kexec image info:\n");
- pr_debug("\ttype: %d\n", image->type);
- pr_debug("\tstart: %lx\n", image->start);
- pr_debug("\thead: %lx\n", image->head);
- pr_debug("\tnr_segments: %lu\n", image->nr_segments);
-
- for (i = 0; i < image->nr_segments; i++) {
- pr_debug("\t segment[%lu]: %016lx - %016lx", i,
- image->segment[i].mem,
- image->segment[i].mem + image->segment[i].memsz);
- pr_debug("\t\t0x%lx bytes, %lu pages\n",
- (unsigned long) image->segment[i].memsz,
- (unsigned long) image->segment[i].memsz / PAGE_SIZE);
- }
-}
-
-/*
* machine_kexec_prepare - Initialize kexec
*
* This function is called from do_kexec_load, when the user has
@@ -60,8 +36,6 @@ machine_kexec_prepare(struct kimage *image)
unsigned int control_code_buffer_sz = 0;
int i = 0;
- kexec_image_info(image);
-
/* Find the Flattened Device Tree and save its physical address */
for (i = 0; i < image->nr_segments; i++) {
if (image->segment[i].memsz <= sizeof(fdt))
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 90d4ba36d1d0..081339ddf47e 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -304,6 +304,8 @@ void handle_page_fault(struct pt_regs *regs)
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ if (fault & VM_FAULT_MAJOR)
+ flags |= FAULT_FLAG_TRIED;
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
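
This hunk (and the matching s390 one further down) makes the per-VMA-lock fault path remember a major fault before retrying, so the second attempt carries FAULT_FLAG_TRIED just as the mmap_lock path already does. A condensed sketch of the surrounding handler flow, with architecture details and error handling trimmed:

	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY))
		goto done;

	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	if (fault & VM_FAULT_MAJOR)		/* the line added here */
		flags |= FAULT_FLAG_TRIED;
	/* fall back to the mmap_lock path with the updated flags */
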
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index d5d8f99d1f25..959adae10f80 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -146,7 +146,7 @@ config S390
select GENERIC_TIME_VSYSCALL
select GENERIC_VDSO_TIME_NS
select GENERIC_IOREMAP if PCI
- select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+ select HAVE_ALIGNED_STRUCT_PAGE
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 601e87fa8a9a..1299b56e43f6 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -770,6 +770,7 @@ static inline int pud_write(pud_t pud)
return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}
+#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
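
The bare "#define pmd_dirty pmd_dirty" in front of the s390 inline (and, below, the sparc one) is the usual kernel pattern for opting out of a generic fallback: the architecture defines the macro to its own name so a generic header can test for it. A sketch of the generic side, assuming the fallback added by this series looks roughly like this:

#ifndef pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return 0;	/* no software-visible dirty bit in the PMD */
}
#define pmd_dirty pmd_dirty
#endif
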
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 86fec9b080f6..095bb86339a7 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -459,3 +459,8 @@
454 common futex_wake sys_futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue sys_futex_requeue
+457 common statmount sys_statmount sys_statmount
+458 common listmount sys_listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 1d2aa448d103..cc3e3a01dfa5 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -43,10 +43,12 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
return (void __user *) (address - (regs->int_code >> 16));
}
+#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
return 1;
}
+#endif
void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
{
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 249aefcf7c4e..ab4098886e56 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -337,6 +337,9 @@ static void do_exception(struct pt_regs *regs, int access)
return;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ if (fault & VM_FAULT_MAJOR)
+ flags |= FAULT_FLAG_TRIED;
+
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index 363fae0fe9bf..86fe269f0220 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -459,3 +459,8 @@
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 511c17aede4a..455311d9a5e9 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -26,7 +26,7 @@ config ARCH_FORCE_MAX_ORDER
default "10"
help
The kernel page allocator limits the size of maximal physically
- contiguous allocations. The limit is called MAX_ORDER and it
+	  contiguous allocations. The limit is called MAX_PAGE_ORDER and it
defines the maximal power of two of number of pages that can be
allocated as a single contiguous block. This option allows
overriding the default setting when ability to allocate very
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 49849790e66d..204c43cb3d43 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -277,7 +277,7 @@ config ARCH_FORCE_MAX_ORDER
default "12"
help
The kernel page allocator limits the size of maximal physically
- contiguous allocations. The limit is called MAX_ORDER and it
+ contiguous allocations. The limit is called MAX_PAGE_ORDER and it
defines the maximal power of two of number of pages that can be
allocated as a single contiguous block. This option allows
overriding the default setting when ability to allocate very
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 5e41033bf4ca..a8c871b7d786 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -706,6 +706,7 @@ static inline unsigned long pmd_write(pmd_t pmd)
#define pud_write(pud) pte_write(__pte(pud_val(pud)))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_dirty pmd_dirty
static inline unsigned long pmd_dirty(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 0984bb6f0f17..58ea4ef9b622 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -5,7 +5,6 @@
#
asflags-y := -ansi
-ccflags-y := -Werror
# Undefine sparc when processing vmlinux.lds - it is used
# And teach CPP we are doing $(BITS) builds (for this case)
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index c80b0a21d709..083e5f05a7f0 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -194,7 +194,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
size = IO_PAGE_ALIGN(size);
order = get_order(size);
- if (unlikely(order > MAX_ORDER))
+ if (unlikely(order > MAX_PAGE_ORDER))
return NULL;
npages = size >> IO_PAGE_SHIFT;
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index 7bcaa3d5ea44..b23d59313589 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -502,3 +502,8 @@
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 08ffd17d5ec3..523a6e5ee925 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -897,7 +897,7 @@ void __init cheetah_ecache_flush_init(void)
/* Now allocate error trap reporting scoreboard. */
sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
if ((PAGE_SIZE << order) >= sz)
break;
}
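
The MAX_ORDER conversions in this series are mechanical: MAX_PAGE_ORDER keeps the old meaning of the largest valid allocation order, and loops that used to run "order <= MAX_ORDER" switch to the exclusive NR_PAGE_ORDERS bound. A small sketch of the assumed relationship (the series defines it in include/linux/mmzone.h), mirroring the cheetah_ecache_flush_init() loop above:

#define NR_PAGE_ORDERS	(MAX_PAGE_ORDER + 1)	/* assumption, for illustration */

static int smallest_fitting_order(size_t sz)
{
	int order;

	for (order = 0; order < NR_PAGE_ORDERS; order++)
		if ((PAGE_SIZE << order) >= sz)
			return order;
	return -1;
}
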
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 063556fe2cb1..59669ebddd4e 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -3,7 +3,6 @@
#
asflags-y := -ansi -DST_DIV0=0x02
-ccflags-y := -Werror
lib-$(CONFIG_SPARC32) += ashrdi3.o
lib-$(CONFIG_SPARC32) += memcpy.o memset.o
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 871354aa3c00..809d993f6d88 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -3,7 +3,6 @@
#
asflags-y := -ansi
-ccflags-y := -Werror
obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
obj-y += fault_$(BITS).o
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 5e2931a18409..6acd8a4c1e2a 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -402,8 +402,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
unsigned long new_rss_limit;
gfp_t gfp_flags;
- if (max_tsb_size > PAGE_SIZE << MAX_ORDER)
- max_tsb_size = PAGE_SIZE << MAX_ORDER;
+ if (max_tsb_size > PAGE_SIZE << MAX_PAGE_ORDER)
+ max_tsb_size = PAGE_SIZE << MAX_PAGE_ORDER;
new_cache_index = 0;
for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
diff --git a/arch/sparc/prom/Makefile b/arch/sparc/prom/Makefile
index 397b79af77f7..a1adc75d8055 100644
--- a/arch/sparc/prom/Makefile
+++ b/arch/sparc/prom/Makefile
@@ -3,7 +3,6 @@
# Linux.
#
asflags := -ansi
-ccflags := -Werror
lib-y := bootstr_$(BITS).o
lib-y += init_$(BITS).o
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index b1bfed0c8528..7a9820797eae 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -373,10 +373,10 @@ int __init linux_main(int argc, char **argv)
max_physmem = TASK_SIZE - uml_physmem - iomem_size - MIN_VMALLOC;
/*
- * Zones have to begin on a 1 << MAX_ORDER page boundary,
+ * Zones have to begin on a 1 << MAX_PAGE_ORDER page boundary,
* so this makes sure that's true for highmem
*/
- max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
+ max_physmem &= ~((1 << (PAGE_SHIFT + MAX_PAGE_ORDER)) - 1);
if (physmem_size + iomem_size > max_physmem) {
highmem = physmem_size + iomem_size - max_physmem;
physmem_size -= highmem;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1566748f16c4..53f2e7797b1d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -88,6 +88,7 @@ config X86
select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_PTE_DEVMAP if X86_64
select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_HW_PTE_YOUNG
select ARCH_HAS_NONLEAF_PMD_YOUNG if PGTABLE_LEVELS > 2
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_COPY_MC if X86_64
@@ -169,7 +170,7 @@ config X86
select HAS_IOPORT
select HAVE_ACPI_APEI if ACPI
select HAVE_ACPI_APEI_NMI if ACPI
- select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+ select HAVE_ALIGNED_STRUCT_PAGE
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
select HAVE_ARCH_HUGE_VMALLOC if X86_64
@@ -384,10 +385,6 @@ config HAVE_INTEL_TXT
def_bool y
depends on INTEL_IOMMU && ACPI
-config X86_32_SMP
- def_bool y
- depends on X86_32 && SMP
-
config X86_64_SMP
def_bool y
depends on X86_64 && SMP
@@ -1415,7 +1412,7 @@ config HIGHMEM4G
config HIGHMEM64G
bool "64GB"
- depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
+ depends on X86_HAVE_PAE
select X86_PAE
help
Select this if you have a 32-bit processor and more than 4
@@ -1472,7 +1469,7 @@ config HIGHMEM
config X86_PAE
bool "PAE (Physical Address Extension) Support"
- depends on X86_32 && !HIGHMEM4G
+ depends on X86_32 && X86_HAVE_PAE
select PHYS_ADDR_T_64BIT
select SWIOTLB
help
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 00468adf180f..b9224cf2ee4d 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -362,9 +362,13 @@ config X86_TSC
def_bool y
depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
+config X86_HAVE_PAE
+ def_bool y
+ depends on MCRUSOE || MEFFICEON || MCYRIXIII || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC7 || MCORE2 || MATOM || X86_64
+
config X86_CMPXCHG64
def_bool y
- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
+ depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7
# this should be set for all -march=.. options where the compiler
# generates cmov.
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 71fc531b95b4..f19c038409aa 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -53,7 +53,7 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
KBUILD_CFLAGS += $(call cc-option,-Wa$(comma)-mrelax-relocations=no)
KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
-# sev.c indirectly inludes inat-table.h which is generated during
+# sev.c indirectly includes inat-table.h which is generated during
# compilation and stored in $(objtree). Add the directory to the includes so
# that the compiler finds it even with out-of-tree builds (make O=/some/path).
CFLAGS_sev.o += -I$(objtree)/arch/x86/lib/
diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
index 473ba59b82a8..d040080d7edb 100644
--- a/arch/x86/boot/compressed/ident_map_64.c
+++ b/arch/x86/boot/compressed/ident_map_64.c
@@ -386,3 +386,8 @@ void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
*/
kernel_add_identity_map(address, end);
}
+
+void do_boot_nmi_trap(struct pt_regs *regs, unsigned long error_code)
+{
+ /* Empty handler to ignore NMI during early boot */
+}
diff --git a/arch/x86/boot/compressed/idt_64.c b/arch/x86/boot/compressed/idt_64.c
index 3cdf94b41456..d100284bbef4 100644
--- a/arch/x86/boot/compressed/idt_64.c
+++ b/arch/x86/boot/compressed/idt_64.c
@@ -61,6 +61,7 @@ void load_stage2_idt(void)
boot_idt_desc.address = (unsigned long)boot_idt;
set_idt_entry(X86_TRAP_PF, boot_page_fault);
+ set_idt_entry(X86_TRAP_NMI, boot_nmi_trap);
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
diff --git a/arch/x86/boot/compressed/idt_handlers_64.S b/arch/x86/boot/compressed/idt_handlers_64.S
index 22890e199f5b..4d03c8562f63 100644
--- a/arch/x86/boot/compressed/idt_handlers_64.S
+++ b/arch/x86/boot/compressed/idt_handlers_64.S
@@ -70,6 +70,7 @@ SYM_FUNC_END(\name)
.code64
EXCEPTION_HANDLER boot_page_fault do_boot_page_fault error_code=1
+EXCEPTION_HANDLER boot_nmi_trap do_boot_nmi_trap error_code=0
#ifdef CONFIG_AMD_MEM_ENCRYPT
EXCEPTION_HANDLER boot_stage1_vc do_vc_no_ghcb error_code=1
diff --git a/arch/x86/boot/compressed/mem.c b/arch/x86/boot/compressed/mem.c
index b3c3a4be7471..dbba332e4a12 100644
--- a/arch/x86/boot/compressed/mem.c
+++ b/arch/x86/boot/compressed/mem.c
@@ -8,7 +8,7 @@
/*
* accept_memory() and process_unaccepted_memory() called from EFI stub which
- * runs before decompresser and its early_tdx_detect().
+ * runs before decompressor and its early_tdx_detect().
*
* Enumerate TDX directly from the early users.
*/
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index c0d502bd8716..bc2f0f17fb90 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -196,6 +196,7 @@ static inline void cleanup_exception_handling(void) { }
/* IDT Entry Points */
void boot_page_fault(void);
+void boot_nmi_trap(void);
void boot_stage1_vc(void);
void boot_stage2_vc(void);
diff --git a/arch/x86/boot/pm.c b/arch/x86/boot/pm.c
index 40031a614712..5941f930f6c5 100644
--- a/arch/x86/boot/pm.c
+++ b/arch/x86/boot/pm.c
@@ -11,6 +11,7 @@
*/
#include "boot.h"
+#include <asm/desc_defs.h>
#include <asm/segment.h>
/*
@@ -67,13 +68,13 @@ static void setup_gdt(void)
being 8-byte unaligned. Intel recommends 16 byte alignment. */
static const u64 boot_gdt[] __attribute__((aligned(16))) = {
/* CS: code, read/execute, 4 GB, base 0 */
- [GDT_ENTRY_BOOT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff),
+ [GDT_ENTRY_BOOT_CS] = GDT_ENTRY(DESC_CODE32, 0, 0xfffff),
/* DS: data, read/write, 4 GB, base 0 */
- [GDT_ENTRY_BOOT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff),
+ [GDT_ENTRY_BOOT_DS] = GDT_ENTRY(DESC_DATA32, 0, 0xfffff),
/* TSS: 32-bit tss, 104 bytes, base 4096 */
/* We only have a TSS here to keep Intel VT happy;
we don't actually use it for anything. */
- [GDT_ENTRY_BOOT_TSS] = GDT_ENTRY(0x0089, 4096, 103),
+ [GDT_ENTRY_BOOT_TSS] = GDT_ENTRY(DESC_TSS32, 4096, 103),
};
/* Xen HVM incorrectly stores a pointer to the gdt_ptr, instead
of the gdt_ptr contents. Thus, make it static so it will
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 1c8541ae3b3a..c23f3b9c84fe 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -49,7 +49,7 @@ int strcmp(const char *str1, const char *str2)
{
const unsigned char *s1 = (const unsigned char *)str1;
const unsigned char *s2 = (const unsigned char *)str2;
- int delta = 0;
+ int delta;
while (*s1 || *s2) {
delta = *s1 - *s2;
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index cf1f13c82175..c1cb90369915 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -887,7 +887,7 @@ void __init tdx_early_init(void)
* there.
*
* Intel-TDX has a secure RDMSR hypercall, but that needs to be
- * implemented seperately in the low level startup ASM code.
+ * implemented separately in the low level startup ASM code.
* Until that is in place, disable parallel bringup for TDX.
*/
x86_cpuinit.parallel_bringup = false;
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 187f913cc239..411d8c83e88a 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -666,7 +666,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.ifc \operation, dec
movdqa %xmm1, %xmm3
- pxor %xmm1, %xmm9 # Cyphertext XOR E(K, Yn)
+ pxor %xmm1, %xmm9 # Ciphertext XOR E(K, Yn)
mov \PLAIN_CYPH_LEN, %r10
add %r13, %r10
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 74dd230973cf..8c9749ed0651 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -747,7 +747,7 @@ VARIABLE_OFFSET = 16*8
.if \ENC_DEC == DEC
vmovdqa %xmm1, %xmm3
- pxor %xmm1, %xmm9 # Cyphertext XOR E(K, Yn)
+ pxor %xmm1, %xmm9 # Ciphertext XOR E(K, Yn)
mov \PLAIN_CYPH_LEN, %r10
add %r13, %r10
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 81ce0f4db555..bbcff1fb78cb 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -184,7 +184,7 @@ SYM_FUNC_START(crc_pcl)
xor crc1,crc1
xor crc2,crc2
- # Fall thruogh into top of crc array (crc_128)
+ # Fall through into top of crc array (crc_128)
################################################################
## 3) CRC Array:
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
index d902b8ea0721..5bfce4b045fd 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -84,7 +84,7 @@ frame_size = frame_WK + WK_SIZE
# Useful QWORD "arrays" for simpler memory references
# MSG, DIGEST, K_t, W_t are arrays
-# WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even
+# WK_2(t) points to 1 of 2 qwords at frame.WK depending on t being odd/even
# Input message (arg1)
#define MSG(i) 8*i(msg)
diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
index 65be30156816..30a2c4777f9d 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -82,7 +82,7 @@ frame_size = frame_WK + WK_SIZE
# Useful QWORD "arrays" for simpler memory references
# MSG, DIGEST, K_t, W_t are arrays
-# WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even
+# WK_2(t) points to 1 of 2 qwords at frame.WK depending on t being odd/even
# Input message (arg1)
#define MSG(i) 8*i(msg)
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index f6907627172b..9f1d94790a54 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -175,8 +175,7 @@ For 32-bit we have the following conventions - kernel is built with
#define THIS_CPU_user_pcid_flush_mask \
PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask
-.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
- ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+.macro SWITCH_TO_USER_CR3 scratch_reg:req scratch_reg2:req
mov %cr3, \scratch_reg
ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
@@ -206,13 +205,20 @@ For 32-bit we have the following conventions - kernel is built with
/* Flip the PGD to the user version */
orq $(PTI_USER_PGTABLE_MASK), \scratch_reg
mov \scratch_reg, %cr3
+.endm
+
+.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+ SWITCH_TO_USER_CR3 \scratch_reg \scratch_reg2
.Lend_\@:
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
pushq %rax
- SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
+ SWITCH_TO_USER_CR3 scratch_reg=\scratch_reg scratch_reg2=%rax
popq %rax
+.Lend_\@:
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index de6469dffe3a..c40f89ab1b4c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -559,17 +559,27 @@ __irqentry_text_end:
SYM_CODE_START_LOCAL(common_interrupt_return)
SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
IBRS_EXIT
-#ifdef CONFIG_DEBUG_ENTRY
- /* Assert that pt_regs indicates user mode. */
- testb $3, CS(%rsp)
- jnz 1f
- ud2
-1:
-#endif
#ifdef CONFIG_XEN_PV
ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
#endif
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ ALTERNATIVE "", "jmp .Lpti_restore_regs_and_return_to_usermode", X86_FEATURE_PTI
+#endif
+
+ STACKLEAK_ERASE
+ POP_REGS
+ add $8, %rsp /* orig_ax */
+ UNWIND_HINT_IRET_REGS
+
+.Lswapgs_and_iret:
+ swapgs
+ /* Assert that the IRET frame indicates user mode. */
+ testb $3, 8(%rsp)
+ jnz .Lnative_iret
+ ud2
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+.Lpti_restore_regs_and_return_to_usermode:
POP_REGS pop_rdi=0
/*
@@ -596,13 +606,14 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
*/
STACKLEAK_ERASE_NOCLOBBER
- SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
+ push %rax
+ SWITCH_TO_USER_CR3 scratch_reg=%rdi scratch_reg2=%rax
+ pop %rax
/* Restore RDI. */
popq %rdi
- swapgs
- jmp .Lnative_iret
-
+ jmp .Lswapgs_and_iret
+#endif
SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index c8fac5205803..5f8591ce7f25 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -461,3 +461,8 @@
454 i386 futex_wake sys_futex_wake
455 i386 futex_wait sys_futex_wait
456 i386 futex_requeue sys_futex_requeue
+457 i386 statmount sys_statmount
+458 i386 listmount sys_listmount
+459 i386 lsm_get_self_attr sys_lsm_get_self_attr
+460 i386 lsm_set_self_attr sys_lsm_set_self_attr
+461 i386 lsm_list_modules sys_lsm_list_modules
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 8cb8bf68721c..7e8d46f4147f 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -378,6 +378,11 @@
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c
index ed308719236c..780acd3dff22 100644
--- a/arch/x86/events/amd/brs.c
+++ b/arch/x86/events/amd/brs.c
@@ -125,7 +125,7 @@ int amd_brs_hw_config(struct perf_event *event)
* Where X is the number of taken branches due to interrupt
* skid. Skid is large.
*
- * Where Y is the occurences of the event while BRS is
+ * Where Y is the occurrences of the event while BRS is
* capturing the lbr_nr entries.
*
* By using retired taken branches, we limit the impact on the
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index e24976593a29..81f6d8275b6b 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -940,7 +940,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
continue;
if (has_branch_stack(event))
- perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);
+ perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL);
if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
@@ -1184,7 +1184,7 @@ static void amd_put_event_constraints_f17h(struct cpu_hw_events *cpuc,
* period of each one and given that the BRS saturates, it would not be possible
* to guarantee correlated content for all events. Therefore, in situations
* where multiple events want to use BRS, the kernel enforces mutual exclusion.
- * Exclusion is enforced by chosing only one counter for events using BRS.
+ * Exclusion is enforced by choosing only one counter for events using BRS.
* The event scheduling logic will then automatically multiplex the
* events and ensure that at most one event is actively using BRS.
*
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 6911c5399d02..e91970b01d62 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -287,6 +287,9 @@ static int perf_ibs_init(struct perf_event *event)
if (config & ~perf_ibs->config_mask)
return -EINVAL;
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
ret = validate_group(event);
if (ret)
return ret;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 40ad1425ffa2..09050641ce5d 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -601,7 +601,7 @@ int x86_pmu_hw_config(struct perf_event *event)
}
}
- if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK)
+ if (branch_sample_call_stack(event))
event->attach_state |= PERF_ATTACH_TASK_DATA;
/*
@@ -1702,7 +1702,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
perf_sample_data_init(&data, 0, event->hw.last_period);
if (has_branch_stack(event))
- perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);
+ perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL);
if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index ce1c777227b4..3804f21ab049 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2527,9 +2527,14 @@ static void intel_pmu_assign_event(struct perf_event *event, int idx)
perf_report_aux_output_id(event, idx);
}
+static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event)
+{
+ return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK;
+}
+
static void intel_pmu_del_event(struct perf_event *event)
{
- if (needs_branch_stack(event))
+ if (intel_pmu_needs_branch_stack(event))
intel_pmu_lbr_del(event);
if (event->attr.precise_ip)
intel_pmu_pebs_del(event);
@@ -2787,6 +2792,7 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
static void intel_pmu_enable_event(struct perf_event *event)
{
+ u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE;
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -2795,8 +2801,10 @@ static void intel_pmu_enable_event(struct perf_event *event)
switch (idx) {
case 0 ... INTEL_PMC_IDX_FIXED - 1:
+ if (branch_sample_counters(event))
+ enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR;
intel_set_masks(event, idx);
- __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
+ __x86_pmu_enable_event(hwc, enable_mask);
break;
case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
@@ -2820,7 +2828,7 @@ static void intel_pmu_add_event(struct perf_event *event)
{
if (event->attr.precise_ip)
intel_pmu_pebs_add(event);
- if (needs_branch_stack(event))
+ if (intel_pmu_needs_branch_stack(event))
intel_pmu_lbr_add(event);
}
@@ -3047,7 +3055,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
perf_sample_data_init(&data, 0, event->hw.last_period);
if (has_branch_stack(event))
- perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);
+ intel_pmu_lbr_save_brstack(&data, cpuc, event);
if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
@@ -3612,6 +3620,13 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
if (cpuc->excl_cntrs)
return intel_get_excl_constraints(cpuc, event, idx, c2);
+ /* Not all counters support the branch counter feature. */
+ if (branch_sample_counters(event)) {
+ c2 = dyn_constraint(cpuc, c2, idx);
+ c2->idxmsk64 &= x86_pmu.lbr_counters;
+ c2->weight = hweight64(c2->idxmsk64);
+ }
+
return c2;
}
@@ -3897,7 +3912,62 @@ static int intel_pmu_hw_config(struct perf_event *event)
x86_pmu.pebs_aliases(event);
}
- if (needs_branch_stack(event)) {
+ if (needs_branch_stack(event) && is_sampling_event(event))
+ event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK;
+
+ if (branch_sample_counters(event)) {
+ struct perf_event *leader, *sibling;
+ int num = 0;
+
+ if (!(x86_pmu.flags & PMU_FL_BR_CNTR) ||
+ (event->attr.config & ~INTEL_ARCH_EVENT_MASK))
+ return -EINVAL;
+
+ /*
+ * The branch counter logging is not supported in the call stack
+ * mode yet, since we cannot simply flush the LBR during e.g.,
+ * multiplexing. Also, there is no obvious usage with the call
+	 * stack mode. Simply forbid it for now.
+ *
+ * If any events in the group enable the branch counter logging
+ * feature, the group is treated as a branch counter logging
+ * group, which requires the extra space to store the counters.
+ */
+ leader = event->group_leader;
+ if (branch_sample_call_stack(leader))
+ return -EINVAL;
+ if (branch_sample_counters(leader))
+ num++;
+ leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;
+
+ for_each_sibling_event(sibling, leader) {
+ if (branch_sample_call_stack(sibling))
+ return -EINVAL;
+ if (branch_sample_counters(sibling))
+ num++;
+ }
+
+ if (num > fls(x86_pmu.lbr_counters))
+ return -EINVAL;
+ /*
+ * Only applying the PERF_SAMPLE_BRANCH_COUNTERS doesn't
+ * require any branch stack setup.
+ * Clear the bit to avoid unnecessary branch stack setup.
+ */
+ if (0 == (event->attr.branch_sample_type &
+ ~(PERF_SAMPLE_BRANCH_PLM_ALL |
+ PERF_SAMPLE_BRANCH_COUNTERS)))
+ event->hw.flags &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK;
+
+ /*
+ * Force the leader to be a LBR event. So LBRs can be reset
+ * with the leader event. See intel_pmu_lbr_del() for details.
+ */
+ if (!intel_pmu_needs_branch_stack(leader))
+ return -EINVAL;
+ }
+
+ if (intel_pmu_needs_branch_stack(event)) {
ret = intel_pmu_setup_lbr_filter(event);
if (ret)
return ret;
@@ -4027,7 +4097,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
/*
* Currently, the only caller of this function is the atomic_switch_perf_msrs().
- * The host perf conext helps to prepare the values of the real hardware for
+ * The host perf context helps to prepare the values of the real hardware for
* a set of msrs that need to be switched atomically in a vmx transaction.
*
* For example, the pseudocode needed to add a new msr should look like:
@@ -4051,12 +4121,17 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
int global_ctrl, pebs_enable;
+ /*
+ * In addition to obeying exclude_guest/exclude_host, remove bits being
+ * used for PEBS when running a guest, because PEBS writes to virtual
+ * addresses (not physical addresses).
+ */
*nr = 0;
global_ctrl = (*nr)++;
arr[global_ctrl] = (struct perf_guest_switch_msr){
.msr = MSR_CORE_PERF_GLOBAL_CTRL,
.host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
- .guest = intel_ctrl & (~cpuc->intel_ctrl_host_mask | ~pebs_mask),
+ .guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask,
};
if (!x86_pmu.pebs)
@@ -4375,8 +4450,13 @@ cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
*/
if (event->attr.precise_ip == 3) {
/* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */
- if (constraint_match(&fixed0_constraint, event->hw.config))
- return &fixed0_counter0_1_constraint;
+ if (constraint_match(&fixed0_constraint, event->hw.config)) {
+ /* The fixed counter 0 doesn't support LBR event logging. */
+ if (branch_sample_counters(event))
+ return &counter0_1_constraint;
+ else
+ return &fixed0_counter0_1_constraint;
+ }
switch (c->idxmsk64 & 0x3ull) {
case 0x1:
@@ -4555,7 +4635,7 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
goto err;
}
- if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
+ if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_BR_CNTR)) {
size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
@@ -5527,11 +5607,41 @@ static ssize_t branches_show(struct device *cdev,
static DEVICE_ATTR_RO(branches);
+static ssize_t branch_counter_nr_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters));
+}
+
+static DEVICE_ATTR_RO(branch_counter_nr);
+
+static ssize_t branch_counter_width_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS);
+}
+
+static DEVICE_ATTR_RO(branch_counter_width);
+
static struct attribute *lbr_attrs[] = {
&dev_attr_branches.attr,
+ &dev_attr_branch_counter_nr.attr,
+ &dev_attr_branch_counter_width.attr,
NULL
};
+static umode_t
+lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+ /* branches */
+ if (i == 0)
+ return x86_pmu.lbr_nr ? attr->mode : 0;
+
+ return (x86_pmu.flags & PMU_FL_BR_CNTR) ? attr->mode : 0;
+}
+
static char pmu_name_str[30];
static ssize_t pmu_name_show(struct device *cdev,
@@ -5559,6 +5669,15 @@ static struct attribute *intel_pmu_attrs[] = {
};
static umode_t
+default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+ if (attr == &dev_attr_allow_tsx_force_abort.attr)
+ return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
+
+ return attr->mode;
+}
+
+static umode_t
tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
@@ -5580,26 +5699,11 @@ mem_is_visible(struct kobject *kobj, struct attribute *attr, int i)
}
static umode_t
-lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
-{
- return x86_pmu.lbr_nr ? attr->mode : 0;
-}
-
-static umode_t
exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
return x86_pmu.version >= 2 ? attr->mode : 0;
}
-static umode_t
-default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
-{
- if (attr == &dev_attr_allow_tsx_force_abort.attr)
- return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
-
- return attr->mode;
-}
-
static struct attribute_group group_events_td = {
.name = "events",
};
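
Taken together, the intel/core.c and lbr.c changes above add branch counter logging: in an LBR sampling group, members that set PERF_SAMPLE_BRANCH_COUNTERS get their per-branch occurrence counts appended to each LBR record, with group validity enforced by the hw_config checks and the counter geometry exported through the new branch_counter_nr/branch_counter_width sysfs files. A minimal user-space sketch of opening such a group (assumptions: uapi headers that already carry the new PERF_SAMPLE_BRANCH_COUNTERS bit, an LBR-capable CPU, error handling trimmed; the kernel may still reject combinations that fail the hw_config constraints shown above):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int perf_open(struct perf_event_attr *attr, int group_fd)
{
	return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
}

int main(void)
{
	struct perf_event_attr attr;
	int leader, member;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
	/* the leader must itself be an LBR sampling event */
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
	leader = perf_open(&attr, -1);

	/* member asks for its occurrences to be logged per LBR entry */
	attr.sample_period = 0;
	attr.sample_type = 0;
	attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
				  PERF_SAMPLE_BRANCH_COUNTERS;
	member = perf_open(&attr, leader);

	/* mmap the leader's ring buffer and parse samples as usual */
	return leader < 0 || member < 0;
}
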
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index cbeb6d2bf5b4..4b50a3a9818a 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -41,7 +41,7 @@
* MSR_CORE_C1_RES: CORE C1 Residency Counter
* perf code: 0x00
* Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL
- * MTL
+ * MTL,SRF,GRR
* Scope: Core (each processor core has a MSR)
* MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
* perf code: 0x01
@@ -52,7 +52,8 @@
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- * TGL,TNT,RKL,ADL,RPL,SPR,MTL
+ * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF,
+ * GRR
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
@@ -75,7 +76,7 @@
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- * TGL,TNT,RKL,ADL,RPL,SPR,MTL
+ * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
@@ -97,6 +98,10 @@
* Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
* TNT,RKL,ADL,RPL,MTL
* Scope: Package (physical package)
+ * MSR_MODULE_C6_RES_MS: Module C6 Residency Counter.
+ * perf code: 0x00
+ * Available model: SRF,GRR
+ * Scope: A cluster of cores that share an L2 cache
*
*/
@@ -130,6 +135,7 @@ static ssize_t cstate_get_attr_cpumask(struct device *dev,
struct cstate_model {
unsigned long core_events;
unsigned long pkg_events;
+ unsigned long module_events;
unsigned long quirks;
};
@@ -189,20 +195,20 @@ static struct attribute *attrs_empty[] = {
* "events" group (with empty attrs) before updating
* it with detected events.
*/
-static struct attribute_group core_events_attr_group = {
+static struct attribute_group cstate_events_attr_group = {
.name = "events",
.attrs = attrs_empty,
};
-DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
-static struct attribute *core_format_attrs[] = {
- &format_attr_core_event.attr,
+DEFINE_CSTATE_FORMAT_ATTR(cstate_event, event, "config:0-63");
+static struct attribute *cstate_format_attrs[] = {
+ &format_attr_cstate_event.attr,
NULL,
};
-static struct attribute_group core_format_attr_group = {
+static struct attribute_group cstate_format_attr_group = {
.name = "format",
- .attrs = core_format_attrs,
+ .attrs = cstate_format_attrs,
};
static cpumask_t cstate_core_cpu_mask;
@@ -217,9 +223,9 @@ static struct attribute_group cpumask_attr_group = {
.attrs = cstate_cpumask_attrs,
};
-static const struct attribute_group *core_attr_groups[] = {
- &core_events_attr_group,
- &core_format_attr_group,
+static const struct attribute_group *cstate_attr_groups[] = {
+ &cstate_events_attr_group,
+ &cstate_format_attr_group,
&cpumask_attr_group,
NULL,
};
@@ -268,30 +274,30 @@ static struct perf_msr pkg_msr[] = {
[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &group_cstate_pkg_c10, test_msr },
};
-static struct attribute_group pkg_events_attr_group = {
- .name = "events",
- .attrs = attrs_empty,
-};
+static cpumask_t cstate_pkg_cpu_mask;
-DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
-static struct attribute *pkg_format_attrs[] = {
- &format_attr_pkg_event.attr,
- NULL,
-};
-static struct attribute_group pkg_format_attr_group = {
- .name = "format",
- .attrs = pkg_format_attrs,
+/* cstate_module PMU */
+static struct pmu cstate_module_pmu;
+static bool has_cstate_module;
+
+enum perf_cstate_module_events {
+ PERF_CSTATE_MODULE_C6_RES = 0,
+
+ PERF_CSTATE_MODULE_EVENT_MAX,
};
-static cpumask_t cstate_pkg_cpu_mask;
+PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_module_c6, "event=0x00");
-static const struct attribute_group *pkg_attr_groups[] = {
- &pkg_events_attr_group,
- &pkg_format_attr_group,
- &cpumask_attr_group,
- NULL,
+static unsigned long module_msr_mask;
+
+PMU_EVENT_GROUP(events, cstate_module_c6);
+
+static struct perf_msr module_msr[] = {
+ [PERF_CSTATE_MODULE_C6_RES] = { MSR_MODULE_C6_RES_MS, &group_cstate_module_c6, test_msr },
};
+static cpumask_t cstate_module_cpu_mask;
+
static ssize_t cstate_get_attr_cpumask(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -302,6 +308,8 @@ static ssize_t cstate_get_attr_cpumask(struct device *dev,
return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
else if (pmu == &cstate_pkg_pmu)
return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
+ else if (pmu == &cstate_module_pmu)
+ return cpumap_print_to_pagebuf(true, buf, &cstate_module_cpu_mask);
else
return 0;
}
@@ -342,6 +350,15 @@ static int cstate_pmu_event_init(struct perf_event *event)
event->hw.event_base = pkg_msr[cfg].msr;
cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
topology_die_cpumask(event->cpu));
+ } else if (event->pmu == &cstate_module_pmu) {
+ if (cfg >= PERF_CSTATE_MODULE_EVENT_MAX)
+ return -EINVAL;
+ cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_MODULE_EVENT_MAX);
+ if (!(module_msr_mask & (1 << cfg)))
+ return -EINVAL;
+ event->hw.event_base = module_msr[cfg].msr;
+ cpu = cpumask_any_and(&cstate_module_cpu_mask,
+ topology_cluster_cpumask(event->cpu));
} else {
return -ENOENT;
}
@@ -429,6 +446,17 @@ static int cstate_cpu_exit(unsigned int cpu)
perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
}
}
+
+ if (has_cstate_module &&
+ cpumask_test_and_clear_cpu(cpu, &cstate_module_cpu_mask)) {
+
+ target = cpumask_any_but(topology_cluster_cpumask(cpu), cpu);
+ /* Migrate events if there is a valid target */
+ if (target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &cstate_module_cpu_mask);
+ perf_pmu_migrate_context(&cstate_module_pmu, cpu, target);
+ }
+ }
return 0;
}
@@ -455,6 +483,15 @@ static int cstate_cpu_init(unsigned int cpu)
if (has_cstate_pkg && target >= nr_cpu_ids)
cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
+ /*
+ * If this is the first online thread of that cluster, set it
+ * in the cluster cpu mask as the designated reader.
+ */
+ target = cpumask_any_and(&cstate_module_cpu_mask,
+ topology_cluster_cpumask(cpu));
+ if (has_cstate_module && target >= nr_cpu_ids)
+ cpumask_set_cpu(cpu, &cstate_module_cpu_mask);
+
return 0;
}
@@ -477,8 +514,13 @@ static const struct attribute_group *pkg_attr_update[] = {
NULL,
};
+static const struct attribute_group *module_attr_update[] = {
+ &group_cstate_module_c6,
+ NULL
+};
+
static struct pmu cstate_core_pmu = {
- .attr_groups = core_attr_groups,
+ .attr_groups = cstate_attr_groups,
.attr_update = core_attr_update,
.name = "cstate_core",
.task_ctx_nr = perf_invalid_context,
@@ -493,7 +535,7 @@ static struct pmu cstate_core_pmu = {
};
static struct pmu cstate_pkg_pmu = {
- .attr_groups = pkg_attr_groups,
+ .attr_groups = cstate_attr_groups,
.attr_update = pkg_attr_update,
.name = "cstate_pkg",
.task_ctx_nr = perf_invalid_context,
@@ -507,6 +549,21 @@ static struct pmu cstate_pkg_pmu = {
.module = THIS_MODULE,
};
+static struct pmu cstate_module_pmu = {
+ .attr_groups = cstate_attr_groups,
+ .attr_update = module_attr_update,
+ .name = "cstate_module",
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = cstate_pmu_event_init,
+ .add = cstate_pmu_event_add,
+ .del = cstate_pmu_event_del,
+ .start = cstate_pmu_event_start,
+ .stop = cstate_pmu_event_stop,
+ .read = cstate_pmu_event_update,
+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
+ .module = THIS_MODULE,
+};
+
static const struct cstate_model nhm_cstates __initconst = {
.core_events = BIT(PERF_CSTATE_CORE_C3_RES) |
BIT(PERF_CSTATE_CORE_C6_RES),
@@ -621,6 +678,22 @@ static const struct cstate_model glm_cstates __initconst = {
BIT(PERF_CSTATE_PKG_C10_RES),
};
+static const struct cstate_model grr_cstates __initconst = {
+ .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
+ BIT(PERF_CSTATE_CORE_C6_RES),
+
+ .module_events = BIT(PERF_CSTATE_MODULE_C6_RES),
+};
+
+static const struct cstate_model srf_cstates __initconst = {
+ .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
+ BIT(PERF_CSTATE_CORE_C6_RES),
+
+ .pkg_events = BIT(PERF_CSTATE_PKG_C6_RES),
+
+ .module_events = BIT(PERF_CSTATE_MODULE_C6_RES),
+};
+
static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_cstates),
@@ -673,6 +746,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &glm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &glm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_cstates),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &srf_cstates),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &grr_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates),
@@ -714,10 +789,14 @@ static int __init cstate_probe(const struct cstate_model *cm)
pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX,
true, (void *) &cm->pkg_events);
+ module_msr_mask = perf_msr_probe(module_msr, PERF_CSTATE_MODULE_EVENT_MAX,
+ true, (void *) &cm->module_events);
+
has_cstate_core = !!core_msr_mask;
has_cstate_pkg = !!pkg_msr_mask;
+ has_cstate_module = !!module_msr_mask;
- return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
+ return (has_cstate_core || has_cstate_pkg || has_cstate_module) ? 0 : -ENODEV;
}
static inline void cstate_cleanup(void)
@@ -730,6 +809,9 @@ static inline void cstate_cleanup(void)
if (has_cstate_pkg)
perf_pmu_unregister(&cstate_pkg_pmu);
+
+ if (has_cstate_module)
+ perf_pmu_unregister(&cstate_module_pmu);
}
static int __init cstate_init(void)
@@ -766,6 +848,16 @@ static int __init cstate_init(void)
return err;
}
}
+
+ if (has_cstate_module) {
+ err = perf_pmu_register(&cstate_module_pmu, cstate_module_pmu.name, -1);
+ if (err) {
+ has_cstate_module = false;
+ pr_info("Failed to register cstate cluster pmu\n");
+ cstate_cleanup();
+ return err;
+ }
+ }
return 0;
}
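
The new cstate_module PMU above exposes MSR_MODULE_C6_RES_MS as a per-cluster c6-residency event on SRF/GRR parts. A minimal sketch of reading it from user space (assumptions: the standard sysfs layout for dynamic PMUs and error handling trimmed; the event=0x00 encoding comes from the alias registered above):

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int type, fd;
	FILE *f;

	/* dynamic PMU type id, standard sysfs location for event sources */
	f = fopen("/sys/bus/event_source/devices/cstate_module/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x00;	/* c6-residency, per the alias added above */

	/* uncore-style PMU: open per CPU (CPU 0 here), not per task */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("module C6 residency: %llu\n", count);
	return 0;
}
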
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index bf97ab904d40..d49d661ec0a7 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1755,7 +1755,7 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event,
setup_pebs_time(event, data, pebs->tsc);
if (has_branch_stack(event))
- perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
+ perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL);
}
static void adaptive_pebs_save_regs(struct pt_regs *regs,
@@ -1912,7 +1912,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
if (has_branch_stack(event)) {
intel_pmu_store_pebs_lbrs(lbr);
- perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
+ intel_pmu_lbr_save_brstack(data, cpuc, event);
}
}
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index c3b0d15a9841..78cd5084104e 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -676,6 +676,25 @@ void intel_pmu_lbr_del(struct perf_event *event)
WARN_ON_ONCE(cpuc->lbr_users < 0);
WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
perf_sched_cb_dec(event->pmu);
+
+ /*
+ * The logged occurrences information is only valid for the
+ * current LBR group. If another LBR group is scheduled in
+ * later, the information from the stale LBRs will be wrongly
+ * interpreted. Reset the LBRs here.
+ *
+ * Only clear once for a branch counter group with the leader
+ * event. Because
+ * - Cannot simply reset the LBRs with the !cpuc->lbr_users.
+ * Because it's possible that the last LBR user is not in a
+ * branch counter group, e.g., a branch_counters group +
+ * several normal LBR events.
+ * - The LBR reset can be done with any one of the events in a
+ * branch counter group, since they are always scheduled together.
+	 * It's easy to force the leader event to be an LBR event.
+ */
+ if (is_branch_counters_group(event) && event == event->group_leader)
+ intel_pmu_lbr_reset();
}
static inline bool vlbr_exclude_host(void)
@@ -866,6 +885,8 @@ static __always_inline u16 get_lbr_cycles(u64 info)
return cycles;
}
+static_assert((64 - PERF_BRANCH_ENTRY_INFO_BITS_MAX) > LBR_INFO_BR_CNTR_NUM * LBR_INFO_BR_CNTR_BITS);
+
static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
struct lbr_entry *entries)
{
@@ -898,11 +919,67 @@ static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
e->abort = !!(info & LBR_INFO_ABORT);
e->cycles = get_lbr_cycles(info);
e->type = get_lbr_br_type(info);
+
+ /*
+ * Leverage the reserved field of cpuc->lbr_entries[i] to
+ * temporarily store the branch counters information.
+ * The later code will decide what content can be disclosed
+	 * to the perf tool. Please see intel_pmu_lbr_counters_reorder().
+ */
+ e->reserved = (info >> LBR_INFO_BR_CNTR_OFFSET) & LBR_INFO_BR_CNTR_FULL_MASK;
}
cpuc->lbr_stack.nr = i;
}
+/*
+ * The enabled order may be different from the counter order.
+ * Update the lbr_counters with the enabled order.
+ */
+static void intel_pmu_lbr_counters_reorder(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ int i, j, pos = 0, order[X86_PMC_IDX_MAX];
+ struct perf_event *leader, *sibling;
+ u64 src, dst, cnt;
+
+ leader = event->group_leader;
+ if (branch_sample_counters(leader))
+ order[pos++] = leader->hw.idx;
+
+ for_each_sibling_event(sibling, leader) {
+ if (!branch_sample_counters(sibling))
+ continue;
+ order[pos++] = sibling->hw.idx;
+ }
+
+ WARN_ON_ONCE(!pos);
+
+ for (i = 0; i < cpuc->lbr_stack.nr; i++) {
+ src = cpuc->lbr_entries[i].reserved;
+ dst = 0;
+ for (j = 0; j < pos; j++) {
+ cnt = (src >> (order[j] * LBR_INFO_BR_CNTR_BITS)) & LBR_INFO_BR_CNTR_MASK;
+ dst |= cnt << j * LBR_INFO_BR_CNTR_BITS;
+ }
+ cpuc->lbr_counters[i] = dst;
+ cpuc->lbr_entries[i].reserved = 0;
+ }
+}
+
+void intel_pmu_lbr_save_brstack(struct perf_sample_data *data,
+ struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ if (is_branch_counters_group(event)) {
+ intel_pmu_lbr_counters_reorder(cpuc, event);
+ perf_sample_save_brstack(data, event, &cpuc->lbr_stack, cpuc->lbr_counters);
+ return;
+ }
+
+ perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL);
+}
+
static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc)
{
intel_pmu_store_lbr(cpuc, NULL);
@@ -1173,8 +1250,10 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
for (i = 0; i < cpuc->lbr_stack.nr; ) {
if (!cpuc->lbr_entries[i].from) {
j = i;
- while (++j < cpuc->lbr_stack.nr)
+ while (++j < cpuc->lbr_stack.nr) {
cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
+ cpuc->lbr_counters[j-1] = cpuc->lbr_counters[j];
+ }
cpuc->lbr_stack.nr--;
if (!cpuc->lbr_entries[i].from)
continue;
@@ -1525,8 +1604,12 @@ void __init intel_pmu_arch_lbr_init(void)
x86_pmu.lbr_mispred = ecx.split.lbr_mispred;
x86_pmu.lbr_timed_lbr = ecx.split.lbr_timed_lbr;
x86_pmu.lbr_br_type = ecx.split.lbr_br_type;
+ x86_pmu.lbr_counters = ecx.split.lbr_counters;
x86_pmu.lbr_nr = lbr_nr;
+ if (!!x86_pmu.lbr_counters)
+ x86_pmu.flags |= PMU_FL_BR_CNTR;
+
if (x86_pmu.lbr_mispred)
static_branch_enable(&x86_lbr_mispred);
if (x86_pmu.lbr_timed_lbr)
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 01023aa5125b..7927c0b832fa 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1814,6 +1814,14 @@ static const struct intel_uncore_init_fun spr_uncore_init __initconst = {
.uncore_units_ignore = spr_uncore_units_ignore,
};
+static const struct intel_uncore_init_fun gnr_uncore_init __initconst = {
+ .cpu_init = gnr_uncore_cpu_init,
+ .pci_init = gnr_uncore_pci_init,
+ .mmio_init = gnr_uncore_mmio_init,
+ .use_discovery = true,
+ .uncore_units_ignore = gnr_uncore_units_ignore,
+};
+
static const struct intel_uncore_init_fun generic_uncore_init __initconst = {
.cpu_init = intel_uncore_generic_uncore_cpu_init,
.pci_init = intel_uncore_generic_uncore_pci_init,
@@ -1865,8 +1873,12 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &mtl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init),
+ X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &gnr_uncore_init),
+ X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &gnr_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_uncore_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &gnr_uncore_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &gnr_uncore_init),
{},
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index c30fb5bb1222..4838502d89ae 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -72,9 +72,9 @@ struct intel_uncore_type {
unsigned single_fixed:1;
unsigned pair_ctr_ctl:1;
union {
- unsigned *msr_offsets;
- unsigned *pci_offsets;
- unsigned *mmio_offsets;
+ u64 *msr_offsets;
+ u64 *pci_offsets;
+ u64 *mmio_offsets;
};
unsigned *box_ids;
struct event_constraint unconstrainted;
@@ -593,6 +593,7 @@ extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;
extern int spr_uncore_units_ignore[];
+extern int gnr_uncore_units_ignore[];
/* uncore_snb.c */
int snb_uncore_pci_init(void);
@@ -634,6 +635,9 @@ void icx_uncore_mmio_init(void);
int spr_uncore_pci_init(void);
void spr_uncore_cpu_init(void);
void spr_uncore_mmio_init(void);
+int gnr_uncore_pci_init(void);
+void gnr_uncore_cpu_init(void);
+void gnr_uncore_mmio_init(void);
/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);
diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c
index cb488e41807c..9a698a92962a 100644
--- a/arch/x86/events/intel/uncore_discovery.c
+++ b/arch/x86/events/intel/uncore_discovery.c
@@ -125,7 +125,8 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit,
int die, bool parsed)
{
struct intel_uncore_discovery_type *type;
- unsigned int *box_offset, *ids;
+ unsigned int *ids;
+ u64 *box_offset;
int i;
if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) {
@@ -153,7 +154,7 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit,
if (!type)
return;
- box_offset = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
+ box_offset = kcalloc(type->num_boxes + 1, sizeof(u64), GFP_KERNEL);
if (!box_offset)
return;
diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h
index 6ee80ad3423e..22e769a81103 100644
--- a/arch/x86/events/intel/uncore_discovery.h
+++ b/arch/x86/events/intel/uncore_discovery.h
@@ -125,7 +125,7 @@ struct intel_uncore_discovery_type {
u8 ctr_offset; /* Counter 0 offset */
u16 num_boxes; /* number of boxes for the uncore block */
unsigned int *ids; /* Box IDs */
- unsigned int *box_offset; /* Box offset */
+ u64 *box_offset; /* Box offset */
};
bool intel_uncore_has_discovery_tables(int *ignore);
diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c
index 173e2674be6e..56eea2c66cfb 100644
--- a/arch/x86/events/intel/uncore_nhmex.c
+++ b/arch/x86/events/intel/uncore_nhmex.c
@@ -306,7 +306,7 @@ static const struct attribute_group nhmex_uncore_cbox_format_group = {
};
/* msr offset for each instance of cbox */
-static unsigned nhmex_cbox_msr_offsets[] = {
+static u64 nhmex_cbox_msr_offsets[] = {
0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
};
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 8250f0f59c2b..a96496bef678 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1396,6 +1396,29 @@ err:
return ret;
}
+static int topology_gidnid_map(int nodeid, u32 gidnid)
+{
+ int i, die_id = -1;
+
+ /*
+ * every three bits in the Node ID mapping register maps
+ * to a particular node.
+ */
+ for (i = 0; i < 8; i++) {
+ if (nodeid == GIDNIDMAP(gidnid, i)) {
+ if (topology_max_die_per_package() > 1)
+ die_id = i;
+ else
+ die_id = topology_phys_to_logical_pkg(i);
+ if (die_id < 0)
+ die_id = -ENODEV;
+ break;
+ }
+ }
+
+ return die_id;
+}
+
/*
* build pci bus to socket mapping
*/
@@ -1435,22 +1458,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
break;
}
- /*
- * every three bits in the Node ID mapping register maps
- * to a particular node.
- */
- for (i = 0; i < 8; i++) {
- if (nodeid == GIDNIDMAP(config, i)) {
- if (topology_max_die_per_package() > 1)
- die_id = i;
- else
- die_id = topology_phys_to_logical_pkg(i);
- if (die_id < 0)
- die_id = -ENODEV;
- map->pbus_to_dieid[bus] = die_id;
- break;
- }
- }
+ map->pbus_to_dieid[bus] = topology_gidnid_map(nodeid, config);
raw_spin_unlock(&pci2phy_map_lock);
} else {
segment = pci_domain_nr(ubox_dev->bus);
@@ -5278,7 +5286,7 @@ void snr_uncore_mmio_init(void)
/* ICX uncore support */
-static unsigned icx_cha_msr_offsets[] = {
+static u64 icx_cha_msr_offsets[] = {
0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
@@ -5326,7 +5334,7 @@ static struct intel_uncore_type icx_uncore_chabox = {
.format_group = &snr_uncore_chabox_format_group,
};
-static unsigned icx_msr_offsets[] = {
+static u64 icx_msr_offsets[] = {
0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};
@@ -5596,7 +5604,7 @@ static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, i
struct pci_dev *ubox = NULL;
struct pci_dev *dev = NULL;
u32 nid, gid;
- int i, idx, ret = -EPERM;
+ int idx, lgc_pkg, ret = -EPERM;
struct intel_uncore_topology *upi;
unsigned int devfn;
@@ -5611,20 +5619,21 @@ static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, i
break;
}
- for (i = 0; i < 8; i++) {
- if (nid != GIDNIDMAP(gid, i))
- continue;
- for (idx = 0; idx < type->num_boxes; idx++) {
- upi = &type->topology[nid][idx];
- devfn = PCI_DEVFN(dev_link0 + idx, ICX_UPI_REGS_ADDR_FUNCTION);
- dev = pci_get_domain_bus_and_slot(pci_domain_nr(ubox->bus),
- ubox->bus->number,
- devfn);
- if (dev) {
- ret = upi_fill_topology(dev, upi, idx);
- if (ret)
- goto err;
- }
+ lgc_pkg = topology_gidnid_map(nid, gid);
+ if (lgc_pkg < 0) {
+ ret = -EPERM;
+ goto err;
+ }
+ for (idx = 0; idx < type->num_boxes; idx++) {
+ upi = &type->topology[lgc_pkg][idx];
+ devfn = PCI_DEVFN(dev_link0 + idx, ICX_UPI_REGS_ADDR_FUNCTION);
+ dev = pci_get_domain_bus_and_slot(pci_domain_nr(ubox->bus),
+ ubox->bus->number,
+ devfn);
+ if (dev) {
+ ret = upi_fill_topology(dev, upi, idx);
+ if (ret)
+ goto err;
}
}
}
@@ -6079,13 +6088,16 @@ static struct uncore_event_desc spr_uncore_imc_events[] = {
{ /* end: all zeroes */ },
};
+#define SPR_UNCORE_MMIO_COMMON_FORMAT() \
+ SPR_UNCORE_COMMON_FORMAT(), \
+ .ops = &spr_uncore_mmio_ops
+
static struct intel_uncore_type spr_uncore_imc = {
- SPR_UNCORE_COMMON_FORMAT(),
+ SPR_UNCORE_MMIO_COMMON_FORMAT(),
.name = "imc",
.fixed_ctr_bits = 48,
.fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
.fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
- .ops = &spr_uncore_mmio_ops,
.event_descs = spr_uncore_imc_events,
};
@@ -6181,7 +6193,7 @@ static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
*/
#define SPR_UNCORE_UPI_NUM_BOXES 4
-static unsigned int spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = {
+static u64 spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = {
0, 0x8000, 0x10000, 0x18000
};
@@ -6412,7 +6424,8 @@ static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
static struct intel_uncore_type **
uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
- struct intel_uncore_type **extra)
+ struct intel_uncore_type **extra, int max_num_types,
+ struct intel_uncore_type **uncores)
{
struct intel_uncore_type **types, **start_types;
int i;
@@ -6421,9 +6434,9 @@ uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
/* Only copy the customized features */
for (; *types; types++) {
- if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
+ if ((*types)->type_id >= max_num_types)
continue;
- uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
+ uncore_type_customized_copy(*types, uncores[(*types)->type_id]);
}
for (i = 0; i < num_extra; i++, types++)
@@ -6470,7 +6483,9 @@ void spr_uncore_cpu_init(void)
uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
UNCORE_SPR_MSR_EXTRA_UNCORES,
- spr_msr_uncores);
+ spr_msr_uncores,
+ UNCORE_SPR_NUM_UNCORE_TYPES,
+ spr_uncores);
type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
if (type) {
@@ -6552,7 +6567,9 @@ int spr_uncore_pci_init(void)
spr_update_device_location(UNCORE_SPR_M3UPI);
uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI,
UNCORE_SPR_PCI_EXTRA_UNCORES,
- spr_pci_uncores);
+ spr_pci_uncores,
+ UNCORE_SPR_NUM_UNCORE_TYPES,
+ spr_uncores);
return 0;
}
@@ -6560,15 +6577,116 @@ void spr_uncore_mmio_init(void)
{
int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);
- if (ret)
- uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL);
- else {
+ if (ret) {
+ uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
+ UNCORE_SPR_NUM_UNCORE_TYPES,
+ spr_uncores);
+ } else {
uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
UNCORE_SPR_MMIO_EXTRA_UNCORES,
- spr_mmio_uncores);
+ spr_mmio_uncores,
+ UNCORE_SPR_NUM_UNCORE_TYPES,
+ spr_uncores);
spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
}
}
/* end of SPR uncore support */
+
+/* GNR uncore support */
+
+#define UNCORE_GNR_NUM_UNCORE_TYPES 23
+#define UNCORE_GNR_TYPE_15 15
+#define UNCORE_GNR_B2UPI 18
+#define UNCORE_GNR_TYPE_21 21
+#define UNCORE_GNR_TYPE_22 22
+
+int gnr_uncore_units_ignore[] = {
+ UNCORE_SPR_UPI,
+ UNCORE_GNR_TYPE_15,
+ UNCORE_GNR_B2UPI,
+ UNCORE_GNR_TYPE_21,
+ UNCORE_GNR_TYPE_22,
+ UNCORE_IGNORE_END
+};
+
+static struct intel_uncore_type gnr_uncore_ubox = {
+ .name = "ubox",
+ .attr_update = uncore_alias_groups,
+};
+
+static struct intel_uncore_type gnr_uncore_b2cmi = {
+ SPR_UNCORE_PCI_COMMON_FORMAT(),
+ .name = "b2cmi",
+};
+
+static struct intel_uncore_type gnr_uncore_b2cxl = {
+ SPR_UNCORE_MMIO_COMMON_FORMAT(),
+ .name = "b2cxl",
+};
+
+static struct intel_uncore_type gnr_uncore_mdf_sbo = {
+ .name = "mdf_sbo",
+ .attr_update = uncore_alias_groups,
+};
+
+static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = {
+ &spr_uncore_chabox,
+ &spr_uncore_iio,
+ &spr_uncore_irp,
+ NULL,
+ &spr_uncore_pcu,
+ &gnr_uncore_ubox,
+ &spr_uncore_imc,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ &gnr_uncore_b2cmi,
+ &gnr_uncore_b2cxl,
+ NULL,
+ NULL,
+ &gnr_uncore_mdf_sbo,
+ NULL,
+ NULL,
+};
+
+static struct freerunning_counters gnr_iio_freerunning[] = {
+ [SPR_IIO_MSR_IOCLK] = { 0x290e, 0x01, 0x10, 1, 48 },
+ [SPR_IIO_MSR_BW_IN] = { 0x360e, 0x10, 0x80, 8, 48 },
+ [SPR_IIO_MSR_BW_OUT] = { 0x2e0e, 0x10, 0x80, 8, 48 },
+};
+
+void gnr_uncore_cpu_init(void)
+{
+ uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
+ UNCORE_SPR_MSR_EXTRA_UNCORES,
+ spr_msr_uncores,
+ UNCORE_GNR_NUM_UNCORE_TYPES,
+ gnr_uncores);
+ spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
+ spr_uncore_iio_free_running.freerunning = gnr_iio_freerunning;
+}
+
+int gnr_uncore_pci_init(void)
+{
+ uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL,
+ UNCORE_GNR_NUM_UNCORE_TYPES,
+ gnr_uncores);
+ return 0;
+}
+
+void gnr_uncore_mmio_init(void)
+{
+ uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
+ UNCORE_GNR_NUM_UNCORE_TYPES,
+ gnr_uncores);
+}
+
+/* end of GNR uncore support */
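
Note on the refactoring above: topology_gidnid_map() factors out the Node-ID lookup that was previously duplicated in the PCI-bus map and UPI topology paths. A minimal standalone sketch of the 3-bit decode it performs, assuming GIDNIDMAP(config, i) selects bits [3*i+2:3*i] as the "every three bits ... maps to a particular node" comment in the hunk implies (not the kernel code itself):

#include <stdio.h>
#include <stdint.h>

/* assumed to match the kernel's GIDNIDMAP() in uncore_snbep.c */
#define GIDNIDMAP(config, id)	(((config) >> (3 * (id))) & 0x7)

/* return the index of the 3-bit field holding nodeid, or -1 */
static int gidnid_lookup(unsigned int nodeid, uint32_t gidnid)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (nodeid == GIDNIDMAP(gidnid, i))
			return i;
	}
	return -1;
}

int main(void)
{
	/* field i=2 holds node id 5: bits 8..6 = 0b101 */
	uint32_t reg = 5u << (3 * 2);

	printf("node 5 -> index %d\n", gidnid_lookup(5, reg)); /* prints 2 */
	return 0;
}
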
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 53dd5d495ba6..fb56518356ec 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -110,6 +110,11 @@ static inline bool is_topdown_event(struct perf_event *event)
return is_metric_event(event) || is_slots_event(event);
}
+static inline bool is_branch_counters_group(struct perf_event *event)
+{
+ return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;
+}
+
struct amd_nb {
int nb_id; /* NorthBridge id */
int refcnt; /* reference count */
@@ -283,6 +288,7 @@ struct cpu_hw_events {
int lbr_pebs_users;
struct perf_branch_stack lbr_stack;
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
+ u64 lbr_counters[MAX_LBR_ENTRIES]; /* branch stack extra */
union {
struct er_account *lbr_sel;
struct er_account *lbr_ctl;
@@ -888,6 +894,7 @@ struct x86_pmu {
unsigned int lbr_mispred:1;
unsigned int lbr_timed_lbr:1;
unsigned int lbr_br_type:1;
+ unsigned int lbr_counters:4;
void (*lbr_reset)(void);
void (*lbr_read)(struct cpu_hw_events *cpuc);
@@ -1012,6 +1019,7 @@ do { \
#define PMU_FL_INSTR_LATENCY 0x80 /* Support Instruction Latency in PEBS Memory Info Record */
#define PMU_FL_MEM_LOADS_AUX 0x100 /* Require an auxiliary event for the complete memory info */
#define PMU_FL_RETIRE_LATENCY 0x200 /* Support Retire Latency in PEBS */
+#define PMU_FL_BR_CNTR 0x400 /* Support branch counter logging */
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
@@ -1552,6 +1560,10 @@ void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
void intel_ds_init(void);
+void intel_pmu_lbr_save_brstack(struct perf_sample_data *data,
+ struct cpu_hw_events *cpuc,
+ struct perf_event *event);
+
void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
struct perf_event_pmu_context *next_epc);
diff --git a/arch/x86/events/perf_event_flags.h b/arch/x86/events/perf_event_flags.h
index 1dc19b9b4426..6c977c19f2cd 100644
--- a/arch/x86/events/perf_event_flags.h
+++ b/arch/x86/events/perf_event_flags.h
@@ -20,3 +20,5 @@ PERF_ARCH(TOPDOWN, 0x04000) /* Count Topdown slots/metrics events */
PERF_ARCH(PEBS_STLAT, 0x08000) /* st+stlat data address sampling */
PERF_ARCH(AMD_BRS, 0x10000) /* AMD Branch Sampling */
PERF_ARCH(PEBS_LAT_HYBRID, 0x20000) /* ld and st lat for hybrid */
+PERF_ARCH(NEEDS_BRANCH_STACK, 0x40000) /* require branch stack setup */
+PERF_ARCH(BRANCH_COUNTERS, 0x80000) /* logs the counters in the extra space of each branch */
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 97bfe5f0531f..5fc45543e955 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -209,7 +209,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector,
/*
* This particular version of the IPI hypercall can
- * only target upto 64 CPUs.
+ * only target up to 64 CPUs.
*/
if (vcpu >= 64)
goto do_ex_hypercall;
diff --git a/arch/x86/hyperv/irqdomain.c b/arch/x86/hyperv/irqdomain.c
index 42c70d28ef27..3215a4a07408 100644
--- a/arch/x86/hyperv/irqdomain.c
+++ b/arch/x86/hyperv/irqdomain.c
@@ -212,7 +212,7 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
* This interrupt is already mapped. Let's unmap first.
*
* We don't use retarget interrupt hypercalls here because
- * Microsoft Hypervisor doens't allow root to change the vector
+ * Microsoft Hypervisor doesn't allow root to change the vector
* or specify VPs outside of the set that is initially used
* during mapping.
*/
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 02e55237d919..7dcbf153ad72 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -144,7 +144,7 @@ void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
/* Tell the hypervisor what went wrong. */
val |= GHCB_SEV_TERM_REASON(set, reason);
- /* Request Guest Termination from Hypvervisor */
+ /* Request Guest Termination from Hypervisor */
wr_ghcb_msr(val);
VMGEXIT();
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 65f79092c9d9..fcd20c6dc7f9 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -10,6 +10,9 @@
#define ALT_FLAG_NOT (1 << 0)
#define ALT_NOT(feature) ((ALT_FLAG_NOT << ALT_FLAGS_SHIFT) | (feature))
+#define ALT_FLAG_DIRECT_CALL (1 << 1)
+#define ALT_DIRECT_CALL(feature) ((ALT_FLAG_DIRECT_CALL << ALT_FLAGS_SHIFT) | (feature))
+#define ALT_CALL_ALWAYS ALT_DIRECT_CALL(X86_FEATURE_ALWAYS)
#ifndef __ASSEMBLY__
@@ -86,6 +89,8 @@ struct alt_instr {
u8 replacementlen; /* length of new instruction */
} __packed;
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+
/*
* Debug flag that can be tested to see whether alternative
* instructions were patched in already:
@@ -101,11 +106,10 @@ extern void apply_fineibt(s32 *start_retpoline, s32 *end_retpoine,
s32 *start_cfi, s32 *end_cfi);
struct module;
-struct paravirt_patch_site;
struct callthunk_sites {
s32 *call_start, *call_end;
- struct paravirt_patch_site *pv_start, *pv_end;
+ struct alt_instr *alt_start, *alt_end;
};
#ifdef CONFIG_CALL_THUNKS
@@ -150,6 +154,8 @@ static inline int alternatives_text_reserved(void *start, void *end)
}
#endif /* CONFIG_SMP */
+#define ALT_CALL_INSTR "call BUG_func"
+
#define b_replacement(num) "664"#num
#define e_replacement(num) "665"#num
@@ -330,6 +336,22 @@ static inline int alternatives_text_reserved(void *start, void *end)
*/
#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
+/* Macro for creating assembler functions avoiding any C magic. */
+#define DEFINE_ASM_FUNC(func, instr, sec) \
+ asm (".pushsection " #sec ", \"ax\"\n" \
+ ".global " #func "\n\t" \
+ ".type " #func ", @function\n\t" \
+ ASM_FUNC_ALIGN "\n" \
+ #func ":\n\t" \
+ ASM_ENDBR \
+ instr "\n\t" \
+ ASM_RET \
+ ".size " #func ", . - " #func "\n\t" \
+ ".popsection")
+
+void BUG_func(void);
+void nop_func(void);
+
#else /* __ASSEMBLY__ */
#ifdef CONFIG_SMP
@@ -370,6 +392,10 @@ static inline int alternatives_text_reserved(void *start, void *end)
.byte \alt_len
.endm
+.macro ALT_CALL_INSTR
+ call BUG_func
+.endm
+
/*
* Define an alternative between two instructions. If @feature is
* present, early code in apply_alternatives() replaces @oldinstr with
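
For reference, the new DEFINE_ASM_FUNC() macro above is a mechanical string-pasting construct; its use for nop_func later in this series, DEFINE_ASM_FUNC(nop_func, "", .entry.text), expands roughly as sketched below. ASM_FUNC_ALIGN, ASM_ENDBR and ASM_RET are config dependent; the literal strings in the comments are illustrative assumptions (64-bit build, IBT enabled, no return thunk), not values taken from this patch:

asm (".pushsection .entry.text, \"ax\"\n"
     ".global nop_func\n\t"
     ".type nop_func, @function\n\t"
     ".balign 16\n"			/* ASM_FUNC_ALIGN (assumed) */
     "nop_func:\n\t"
     "endbr64\n\t"			/* ASM_ENDBR (assumed) */
     "\n\t"				/* empty instr argument */
     "ret; int3\n\t"			/* ASM_RET (assumed) */
     ".size nop_func, . - nop_func\n\t"
     ".popsection");
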
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index ed0eaf65c437..5c37944c8a5e 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -104,7 +104,7 @@ static inline bool amd_gart_present(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return false;
- /* GART present only on Fam15h, upto model 0fh */
+ /* GART present only on Fam15h, up to model 0fh */
if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
(boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
return true;
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index d21f48f1c242..9d159b771dc8 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -272,8 +272,6 @@ struct apic {
void (*send_IPI_all)(int vector);
void (*send_IPI_self)(int vector);
- enum apic_delivery_modes delivery_mode;
-
u32 disable_esr : 1,
dest_mode_logical : 1,
x2apic_set_max_apicid : 1,
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 4b125e5b3187..094106b6a538 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -20,6 +20,13 @@
*/
#define IO_APIC_SLOT_SIZE 1024
+#define APIC_DELIVERY_MODE_FIXED 0
+#define APIC_DELIVERY_MODE_LOWESTPRIO 1
+#define APIC_DELIVERY_MODE_SMI 2
+#define APIC_DELIVERY_MODE_NMI 4
+#define APIC_DELIVERY_MODE_INIT 5
+#define APIC_DELIVERY_MODE_EXTINT 7
+
#define APIC_ID 0x20
#define APIC_LVR 0x30
@@ -165,279 +172,10 @@
#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK)
#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT)
-#ifndef __ASSEMBLY__
-/*
- * the local APIC register structure, memory mapped. Not terribly well
- * tested, but we might eventually use this one in the future - the
- * problem why we cannot use it right now is the P5 APIC, it has an
- * errata which cannot take 8-bit reads and writes, only 32-bit ones ...
- */
-#define u32 unsigned int
-
-struct local_apic {
-
-/*000*/ struct { u32 __reserved[4]; } __reserved_01;
-
-/*010*/ struct { u32 __reserved[4]; } __reserved_02;
-
-/*020*/ struct { /* APIC ID Register */
- u32 __reserved_1 : 24,
- phys_apic_id : 4,
- __reserved_2 : 4;
- u32 __reserved[3];
- } id;
-
-/*030*/ const
- struct { /* APIC Version Register */
- u32 version : 8,
- __reserved_1 : 8,
- max_lvt : 8,
- __reserved_2 : 8;
- u32 __reserved[3];
- } version;
-
-/*040*/ struct { u32 __reserved[4]; } __reserved_03;
-
-/*050*/ struct { u32 __reserved[4]; } __reserved_04;
-
-/*060*/ struct { u32 __reserved[4]; } __reserved_05;
-
-/*070*/ struct { u32 __reserved[4]; } __reserved_06;
-
-/*080*/ struct { /* Task Priority Register */
- u32 priority : 8,
- __reserved_1 : 24;
- u32 __reserved_2[3];
- } tpr;
-
-/*090*/ const
- struct { /* Arbitration Priority Register */
- u32 priority : 8,
- __reserved_1 : 24;
- u32 __reserved_2[3];
- } apr;
-
-/*0A0*/ const
- struct { /* Processor Priority Register */
- u32 priority : 8,
- __reserved_1 : 24;
- u32 __reserved_2[3];
- } ppr;
-
-/*0B0*/ struct { /* End Of Interrupt Register */
- u32 eoi;
- u32 __reserved[3];
- } eoi;
-
-/*0C0*/ struct { u32 __reserved[4]; } __reserved_07;
-
-/*0D0*/ struct { /* Logical Destination Register */
- u32 __reserved_1 : 24,
- logical_dest : 8;
- u32 __reserved_2[3];
- } ldr;
-
-/*0E0*/ struct { /* Destination Format Register */
- u32 __reserved_1 : 28,
- model : 4;
- u32 __reserved_2[3];
- } dfr;
-
-/*0F0*/ struct { /* Spurious Interrupt Vector Register */
- u32 spurious_vector : 8,
- apic_enabled : 1,
- focus_cpu : 1,
- __reserved_2 : 22;
- u32 __reserved_3[3];
- } svr;
-
-/*100*/ struct { /* In Service Register */
-/*170*/ u32 bitfield;
- u32 __reserved[3];
- } isr [8];
-
-/*180*/ struct { /* Trigger Mode Register */
-/*1F0*/ u32 bitfield;
- u32 __reserved[3];
- } tmr [8];
-
-/*200*/ struct { /* Interrupt Request Register */
-/*270*/ u32 bitfield;
- u32 __reserved[3];
- } irr [8];
-
-/*280*/ union { /* Error Status Register */
- struct {
- u32 send_cs_error : 1,
- receive_cs_error : 1,
- send_accept_error : 1,
- receive_accept_error : 1,
- __reserved_1 : 1,
- send_illegal_vector : 1,
- receive_illegal_vector : 1,
- illegal_register_address : 1,
- __reserved_2 : 24;
- u32 __reserved_3[3];
- } error_bits;
- struct {
- u32 errors;
- u32 __reserved_3[3];
- } all_errors;
- } esr;
-
-/*290*/ struct { u32 __reserved[4]; } __reserved_08;
-
-/*2A0*/ struct { u32 __reserved[4]; } __reserved_09;
-
-/*2B0*/ struct { u32 __reserved[4]; } __reserved_10;
-
-/*2C0*/ struct { u32 __reserved[4]; } __reserved_11;
-
-/*2D0*/ struct { u32 __reserved[4]; } __reserved_12;
-
-/*2E0*/ struct { u32 __reserved[4]; } __reserved_13;
-
-/*2F0*/ struct { u32 __reserved[4]; } __reserved_14;
-
-/*300*/ struct { /* Interrupt Command Register 1 */
- u32 vector : 8,
- delivery_mode : 3,
- destination_mode : 1,
- delivery_status : 1,
- __reserved_1 : 1,
- level : 1,
- trigger : 1,
- __reserved_2 : 2,
- shorthand : 2,
- __reserved_3 : 12;
- u32 __reserved_4[3];
- } icr1;
-
-/*310*/ struct { /* Interrupt Command Register 2 */
- union {
- u32 __reserved_1 : 24,
- phys_dest : 4,
- __reserved_2 : 4;
- u32 __reserved_3 : 24,
- logical_dest : 8;
- } dest;
- u32 __reserved_4[3];
- } icr2;
-
-/*320*/ struct { /* LVT - Timer */
- u32 vector : 8,
- __reserved_1 : 4,
- delivery_status : 1,
- __reserved_2 : 3,
- mask : 1,
- timer_mode : 1,
- __reserved_3 : 14;
- u32 __reserved_4[3];
- } lvt_timer;
-
-/*330*/ struct { /* LVT - Thermal Sensor */
- u32 vector : 8,
- delivery_mode : 3,
- __reserved_1 : 1,
- delivery_status : 1,
- __reserved_2 : 3,
- mask : 1,
- __reserved_3 : 15;
- u32 __reserved_4[3];
- } lvt_thermal;
-
-/*340*/ struct { /* LVT - Performance Counter */
- u32 vector : 8,
- delivery_mode : 3,
- __reserved_1 : 1,
- delivery_status : 1,
- __reserved_2 : 3,
- mask : 1,
- __reserved_3 : 15;
- u32 __reserved_4[3];
- } lvt_pc;
-
-/*350*/ struct { /* LVT - LINT0 */
- u32 vector : 8,
- delivery_mode : 3,
- __reserved_1 : 1,
- delivery_status : 1,
- polarity : 1,
- remote_irr : 1,
- trigger : 1,
- mask : 1,
- __reserved_2 : 15;
- u32 __reserved_3[3];
- } lvt_lint0;
-
-/*360*/ struct { /* LVT - LINT1 */
- u32 vector : 8,
- delivery_mode : 3,
- __reserved_1 : 1,
- delivery_status : 1,
- polarity : 1,
- remote_irr : 1,
- trigger : 1,
- mask : 1,
- __reserved_2 : 15;
- u32 __reserved_3[3];
- } lvt_lint1;
-
-/*370*/ struct { /* LVT - Error */
- u32 vector : 8,
- __reserved_1 : 4,
- delivery_status : 1,
- __reserved_2 : 3,
- mask : 1,
- __reserved_3 : 15;
- u32 __reserved_4[3];
- } lvt_error;
-
-/*380*/ struct { /* Timer Initial Count Register */
- u32 initial_count;
- u32 __reserved_2[3];
- } timer_icr;
-
-/*390*/ const
- struct { /* Timer Current Count Register */
- u32 curr_count;
- u32 __reserved_2[3];
- } timer_ccr;
-
-/*3A0*/ struct { u32 __reserved[4]; } __reserved_16;
-
-/*3B0*/ struct { u32 __reserved[4]; } __reserved_17;
-
-/*3C0*/ struct { u32 __reserved[4]; } __reserved_18;
-
-/*3D0*/ struct { u32 __reserved[4]; } __reserved_19;
-
-/*3E0*/ struct { /* Timer Divide Configuration Register */
- u32 divisor : 4,
- __reserved_1 : 28;
- u32 __reserved_2[3];
- } timer_dcr;
-
-/*3F0*/ struct { u32 __reserved[4]; } __reserved_20;
-
-} __attribute__ ((packed));
-
-#undef u32
-
#ifdef CONFIG_X86_32
#define BAD_APICID 0xFFu
#else
#define BAD_APICID 0xFFFFu
#endif
-enum apic_delivery_modes {
- APIC_DELIVERY_MODE_FIXED = 0,
- APIC_DELIVERY_MODE_LOWESTPRIO = 1,
- APIC_DELIVERY_MODE_SMI = 2,
- APIC_DELIVERY_MODE_NMI = 4,
- APIC_DELIVERY_MODE_INIT = 5,
- APIC_DELIVERY_MODE_EXTINT = 7,
-};
-
-#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_APICDEF_H */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 35389b2af88e..0216f63a366b 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -81,22 +81,4 @@ do { \
#include <asm-generic/barrier.h>
-/*
- * Make previous memory operations globally visible before
- * a WRMSR.
- *
- * MFENCE makes writes visible, but only affects load/store
- * instructions. WRMSR is unfortunately not a load/store
- * instruction and is unaffected by MFENCE. The LFENCE ensures
- * that the WRMSR is not reordered.
- *
- * Most WRMSRs are full serializing instructions themselves and
- * do not require this barrier. This is only required for the
- * IA32_TSC_DEADLINE and X2APIC MSRs.
- */
-static inline void weak_wrmsr_fence(void)
-{
- asm volatile("mfence; lfence" : : : "memory");
-}
-
#endif /* _ASM_X86_BARRIER_H */
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 4af140cf5719..632c26cdeeda 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -218,7 +218,7 @@
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_ZEN (7*32+28) /* "" CPU based on Zen microarchitecture */
+#define X86_FEATURE_ZEN ( 7*32+28) /* "" Generic flag for all Zen and newer */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
#define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
@@ -308,10 +308,14 @@
#define X86_FEATURE_SMBA (11*32+21) /* "" Slow Memory Bandwidth Allocation */
#define X86_FEATURE_BMEC (11*32+22) /* "" Bandwidth Monitoring Event Configuration */
#define X86_FEATURE_USER_SHSTK (11*32+23) /* Shadow stack support for user mode applications */
-
#define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */
#define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */
#define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */
+#define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* "" IA32_TSC_DEADLINE and X2APIC MSRs need fencing */
+#define X86_FEATURE_ZEN2 (11*32+28) /* "" CPU based on Zen2 microarchitecture */
+#define X86_FEATURE_ZEN3 (11*32+29) /* "" CPU based on Zen3 microarchitecture */
+#define X86_FEATURE_ZEN4 (11*32+30) /* "" CPU based on Zen4 microarchitecture */
+#define X86_FEATURE_ZEN1 (11*32+31) /* "" CPU based on Zen1 microarchitecture */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
index f7e7099af595..d440a65af8f3 100644
--- a/arch/x86/include/asm/desc_defs.h
+++ b/arch/x86/include/asm/desc_defs.h
@@ -8,6 +8,56 @@
* archs.
*/
+/*
+ * Low-level interface mapping flags/field names to bits
+ */
+
+/* Flags for _DESC_S (non-system) descriptors */
+#define _DESC_ACCESSED 0x0001
+#define _DESC_DATA_WRITABLE 0x0002
+#define _DESC_CODE_READABLE 0x0002
+#define _DESC_DATA_EXPAND_DOWN 0x0004
+#define _DESC_CODE_CONFORMING 0x0004
+#define _DESC_CODE_EXECUTABLE 0x0008
+
+/* Common flags */
+#define _DESC_S 0x0010
+#define _DESC_DPL(dpl) ((dpl) << 5)
+#define _DESC_PRESENT 0x0080
+
+#define _DESC_LONG_CODE 0x2000
+#define _DESC_DB 0x4000
+#define _DESC_GRANULARITY_4K 0x8000
+
+/* System descriptors have a numeric "type" field instead of flags */
+#define _DESC_SYSTEM(code) (code)
+
+/*
+ * High-level interface mapping intended usage to low-level combinations
+ * of flags
+ */
+
+#define _DESC_DATA (_DESC_S | _DESC_PRESENT | _DESC_ACCESSED | \
+ _DESC_DATA_WRITABLE)
+#define _DESC_CODE (_DESC_S | _DESC_PRESENT | _DESC_ACCESSED | \
+ _DESC_CODE_READABLE | _DESC_CODE_EXECUTABLE)
+
+#define DESC_DATA16 (_DESC_DATA)
+#define DESC_CODE16 (_DESC_CODE)
+
+#define DESC_DATA32 (_DESC_DATA | _DESC_GRANULARITY_4K | _DESC_DB)
+#define DESC_DATA32_BIOS (_DESC_DATA | _DESC_DB)
+
+#define DESC_CODE32 (_DESC_CODE | _DESC_GRANULARITY_4K | _DESC_DB)
+#define DESC_CODE32_BIOS (_DESC_CODE | _DESC_DB)
+
+#define DESC_TSS32 (_DESC_SYSTEM(9) | _DESC_PRESENT)
+
+#define DESC_DATA64 (_DESC_DATA | _DESC_GRANULARITY_4K | _DESC_DB)
+#define DESC_CODE64 (_DESC_CODE | _DESC_GRANULARITY_4K | _DESC_LONG_CODE)
+
+#define DESC_USER (_DESC_DPL(3))
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -22,19 +72,19 @@ struct desc_struct {
#define GDT_ENTRY_INIT(flags, base, limit) \
{ \
- .limit0 = (u16) (limit), \
- .limit1 = ((limit) >> 16) & 0x0F, \
- .base0 = (u16) (base), \
- .base1 = ((base) >> 16) & 0xFF, \
- .base2 = ((base) >> 24) & 0xFF, \
- .type = (flags & 0x0f), \
- .s = (flags >> 4) & 0x01, \
- .dpl = (flags >> 5) & 0x03, \
- .p = (flags >> 7) & 0x01, \
- .avl = (flags >> 12) & 0x01, \
- .l = (flags >> 13) & 0x01, \
- .d = (flags >> 14) & 0x01, \
- .g = (flags >> 15) & 0x01, \
+ .limit0 = ((limit) >> 0) & 0xFFFF, \
+ .limit1 = ((limit) >> 16) & 0x000F, \
+ .base0 = ((base) >> 0) & 0xFFFF, \
+ .base1 = ((base) >> 16) & 0x00FF, \
+ .base2 = ((base) >> 24) & 0x00FF, \
+ .type = ((flags) >> 0) & 0x000F, \
+ .s = ((flags) >> 4) & 0x0001, \
+ .dpl = ((flags) >> 5) & 0x0003, \
+ .p = ((flags) >> 7) & 0x0001, \
+ .avl = ((flags) >> 12) & 0x0001, \
+ .l = ((flags) >> 13) & 0x0001, \
+ .d = ((flags) >> 14) & 0x0001, \
+ .g = ((flags) >> 15) & 0x0001, \
}
enum {
@@ -94,6 +144,7 @@ struct gate_struct {
typedef struct gate_struct gate_desc;
+#ifndef _SETUP
static inline unsigned long gate_offset(const gate_desc *g)
{
#ifdef CONFIG_X86_64
@@ -108,6 +159,7 @@ static inline unsigned long gate_segment(const gate_desc *g)
{
return g->segment;
}
+#endif
struct desc_ptr {
unsigned short size;
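
A quick worked check of the new symbolic descriptor flags: ORing the _DESC_* building blocks defined in this hunk reproduces the familiar numeric GDT access/flag values. The sketch below only copies definitions from the hunk and asserts the resulting constants; it is a standalone check, not kernel code:

#define _DESC_ACCESSED		0x0001
#define _DESC_DATA_WRITABLE	0x0002
#define _DESC_CODE_READABLE	0x0002
#define _DESC_CODE_EXECUTABLE	0x0008
#define _DESC_S			0x0010
#define _DESC_DPL(dpl)		((dpl) << 5)
#define _DESC_PRESENT		0x0080
#define _DESC_LONG_CODE		0x2000
#define _DESC_DB		0x4000
#define _DESC_GRANULARITY_4K	0x8000

#define _DESC_DATA	(_DESC_S | _DESC_PRESENT | _DESC_ACCESSED | \
			 _DESC_DATA_WRITABLE)
#define _DESC_CODE	(_DESC_S | _DESC_PRESENT | _DESC_ACCESSED | \
			 _DESC_CODE_READABLE | _DESC_CODE_EXECUTABLE)
#define DESC_DATA64	(_DESC_DATA | _DESC_GRANULARITY_4K | _DESC_DB)
#define DESC_CODE64	(_DESC_CODE | _DESC_GRANULARITY_4K | _DESC_LONG_CODE)
#define DESC_USER	(_DESC_DPL(3))

_Static_assert(DESC_CODE64 == 0xa09b, "64-bit kernel code");
_Static_assert(DESC_DATA64 == 0xc093, "64-bit kernel data");
_Static_assert((DESC_CODE64 | DESC_USER) == 0xa0fb, "64-bit user code");
_Static_assert((DESC_DATA64 | DESC_USER) == 0xc0f3, "64-bit user data");
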
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index a0234dfd1031..1e16bd5ac781 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -150,7 +150,7 @@ do { \
((x)->e_machine == EM_X86_64)
#define compat_elf_check_arch(x) \
- ((elf_check_arch_ia32(x) && ia32_enabled()) || \
+ ((elf_check_arch_ia32(x) && ia32_enabled_verbose()) || \
(IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
static inline void elf_common_init(struct thread_struct *t,
diff --git a/arch/x86/include/asm/extable_fixup_types.h b/arch/x86/include/asm/extable_fixup_types.h
index 991e31cfde94..fe6312045042 100644
--- a/arch/x86/include/asm/extable_fixup_types.h
+++ b/arch/x86/include/asm/extable_fixup_types.h
@@ -4,7 +4,7 @@
/*
* Our IMM is signed, as such it must live at the top end of the word. Also,
- * since C99 hex constants are of ambigious type, force cast the mask to 'int'
+ * since C99 hex constants are of ambiguous type, force cast the mask to 'int'
* so that FIELD_GET() will DTRT and sign extend the value when it extracts it.
*/
#define EX_DATA_TYPE_MASK ((int)0x000000FF)
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index eb810074f1e7..f1fadc318a88 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -415,7 +415,7 @@ struct fpu_state_perm {
*
* This master permission field is only to be used when
* task.fpu.fpstate based checks fail to validate whether the task
- * is allowed to expand it's xfeatures set which requires to
+ * is allowed to expand its xfeatures set which requires to
* allocate a larger sized fpstate buffer.
*
* Do not access this field directly. Use the provided helper
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 9805629479d9..c7ef6ea2fa99 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -2,7 +2,6 @@
#ifndef _ASM_X86_IA32_H
#define _ASM_X86_IA32_H
-
#ifdef CONFIG_IA32_EMULATION
#include <linux/compat.h>
@@ -91,4 +90,14 @@ static inline void ia32_disable(void) {}
#endif
+static inline bool ia32_enabled_verbose(void)
+{
+ bool enabled = ia32_enabled();
+
+ if (IS_ENABLED(CONFIG_IA32_EMULATION) && !enabled)
+ pr_notice_once("32-bit emulation disabled. You can reenable with ia32_emulation=on\n");
+
+ return enabled;
+}
+
#endif /* _ASM_X86_IA32_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 76238842406a..3814a9263d64 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -242,7 +242,7 @@ static inline void slow_down_io(void)
#endif
-#define BUILDIO(bwl, bw, type) \
+#define BUILDIO(bwl, type) \
static inline void out##bwl##_p(type value, u16 port) \
{ \
out##bwl(value, port); \
@@ -288,9 +288,9 @@ static inline void ins##bwl(u16 port, void *addr, unsigned long count) \
} \
}
-BUILDIO(b, b, u8)
-BUILDIO(w, w, u16)
-BUILDIO(l, , u32)
+BUILDIO(b, u8)
+BUILDIO(w, u16)
+BUILDIO(l, u32)
#undef BUILDIO
#define inb_p inb_p
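
Dropping the unused second parameter of BUILDIO() does not change the generated helpers. For the fragment visible in the hunk, BUILDIO(b, u8) token-pastes to roughly the following; the slow_down_io() tail is an assumption based on the surrounding context, not shown in the diff:

static inline void outb_p(u8 value, u16 port)
{
	outb(value, port);
	slow_down_io();
}
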
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
index a1911fea8739..af7541c11821 100644
--- a/arch/x86/include/asm/iosf_mbi.h
+++ b/arch/x86/include/asm/iosf_mbi.h
@@ -111,7 +111,7 @@ int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
* This function will block all kernel access to the PMIC I2C bus, so that the
* P-Unit can safely access the PMIC over the shared I2C bus.
*
- * Note on these systems the i2c-bus driver will request a sempahore from the
+ * Note on these systems the i2c-bus driver will request a semaphore from the
* P-Unit for exclusive access to the PMIC bus when i2c drivers are accessing
* it, but this does not appear to be sufficient, we still need to avoid making
* certain P-Unit requests during the access window to avoid problems.
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d7036982332e..6711da000bb7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1652,7 +1652,7 @@ struct kvm_x86_ops {
/* Whether or not a virtual NMI is pending in hardware. */
bool (*is_vnmi_pending)(struct kvm_vcpu *vcpu);
/*
- * Attempt to pend a virtual NMI in harware. Returns %true on success
+ * Attempt to pend a virtual NMI in hardware. Returns %true on success
* to allow using static_call_ret0 as the fallback.
*/
bool (*set_vnmi_pending)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 6de6e1d95952..de3118305838 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -311,6 +311,7 @@ enum smca_bank_types {
SMCA_PIE, /* Power, Interrupts, etc. */
SMCA_UMC, /* Unified Memory Controller */
SMCA_UMC_V2,
+ SMCA_MA_LLC, /* Memory Attached Last Level Cache */
SMCA_PB, /* Parameter Block */
SMCA_PSP, /* Platform Security Processor */
SMCA_PSP_V2,
@@ -326,6 +327,8 @@ enum smca_bank_types {
SMCA_SHUB, /* System HUB Unit */
SMCA_SATA, /* SATA Unit */
SMCA_USB, /* USB Unit */
+ SMCA_USR_DP, /* Ultra Short Reach Data Plane Controller */
+ SMCA_USR_CP, /* Ultra Short Reach Control Plane Controller */
SMCA_GMI_PCS, /* GMI PCS Unit */
SMCA_XGMI_PHY, /* xGMI PHY Unit */
SMCA_WAFL_PHY, /* WAFL PHY Unit */
@@ -333,7 +336,6 @@ enum smca_bank_types {
N_SMCA_BANK_TYPES
};
-extern const char *smca_get_long_name(enum smca_bank_types t);
extern bool amd_mce_is_memory_error(struct mce *m);
extern int mce_threshold_create_device(unsigned int cpu);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 1d51e1850ed0..737a52b89e64 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -237,6 +237,11 @@
#define LBR_INFO_CYCLES 0xffff
#define LBR_INFO_BR_TYPE_OFFSET 56
#define LBR_INFO_BR_TYPE (0xfull << LBR_INFO_BR_TYPE_OFFSET)
+#define LBR_INFO_BR_CNTR_OFFSET 32
+#define LBR_INFO_BR_CNTR_NUM 4
+#define LBR_INFO_BR_CNTR_BITS 2
+#define LBR_INFO_BR_CNTR_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_BITS - 1, 0)
+#define LBR_INFO_BR_CNTR_FULL_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_NUM * LBR_INFO_BR_CNTR_BITS - 1, 0)
#define MSR_ARCH_LBR_CTL 0x000014ce
#define ARCH_LBR_CTL_LBREN BIT(0)
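
The new LBR_INFO_BR_CNTR_* constants describe four 2-bit branch-counter fields packed starting at bit 32 of an LBR_INFO record (GENMASK_ULL(h, l) spans bits h..l, so the per-field mask is 0x3 and the full mask covers bits 39:32). A standalone unpacking sketch, assuming counter i sits at bits [32 + 2*i + 1 : 32 + 2*i]:

#include <stdio.h>
#include <stdint.h>

#define LBR_INFO_BR_CNTR_OFFSET	32
#define LBR_INFO_BR_CNTR_NUM	4
#define LBR_INFO_BR_CNTR_BITS	2
#define LBR_INFO_BR_CNTR_MASK	0x3ull	/* GENMASK_ULL(1, 0) */

int main(void)
{
	/* bits 39:32 = 10 11 01 10 */
	uint64_t info = (uint64_t)0xb6 << LBR_INFO_BR_CNTR_OFFSET;
	int i;

	for (i = 0; i < LBR_INFO_BR_CNTR_NUM; i++) {
		unsigned int cnt = (info >> (LBR_INFO_BR_CNTR_OFFSET +
					     i * LBR_INFO_BR_CNTR_BITS)) &
				   LBR_INFO_BR_CNTR_MASK;
		printf("branch counter %d = %u\n", i, cnt);
	}
	return 0;	/* prints 2, 1, 3, 2 */
}
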
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 778df05f8539..920426d691ce 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -87,6 +87,15 @@ static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
:: "a" (eax), "b" (ebx), "c" (ecx));
}
+/*
+ * Re-enable interrupts right upon calling mwait in such a way that
+ * no interrupt can fire _before_ the execution of mwait, ie: no
+ * instruction must be placed between "sti" and "mwait".
+ *
+ * This is necessary because if an interrupt queues a timer before
+ * executing mwait, it would otherwise go unnoticed and the next tick
+ * would not be reprogrammed accordingly before mwait ever wakes up.
+ */
static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
mds_idle_clear_cpu_buffers();
@@ -115,8 +124,15 @@ static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned lo
}
__monitor((void *)&current_thread_info()->flags, 0, 0);
- if (!need_resched())
- __mwait(eax, ecx);
+
+ if (!need_resched()) {
+ if (ecx & 1) {
+ __mwait(eax, ecx);
+ } else {
+ __sti_mwait(eax, ecx);
+ raw_local_irq_disable();
+ }
+ }
}
current_clr_polling();
}
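
The comment added above relies on the one-instruction interrupt shadow after STI: the CPU defers interrupt delivery until the instruction following STI has executed, so when ECX bit 0 (break-on-interrupt) is clear the first point an IRQ can be taken is inside MWAIT itself, where it is guaranteed to wake the CPU. A minimal illustration of that back-to-back pairing (not the kernel helper; the real __sti_mwait() also clears MDS buffers first, and "0f 01 c9" is the MWAIT opcode):

static inline void sti_then_mwait(unsigned long eax, unsigned long ecx)
{
	/* nothing may be placed between STI and MWAIT */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
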
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index f93e9b96927a..262e65539f83 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -49,7 +49,7 @@
* but there is still a cushion vs. the RSB depth. The algorithm does not
* claim to be perfect and it can be speculated around by the CPU, but it
* is considered that it obfuscates the problem enough to make exploitation
- * extremly difficult.
+ * extremely difficult.
*/
#define RET_DEPTH_SHIFT 5
#define RSB_RET_STUFF_LOOPS 16
@@ -208,7 +208,7 @@
/*
* Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
- * eventually turn into it's own annotation.
+ * eventually turn into its own annotation.
*/
.macro VALIDATE_UNRET_END
#if defined(CONFIG_NOINSTR_VALIDATION) && \
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 6c8ff12140ae..8bcf7584e7dd 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -142,8 +142,7 @@ static inline void write_cr0(unsigned long x)
static __always_inline unsigned long read_cr2(void)
{
return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2,
- "mov %%cr2, %%rax;",
- ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%cr2, %%rax;", ALT_NOT_XEN);
}
static __always_inline void write_cr2(unsigned long x)
@@ -154,13 +153,12 @@ static __always_inline void write_cr2(unsigned long x)
static inline unsigned long __read_cr3(void)
{
return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3,
- "mov %%cr3, %%rax;", ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%cr3, %%rax;", ALT_NOT_XEN);
}
static inline void write_cr3(unsigned long x)
{
- PVOP_ALT_VCALL1(mmu.write_cr3, x,
- "mov %%rdi, %%cr3", ALT_NOT(X86_FEATURE_XENPV));
+ PVOP_ALT_VCALL1(mmu.write_cr3, x, "mov %%rdi, %%cr3", ALT_NOT_XEN);
}
static inline void __write_cr4(unsigned long x)
@@ -182,7 +180,7 @@ extern noinstr void pv_native_wbinvd(void);
static __always_inline void wbinvd(void)
{
- PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT(X86_FEATURE_XENPV));
+ PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT_XEN);
}
static inline u64 paravirt_read_msr(unsigned msr)
@@ -390,27 +388,25 @@ static inline void paravirt_release_p4d(unsigned long pfn)
static inline pte_t __pte(pteval_t val)
{
return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, mmu.make_pte, val,
- "mov %%rdi, %%rax",
- ALT_NOT(X86_FEATURE_XENPV)) };
+ "mov %%rdi, %%rax", ALT_NOT_XEN) };
}
static inline pteval_t pte_val(pte_t pte)
{
return PVOP_ALT_CALLEE1(pteval_t, mmu.pte_val, pte.pte,
- "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%rdi, %%rax", ALT_NOT_XEN);
}
static inline pgd_t __pgd(pgdval_t val)
{
return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, mmu.make_pgd, val,
- "mov %%rdi, %%rax",
- ALT_NOT(X86_FEATURE_XENPV)) };
+ "mov %%rdi, %%rax", ALT_NOT_XEN) };
}
static inline pgdval_t pgd_val(pgd_t pgd)
{
return PVOP_ALT_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd,
- "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%rdi, %%rax", ALT_NOT_XEN);
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
@@ -444,14 +440,13 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
static inline pmd_t __pmd(pmdval_t val)
{
return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, mmu.make_pmd, val,
- "mov %%rdi, %%rax",
- ALT_NOT(X86_FEATURE_XENPV)) };
+ "mov %%rdi, %%rax", ALT_NOT_XEN) };
}
static inline pmdval_t pmd_val(pmd_t pmd)
{
return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd,
- "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%rdi, %%rax", ALT_NOT_XEN);
}
static inline void set_pud(pud_t *pudp, pud_t pud)
@@ -464,7 +459,7 @@ static inline pud_t __pud(pudval_t val)
pudval_t ret;
ret = PVOP_ALT_CALLEE1(pudval_t, mmu.make_pud, val,
- "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%rdi, %%rax", ALT_NOT_XEN);
return (pud_t) { ret };
}
@@ -472,7 +467,7 @@ static inline pud_t __pud(pudval_t val)
static inline pudval_t pud_val(pud_t pud)
{
return PVOP_ALT_CALLEE1(pudval_t, mmu.pud_val, pud.pud,
- "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%rdi, %%rax", ALT_NOT_XEN);
}
static inline void pud_clear(pud_t *pudp)
@@ -492,8 +487,7 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
static inline p4d_t __p4d(p4dval_t val)
{
p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val,
- "mov %%rdi, %%rax",
- ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%rdi, %%rax", ALT_NOT_XEN);
return (p4d_t) { ret };
}
@@ -501,7 +495,7 @@ static inline p4d_t __p4d(p4dval_t val)
static inline p4dval_t p4d_val(p4d_t p4d)
{
return PVOP_ALT_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d,
- "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%rdi, %%rax", ALT_NOT_XEN);
}
static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
@@ -687,17 +681,17 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
static __always_inline unsigned long arch_local_save_flags(void)
{
return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;",
- ALT_NOT(X86_FEATURE_XENPV));
+ ALT_NOT_XEN);
}
static __always_inline void arch_local_irq_disable(void)
{
- PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT(X86_FEATURE_XENPV));
+ PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT_XEN);
}
static __always_inline void arch_local_irq_enable(void)
{
- PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT(X86_FEATURE_XENPV));
+ PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT_XEN);
}
static __always_inline unsigned long arch_local_irq_save(void)
@@ -726,52 +720,25 @@ static __always_inline unsigned long arch_local_irq_save(void)
#undef PVOP_VCALL4
#undef PVOP_CALL4
-#define DEFINE_PARAVIRT_ASM(func, instr, sec) \
- asm (".pushsection " #sec ", \"ax\"\n" \
- ".global " #func "\n\t" \
- ".type " #func ", @function\n\t" \
- ASM_FUNC_ALIGN "\n" \
- #func ":\n\t" \
- ASM_ENDBR \
- instr "\n\t" \
- ASM_RET \
- ".size " #func ", . - " #func "\n\t" \
- ".popsection")
-
extern void default_banner(void);
void native_pv_lock_init(void) __init;
#else /* __ASSEMBLY__ */
-#define _PVSITE(ptype, ops, word, algn) \
-771:; \
- ops; \
-772:; \
- .pushsection .parainstructions,"a"; \
- .align algn; \
- word 771b; \
- .byte ptype; \
- .byte 772b-771b; \
- _ASM_ALIGN; \
- .popsection
-
-
#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
+#ifdef CONFIG_DEBUG_ENTRY
-#define PARA_PATCH(off) ((off) / 8)
-#define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr) *addr(%rip)
-#ifdef CONFIG_DEBUG_ENTRY
.macro PARA_IRQ_save_fl
- PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),
- ANNOTATE_RETPOLINE_SAFE;
- call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);)
+ ANNOTATE_RETPOLINE_SAFE;
+ call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);
.endm
-#define SAVE_FLAGS ALTERNATIVE "PARA_IRQ_save_fl;", "pushf; pop %rax;", \
- ALT_NOT(X86_FEATURE_XENPV)
+#define SAVE_FLAGS ALTERNATIVE_2 "PARA_IRQ_save_fl;", \
+ "ALT_CALL_INSTR;", ALT_CALL_ALWAYS, \
+ "pushf; pop %rax;", ALT_NOT_XEN
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 772d03487520..d8e85d2cf8d5 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -2,15 +2,6 @@
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H
-#ifndef __ASSEMBLY__
-/* These all sit in the .parainstructions section to tell us what to patch. */
-struct paravirt_patch_site {
- u8 *instr; /* original instructions */
- u8 type; /* type of this instruction */
- u8 len; /* length of original instruction */
-};
-#endif
-
#ifdef CONFIG_PARAVIRT
#ifndef __ASSEMBLY__
@@ -250,43 +241,11 @@ struct paravirt_patch_template {
extern struct pv_info pv_info;
extern struct paravirt_patch_template pv_ops;
-#define PARAVIRT_PATCH(x) \
- (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
-
-#define paravirt_type(op) \
- [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
- [paravirt_opptr] "m" (pv_ops.op)
-/*
- * Generate some code, and mark it as patchable by the
- * apply_paravirt() alternate instruction patcher.
- */
-#define _paravirt_alt(insn_string, type) \
- "771:\n\t" insn_string "\n" "772:\n" \
- ".pushsection .parainstructions,\"a\"\n" \
- _ASM_ALIGN "\n" \
- _ASM_PTR " 771b\n" \
- " .byte " type "\n" \
- " .byte 772b-771b\n" \
- _ASM_ALIGN "\n" \
- ".popsection\n"
-
-/* Generate patchable code, with the default asm parameters. */
-#define paravirt_alt(insn_string) \
- _paravirt_alt(insn_string, "%c[paravirt_typenum]")
-
-/* Simple instruction patching code. */
-#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
-
-unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr, unsigned int len);
+#define paravirt_ptr(op) [paravirt_opptr] "m" (pv_ops.op)
int paravirt_disable_iospace(void);
-/*
- * This generates an indirect call based on the operation type number.
- * The type number, computed in PARAVIRT_PATCH, is derived from the
- * offset into the paravirt_patch_template structure, and can therefore be
- * freely converted back into a structure offset.
- */
+/* This generates an indirect call based on the operation type number. */
#define PARAVIRT_CALL \
ANNOTATE_RETPOLINE_SAFE \
"call *%[paravirt_opptr];"
@@ -319,12 +278,6 @@ int paravirt_disable_iospace(void);
* However, x86_64 also has to clobber all caller saved registers, which
* unfortunately, are quite a bit (r8 - r11)
*
- * The call instruction itself is marked by placing its start address
- * and size into the .parainstructions section, so that
- * apply_paravirt() in arch/i386/kernel/alternative.c can do the
- * appropriate patching under the control of the backend pv_init_ops
- * implementation.
- *
* Unfortunately there's no way to get gcc to generate the args setup
* for the call, and then allow the call itself to be generated by an
* inline asm. Because of this, we must do the complete arg setup and
@@ -423,14 +376,27 @@ int paravirt_disable_iospace(void);
__mask & __eax; \
})
-
+/*
+ * Use alternative patching for paravirt calls:
+ * - For replacing an indirect call with a direct one, use the "normal"
+ * ALTERNATIVE() macro with the indirect call as the initial code sequence,
+ * which will be replaced with the related direct call by using the
+ * ALT_FLAG_DIRECT_CALL special case and the "always on" feature.
+ * - In case the replacement is either a direct call or a short code sequence
+ * depending on a feature bit, the ALTERNATIVE_2() macro is being used.
+ * The indirect call is the initial code sequence again, while the special
+ * code sequence is selected with the specified feature bit. In case the
+ * feature is not active, the direct call is used as above via the
+ * ALT_FLAG_DIRECT_CALL special case and the "always on" feature.
+ */
#define ____PVOP_CALL(ret, op, call_clbr, extra_clbr, ...) \
({ \
PVOP_CALL_ARGS; \
PVOP_TEST_NULL(op); \
- asm volatile(paravirt_alt(PARAVIRT_CALL) \
+ asm volatile(ALTERNATIVE(PARAVIRT_CALL, ALT_CALL_INSTR, \
+ ALT_CALL_ALWAYS) \
: call_clbr, ASM_CALL_CONSTRAINT \
- : paravirt_type(op), \
+ : paravirt_ptr(op), \
##__VA_ARGS__ \
: "memory", "cc" extra_clbr); \
ret; \
@@ -441,10 +407,11 @@ int paravirt_disable_iospace(void);
({ \
PVOP_CALL_ARGS; \
PVOP_TEST_NULL(op); \
- asm volatile(ALTERNATIVE(paravirt_alt(PARAVIRT_CALL), \
- alt, cond) \
+ asm volatile(ALTERNATIVE_2(PARAVIRT_CALL, \
+ ALT_CALL_INSTR, ALT_CALL_ALWAYS, \
+ alt, cond) \
: call_clbr, ASM_CALL_CONSTRAINT \
- : paravirt_type(op), \
+ : paravirt_ptr(op), \
##__VA_ARGS__ \
: "memory", "cc" extra_clbr); \
ret; \
@@ -542,8 +509,6 @@ int paravirt_disable_iospace(void);
__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
-void _paravirt_nop(void);
-void paravirt_BUG(void);
unsigned long paravirt_ret0(void);
#ifdef CONFIG_PARAVIRT_XXL
u64 _paravirt_ident_64(u64);
@@ -553,11 +518,11 @@ void pv_native_irq_enable(void);
unsigned long pv_native_read_cr2(void);
#endif
-#define paravirt_nop ((void *)_paravirt_nop)
-
-extern struct paravirt_patch_site __parainstructions[],
- __parainstructions_end[];
+#define paravirt_nop ((void *)nop_func)
#endif /* __ASSEMBLY__ */
+
+#define ALT_NOT_XEN ALT_NOT(X86_FEATURE_XENPV)
+
#endif /* CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_TYPES_H */
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 2618ec7c3d1d..3736b8a46c04 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -31,6 +31,7 @@
#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
+#define ARCH_PERFMON_EVENTSEL_BR_CNTR (1ULL << 35)
#define INTEL_FIXED_BITS_MASK 0xFULL
#define INTEL_FIXED_BITS_STRIDE 4
@@ -223,6 +224,9 @@ union cpuid28_ecx {
unsigned int lbr_timed_lbr:1;
/* Branch Type Field Supported */
unsigned int lbr_br_type:1;
+ unsigned int reserved:13;
+ /* Branch counters (Event Logging) Supported */
+ unsigned int lbr_counters:4;
} split;
unsigned int full;
};
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 57bab91bbf50..9d077bca6a10 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -141,6 +141,7 @@ static inline int pte_young(pte_t pte)
return pte_flags(pte) & _PAGE_ACCESSED;
}
+#define pmd_dirty pmd_dirty
static inline bool pmd_dirty(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_DIRTY_BITS;
@@ -1679,12 +1680,6 @@ static inline bool arch_has_pfn_modify_check(void)
return boot_cpu_has_bug(X86_BUG_L1TF);
}
-#define arch_has_hw_pte_young arch_has_hw_pte_young
-static inline bool arch_has_hw_pte_young(void)
-{
- return true;
-}
-
#define arch_check_zapped_pte arch_check_zapped_pte
void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte);
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index a629b1b9f65a..24af25b1551a 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -203,7 +203,7 @@ static inline void native_pgd_clear(pgd_t *pgd)
* F (2) in swp entry is used to record when a pagetable is
* writeprotected by userfaultfd WP support.
*
- * E (3) in swp entry is used to rememeber PG_anon_exclusive.
+ * E (3) in swp entry is used to remember PG_anon_exclusive.
*
* Bit 7 in swp entry should be 0 because pmd_present checks not only P,
* but also L and G.
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ae81a7191c1c..26620d7642a9 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -749,4 +749,22 @@ enum mds_mitigations {
extern bool gds_ucode_mitigated(void);
+/*
+ * Make previous memory operations globally visible before
+ * a WRMSR.
+ *
+ * MFENCE makes writes visible, but only affects load/store
+ * instructions. WRMSR is unfortunately not a load/store
+ * instruction and is unaffected by MFENCE. The LFENCE ensures
+ * that the WRMSR is not reordered.
+ *
+ * Most WRMSRs are full serializing instructions themselves and
+ * do not require this barrier. This is only required for the
+ * IA32_TSC_DEADLINE and X2APIC MSRs.
+ */
+static inline void weak_wrmsr_fence(void)
+{
+ alternative("mfence; lfence", "", ALT_NOT(X86_FEATURE_APIC_MSRS_FENCE));
+}
+
#endif /* _ASM_X86_PROCESSOR_H */
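
With this move, weak_wrmsr_fence() becomes an alternative() that keeps the "mfence; lfence" pair only on CPUs flagged with X86_FEATURE_APIC_MSRS_FENCE and turns into a no-op elsewhere. A kernel-context sketch of the call pattern it protects; wrmsrl() and MSR_IA32_TSC_DEADLINE are existing kernel symbols, while the helper name below is purely illustrative:

static void arm_tsc_deadline_sketch(u64 tsc_deadline)
{
	/* IA32_TSC_DEADLINE is one of the few non-serializing WRMSRs;
	 * earlier stores must be globally visible before the write. */
	weak_wrmsr_fence();
	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc_deadline);
}
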
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index 85b6e3609cb9..ef9697f20129 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -56,8 +56,8 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
"pop %rdx\n\t" \
FRAME_END
-DEFINE_PARAVIRT_ASM(__raw_callee_save___pv_queued_spin_unlock,
- PV_UNLOCK_ASM, .spinlock.text);
+DEFINE_ASM_FUNC(__raw_callee_save___pv_queued_spin_unlock,
+ PV_UNLOCK_ASM, .spinlock.text);
#else /* CONFIG_64BIT */
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index bf483fcb4e57..5c83729c8e71 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -31,8 +31,6 @@
#include <asm/bootparam.h>
#include <asm/x86_init.h>
-extern u64 relocated_ramdisk;
-
/* Interrupt control for vSMPowered x86_64 systems */
#ifdef CONFIG_X86_64
void vsmp_init(void);
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 29832c338cdc..0b70653a98c1 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -6,18 +6,6 @@
#include <linux/stddef.h>
#include <asm/ptrace.h>
-struct paravirt_patch_site;
-#ifdef CONFIG_PARAVIRT
-void apply_paravirt(struct paravirt_patch_site *start,
- struct paravirt_patch_site *end);
-#else
-static inline void apply_paravirt(struct paravirt_patch_site *start,
- struct paravirt_patch_site *end)
-{}
-#define __parainstructions NULL
-#define __parainstructions_end NULL
-#endif
-
/*
* Currently, the max observed size in the kernel code is
* JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5.
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 5fa76c2ced51..ea877fd83114 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -653,7 +653,7 @@ static inline int uv_blade_to_node(int blade)
return uv_socket_to_node(blade);
}
-/* Blade number of current cpu. Numnbered 0 .. <#blades -1> */
+/* Blade number of current cpu. Numbered 0 .. <#blades -1> */
static inline int uv_numa_blade_id(void)
{
return uv_hub_info->numa_blade_id;
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index c81858d903dc..923053f366d7 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -321,7 +321,7 @@ static __always_inline
u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
/*
- * Due to the MSB/Sign-bit being used as invald marker (see
+ * Due to the MSB/Sign-bit being used as invalid marker (see
* arch_vdso_cycles_valid() above), the effective mask is S64_MAX.
*/
u64 delta = (cycles - last) & S64_MAX;
diff --git a/arch/x86/include/asm/xen/interface_64.h b/arch/x86/include/asm/xen/interface_64.h
index c599ec269a25..c10f279aae93 100644
--- a/arch/x86/include/asm/xen/interface_64.h
+++ b/arch/x86/include/asm/xen/interface_64.h
@@ -61,7 +61,7 @@
* RING1 -> RING3 kernel mode.
* RING2 -> RING3 kernel mode.
* RING3 -> RING3 user mode.
- * However RING0 indicates that the guest kernel should return to iteself
+ * However RING0 indicates that the guest kernel should return to itself
* directly with
* orb $3,1*8(%rsp)
* iretq
diff --git a/arch/x86/include/uapi/asm/amd_hsmp.h b/arch/x86/include/uapi/asm/amd_hsmp.h
index fce22686c834..e5d182c7373c 100644
--- a/arch/x86/include/uapi/asm/amd_hsmp.h
+++ b/arch/x86/include/uapi/asm/amd_hsmp.h
@@ -238,7 +238,7 @@ static const struct hsmp_msg_desc hsmp_msg_desc_table[] = {
/*
* HSMP_GET_DIMM_THERMAL, num_args = 1, response_sz = 1
* input: args[0] = DIMM address[7:0]
- * output: args[0] = temperature in degree celcius[31:21] + update rate in ms[16:8] +
+ * output: args[0] = temperature in degree celsius[31:21] + update rate in ms[16:8] +
* DIMM address[7:0]
*/
{1, 1, HSMP_GET},
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index aae7456ece07..55e205b21271 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -160,7 +160,6 @@ extern s32 __retpoline_sites[], __retpoline_sites_end[];
extern s32 __return_sites[], __return_sites_end[];
extern s32 __cfi_sites[], __cfi_sites_end[];
extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
-extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);
@@ -395,6 +394,63 @@ apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
}
}
+/* Low-level backend functions usable from alternative code replacements. */
+DEFINE_ASM_FUNC(nop_func, "", .entry.text);
+EXPORT_SYMBOL_GPL(nop_func);
+
+noinstr void BUG_func(void)
+{
+ BUG();
+}
+EXPORT_SYMBOL_GPL(BUG_func);
+
+#define CALL_RIP_REL_OPCODE 0xff
+#define CALL_RIP_REL_MODRM 0x15
+
+/*
+ * Rewrite the "call BUG_func" replacement to point to the target of the
+ * indirect pv_ops call "call *disp(%ip)".
+ */
+static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a)
+{
+ void *target, *bug = &BUG_func;
+ s32 disp;
+
+ if (a->replacementlen != 5 || insn_buff[0] != CALL_INSN_OPCODE) {
+ pr_err("ALT_FLAG_DIRECT_CALL set for a non-call replacement instruction\n");
+ BUG();
+ }
+
+ if (a->instrlen != 6 ||
+ instr[0] != CALL_RIP_REL_OPCODE ||
+ instr[1] != CALL_RIP_REL_MODRM) {
+ pr_err("ALT_FLAG_DIRECT_CALL set for unrecognized indirect call\n");
+ BUG();
+ }
+
+ /* Skip CALL_RIP_REL_OPCODE and CALL_RIP_REL_MODRM */
+ disp = *(s32 *)(instr + 2);
+#ifdef CONFIG_X86_64
+ /* ff 15 00 00 00 00 call *0x0(%rip) */
+ /* target address is stored at "next instruction + disp". */
+ target = *(void **)(instr + a->instrlen + disp);
+#else
+ /* ff 15 00 00 00 00 call *0x0 */
+ /* target address is stored at disp. */
+ target = *(void **)disp;
+#endif
+ if (!target)
+ target = bug;
+
+ /* (BUG_func - .) + (target - BUG_func) := target - . */
+ *(s32 *)(insn_buff + 1) += target - bug;
+
+ if (target == &nop_func)
+ return 0;
+
+ return 5;
+}
+
/*
* Replace instructions with better alternatives for this CPU type. This runs
* before SMP is initialized to avoid SMP problems with self modifying code.
@@ -452,16 +508,21 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
continue;
}
- DPRINTK(ALT, "feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
- (a->flags & ALT_FLAG_NOT) ? "!" : "",
+ DPRINTK(ALT, "feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d) flags: 0x%x",
a->cpuid >> 5,
a->cpuid & 0x1f,
instr, instr, a->instrlen,
- replacement, a->replacementlen);
+ replacement, a->replacementlen, a->flags);
memcpy(insn_buff, replacement, a->replacementlen);
insn_buff_sz = a->replacementlen;
+ if (a->flags & ALT_FLAG_DIRECT_CALL) {
+ insn_buff_sz = alt_replace_call(instr, insn_buff, a);
+ if (insn_buff_sz < 0)
+ continue;
+ }
+
for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
insn_buff[insn_buff_sz] = 0x90;
@@ -1421,46 +1482,6 @@ int alternatives_text_reserved(void *start, void *end)
}
#endif /* CONFIG_SMP */
-#ifdef CONFIG_PARAVIRT
-
-/* Use this to add nops to a buffer, then text_poke the whole buffer. */
-static void __init_or_module add_nops(void *insns, unsigned int len)
-{
- while (len > 0) {
- unsigned int noplen = len;
- if (noplen > ASM_NOP_MAX)
- noplen = ASM_NOP_MAX;
- memcpy(insns, x86_nops[noplen], noplen);
- insns += noplen;
- len -= noplen;
- }
-}
-
-void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
- struct paravirt_patch_site *end)
-{
- struct paravirt_patch_site *p;
- char insn_buff[MAX_PATCH_LEN];
-
- for (p = start; p < end; p++) {
- unsigned int used;
-
- BUG_ON(p->len > MAX_PATCH_LEN);
- /* prep the buffer with the original instructions */
- memcpy(insn_buff, p->instr, p->len);
- used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
-
- BUG_ON(used > p->len);
-
- /* Pad the rest with nops */
- add_nops(insn_buff + used, p->len - used);
- text_poke_early(p->instr, insn_buff, p->len);
- }
-}
-extern struct paravirt_patch_site __start_parainstructions[],
- __stop_parainstructions[];
-#endif /* CONFIG_PARAVIRT */
-
/*
* Self-test for the INT3 based CALL emulation code.
*
@@ -1596,28 +1617,11 @@ void __init alternative_instructions(void)
*/
/*
- * Paravirt patching and alternative patching can be combined to
- * replace a function call with a short direct code sequence (e.g.
- * by setting a constant return value instead of doing that in an
- * external function).
- * In order to make this work the following sequence is required:
- * 1. set (artificial) features depending on used paravirt
- * functions which can later influence alternative patching
- * 2. apply paravirt patching (generally replacing an indirect
- * function call with a direct one)
- * 3. apply alternative patching (e.g. replacing a direct function
- * call with a custom code sequence)
- * Doing paravirt patching after alternative patching would clobber
- * the optimization of the custom code with a function call again.
+ * Make sure to set (artificial) features depending on used paravirt
+ * functions which can later influence alternative patching.
*/
paravirt_set_cap();
- /*
- * First patch paravirt functions, such that we overwrite the indirect
- * call with the direct call.
- */
- apply_paravirt(__parainstructions, __parainstructions_end);
-
__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
__cfi_sites, __cfi_sites_end, true);
@@ -1628,10 +1632,6 @@ void __init alternative_instructions(void)
apply_retpolines(__retpoline_sites, __retpoline_sites_end);
apply_returns(__return_sites, __return_sites_end);
- /*
- * Then patch alternatives, such that those paravirt calls that are in
- * alternatives can be overwritten by their immediate fragments.
- */
apply_alternatives(__alt_instructions, __alt_instructions_end);
/*
@@ -1906,7 +1906,7 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
* Note that the caller must ensure that if the modified code is part of a
* module, the module would not be removed during poking. This can be achieved
* by registering a module notifier, and ordering module removal and patching
- * trough a mutex.
+ * through a mutex.
*/
void *text_poke(void *addr, const void *opcode, size_t len)
{
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 56a917df410d..2ae98f754e59 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -776,7 +776,7 @@ int __init gart_iommu_init(void)
iommu_size >> PAGE_SHIFT);
/*
* Tricky. The GART table remaps the physical memory range,
- * so the CPU wont notice potential aliases and if the memory
+ * so the CPU won't notice potential aliases and if the memory
* is remapped to UC later on, we might surprise the PCI devices
* with a stray writeout of a cacheline. So play it sure and
* do an explicit, full-scale wbinvd() _after_ having marked all
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
index 2ee867d796d9..3bf0487cf3b7 100644
--- a/arch/x86/kernel/apic/Makefile
+++ b/arch/x86/kernel/apic/Makefile
@@ -4,7 +4,7 @@
#
# Leads to non-deterministic coverage that is not a function of syscall inputs.
-# In particualr, smp_apic_timer_interrupt() is called in random places.
+# In particular, smp_apic_timer_interrupt() is called in random places.
KCOV_INSTRUMENT := n
obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_common.o apic_noop.o ipi.o vector.o init.o
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 41093cf20acd..4667bc4b00ab 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -782,7 +782,7 @@ bool __init apic_needs_pit(void)
/*
* If interrupt delivery mode is legacy PIC or virtual wire without
- * configuration, the local APIC timer wont be set up. Make sure
+ * configuration, the local APIC timer won't be set up. Make sure
* that the PIT is initialized.
*/
if (apic_intr_mode == APIC_PIC ||
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 7139867d69cd..b295a056a4fc 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -82,7 +82,6 @@ static struct apic apic_flat __ro_after_init = {
.acpi_madt_oem_check = flat_acpi_madt_oem_check,
.apic_id_registered = default_apic_id_registered,
- .delivery_mode = APIC_DELIVERY_MODE_FIXED,
.dest_mode_logical = true,
.disable_esr = 0,
@@ -154,7 +153,6 @@ static struct apic apic_physflat __ro_after_init = {
.acpi_madt_oem_check = physflat_acpi_madt_oem_check,
.apic_id_registered = default_apic_id_registered,
- .delivery_mode = APIC_DELIVERY_MODE_FIXED,
.dest_mode_logical = false,
.disable_esr = 0,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index b00d52ae84fa..9f1d553eb48f 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -47,7 +47,6 @@ static void noop_apic_write(u32 reg, u32 val)
struct apic apic_noop __ro_after_init = {
.name = "noop",
- .delivery_mode = APIC_DELIVERY_MODE_FIXED,
.dest_mode_logical = true,
.disable_esr = 0,
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 456a14c44f67..7d0c51b9d3bc 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -222,7 +222,6 @@ static const struct apic apic_numachip1 __refconst = {
.probe = numachip1_probe,
.acpi_madt_oem_check = numachip1_acpi_madt_oem_check,
- .delivery_mode = APIC_DELIVERY_MODE_FIXED,
.dest_mode_logical = false,
.disable_esr = 0,
@@ -259,7 +258,6 @@ static const struct apic apic_numachip2 __refconst = {
.probe = numachip2_probe,
.acpi_madt_oem_check = numachip2_acpi_madt_oem_check,
- .delivery_mode = APIC_DELIVERY_MODE_FIXED,
.dest_mode_logical = false,
.disable_esr = 0,
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 7ee3c486cb33..5a0d60b38e6b 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -80,7 +80,6 @@ static struct apic apic_bigsmp __ro_after_init = {
.name = "bigsmp",
.probe = probe_bigsmp,
- .delivery_mode = APIC_DELIVERY_MODE_FIXED,
.dest_mode_logical = false,
.disable_esr = 1,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 00da6cf6b07d..40c7cf180c20 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -997,7 +997,7 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain,
/*
* Legacy ISA IRQ has already been allocated, just add pin to
* the pin list associated with this IRQ and program the IOAPIC
- * entry. The IOAPIC entry
+ * entry.
*/
if (irq_data && irq_data->parent_data) {
if (!mp_check_pin_attr(irq, info))
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 5eb3fbe472da..c0f78059f06a 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -45,7 +45,6 @@ static struct apic apic_default __ro_after_init = {
.probe = probe_default,
.apic_id_registered = default_apic_id_registered,
- .delivery_mode = APIC_DELIVERY_MODE_FIXED,
.dest_mode_logical = true,
.disable_esr = 0,
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 319448d87b99..185738c72766 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -738,8 +738,8 @@ int __init arch_probe_nr_irqs(void)
void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
/*
- * Use assign system here so it wont get accounted as allocated
- * and moveable in the cpu hotplug check and it prevents managed
+ * Use assign system here so it won't get accounted as allocated
+ * and movable in the cpu hotplug check and it prevents managed
* irq reservation from touching it.
*/
irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index a8306089c91b..28a7d3f2312d 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -227,7 +227,6 @@ static struct apic apic_x2apic_cluster __ro_after_init = {
.probe = x2apic_cluster_probe,
.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
- .delivery_mode = APIC_DELIVERY_MODE_FIXED,
.dest_mode_logical = true,
.disable_esr = 0,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 558a4a8824f4..409815a40668 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -145,7 +145,6 @@ static struct apic apic_x2apic_phys __ro_after_init = {
.probe = x2apic_phys_probe,
.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
- .delivery_mode = APIC_DELIVERY_MODE_FIXED,
.dest_mode_logical = false,
.disable_esr = 0,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1b0d7336a28f..f1766b18dcd0 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -805,7 +805,6 @@ static struct apic apic_x2apic_uv_x __ro_after_init = {
.probe = uv_probe,
.acpi_madt_oem_check = uv_acpi_madt_oem_check,
- .delivery_mode = APIC_DELIVERY_MODE_FIXED,
.dest_mode_logical = false,
.disable_esr = 0,
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 5934ee5bc087..76a5ced278c2 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -420,7 +420,7 @@ static DEFINE_MUTEX(apm_mutex);
* This is for buggy BIOS's that refer to (real mode) segment 0x40
* even though they are called in protected mode.
*/
-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
+static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(DESC_DATA32_BIOS,
(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
static const char driver_version[] = "1.16ac"; /* no spaces */
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index e9ad518a5003..64ad2ddea121 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -233,14 +233,13 @@ patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
}
static __init_or_module void
-patch_paravirt_call_sites(struct paravirt_patch_site *start,
- struct paravirt_patch_site *end,
- const struct core_text *ct)
+patch_alt_call_sites(struct alt_instr *start, struct alt_instr *end,
+ const struct core_text *ct)
{
- struct paravirt_patch_site *p;
+ struct alt_instr *a;
- for (p = start; p < end; p++)
- patch_call(p->instr, ct);
+ for (a = start; a < end; a++)
+ patch_call((void *)&a->instr_offset + a->instr_offset, ct);
}
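The expression (void *)&a->instr_offset + a->instr_offset above is the usual self-relative pointer idiom: the table entry stores a 32-bit delta from the field's own address to the patch site. A minimal userspace sketch of the idiom, using a simplified stand-in for struct alt_instr (the real layout lives in the kernel headers):

/* Illustrative sketch only; rel_entry is a hypothetical stand-in struct. */
#include <stdint.h>
#include <stdio.h>

struct rel_entry {
        int32_t instr_offset;   /* target address minus the address of this field */
};

static void *resolve(struct rel_entry *e)
{
        /* absolute target = address of the offset field + the stored delta */
        return (void *)((uintptr_t)&e->instr_offset + (intptr_t)e->instr_offset);
}

int main(void)
{
        static char target[16];
        struct rel_entry e;

        e.instr_offset = (int32_t)((uintptr_t)target - (uintptr_t)&e.instr_offset);
        printf("resolved %p, expected %p\n", resolve(&e), (void *)target);
        return 0;
}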
static __init_or_module void
@@ -248,7 +247,7 @@ callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
{
prdbg("Patching call sites %s\n", ct->name);
patch_call_sites(cs->call_start, cs->call_end, ct);
- patch_paravirt_call_sites(cs->pv_start, cs->pv_end, ct);
+ patch_alt_call_sites(cs->alt_start, cs->alt_end, ct);
prdbg("Patching call sites done%s\n", ct->name);
}
@@ -257,8 +256,8 @@ void __init callthunks_patch_builtin_calls(void)
struct callthunk_sites cs = {
.call_start = __call_sites,
.call_end = __call_sites_end,
- .pv_start = __parainstructions,
- .pv_end = __parainstructions_end
+ .alt_start = __alt_instructions,
+ .alt_end = __alt_instructions_end
};
if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f322ebd053a9..9f42d1c59e09 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -34,87 +34,6 @@
*/
static u32 nodes_per_socket = 1;
-/*
- * AMD errata checking
- *
- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
- * have an OSVW id assigned, which it takes as first argument. Both take a
- * variable number of family-specific model-stepping ranges created by
- * AMD_MODEL_RANGE().
- *
- * Example:
- *
- * const int amd_erratum_319[] =
- * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
- * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
- * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
- */
-
-#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
-#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
-#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
- ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
-#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
-#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
-#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
-
-static const int amd_erratum_400[] =
- AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
- AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
-
-static const int amd_erratum_383[] =
- AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
-
-/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
-static const int amd_erratum_1054[] =
- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
-
-static const int amd_zenbleed[] =
- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
- AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
- AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
- AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
-
-static const int amd_div0[] =
- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
- AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
-
-static const int amd_erratum_1485[] =
- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf),
- AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf));
-
-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
-{
- int osvw_id = *erratum++;
- u32 range;
- u32 ms;
-
- if (osvw_id >= 0 && osvw_id < 65536 &&
- cpu_has(cpu, X86_FEATURE_OSVW)) {
- u64 osvw_len;
-
- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
- if (osvw_id < osvw_len) {
- u64 osvw_bits;
-
- rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
- osvw_bits);
- return osvw_bits & (1ULL << (osvw_id & 0x3f));
- }
- }
-
- /* OSVW unavailable or ID unknown, match family-model-stepping range */
- ms = (cpu->x86_model << 4) | cpu->x86_stepping;
- while ((range = *erratum++))
- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
- (ms >= AMD_MODEL_RANGE_START(range)) &&
- (ms <= AMD_MODEL_RANGE_END(range)))
- return true;
-
- return false;
-}
-
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
u32 gprs[8] = { 0 };
@@ -616,6 +535,49 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
}
resctrl_cpu_detect(c);
+
+ /* Figure out Zen generations: */
+ switch (c->x86) {
+ case 0x17: {
+ switch (c->x86_model) {
+ case 0x00 ... 0x2f:
+ case 0x50 ... 0x5f:
+ setup_force_cpu_cap(X86_FEATURE_ZEN1);
+ break;
+ case 0x30 ... 0x4f:
+ case 0x60 ... 0x7f:
+ case 0x90 ... 0x91:
+ case 0xa0 ... 0xaf:
+ setup_force_cpu_cap(X86_FEATURE_ZEN2);
+ break;
+ default:
+ goto warn;
+ }
+ break;
+ }
+ case 0x19: {
+ switch (c->x86_model) {
+ case 0x00 ... 0x0f:
+ case 0x20 ... 0x5f:
+ setup_force_cpu_cap(X86_FEATURE_ZEN3);
+ break;
+ case 0x10 ... 0x1f:
+ case 0x60 ... 0xaf:
+ setup_force_cpu_cap(X86_FEATURE_ZEN4);
+ break;
+ default:
+ goto warn;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return;
+
+warn:
+ WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
}
static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
@@ -739,15 +701,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)
if (c->x86 == 0x16 && c->x86_model <= 0xf)
msr_set_bit(MSR_AMD64_LS_CFG, 15);
- /*
- * Check whether the machine is affected by erratum 400. This is
- * used to select the proper idle routine and to enable the check
- * whether the machine is affected in arch_post_acpi_init(), which
- * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
- */
- if (cpu_has_amd_erratum(c, amd_erratum_400))
- set_cpu_bug(c, X86_BUG_AMD_E400);
-
early_detect_mem_encrypt(c);
/* Re-enable TopologyExtensions if switched off by BIOS */
@@ -814,6 +767,16 @@ static void init_amd_k8(struct cpuinfo_x86 *c)
msr_set_bit(MSR_K7_HWCR, 6);
#endif
set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
+
+ /*
+ * Check models and steppings affected by erratum 400. This is
+ * used to select the proper idle routine and to enable the
+ * check whether the machine is affected in arch_post_acpi_subsys_init()
+ * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
+ */
+ if (c->x86_model > 0x41 ||
+ (c->x86_model == 0x41 && c->x86_stepping >= 0x2))
+ setup_force_cpu_bug(X86_BUG_AMD_E400);
}
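For reference, the open-coded model/stepping test above should select the same family 0xf parts as the removed AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf) entry for erratum 400, which packed (model << 4 | stepping) into a single range. A throwaway userspace sketch (not kernel code) that cross-checks the two forms over every model/stepping pair:

/* Illustrative sketch only; prints nothing if the two checks agree. */
#include <stdbool.h>
#include <stdio.h>

static bool old_range_check(unsigned int model, unsigned int stepping)
{
        unsigned int ms = (model << 4) | stepping;  /* as AMD_MODEL_RANGE packed it */

        return ms >= 0x412 && ms <= 0xfff;          /* range 0x41/0x2 .. 0xff/0xf   */
}

static bool new_check(unsigned int model, unsigned int stepping)
{
        return model > 0x41 || (model == 0x41 && stepping >= 0x2);
}

int main(void)
{
        for (unsigned int model = 0; model <= 0xff; model++)
                for (unsigned int stepping = 0; stepping <= 0xf; stepping++)
                        if (old_range_check(model, stepping) != new_check(model, stepping))
                                printf("mismatch at model %#x stepping %#x\n", model, stepping);
        return 0;
}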
static void init_amd_gh(struct cpuinfo_x86 *c)
@@ -847,8 +810,17 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
*/
msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
- if (cpu_has_amd_erratum(c, amd_erratum_383))
- set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
+ set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
+
+ /*
+ * Check models and steppings affected by erratum 400. This is
+ * used to select the proper idle routine and to enable the
+ * check whether the machine is affected in arch_post_acpi_subsys_init()
+ * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
+ */
+ if (c->x86_model > 0x2 ||
+ (c->x86_model == 0x2 && c->x86_stepping >= 0x1))
+ setup_force_cpu_bug(X86_BUG_AMD_E400);
}
static void init_amd_ln(struct cpuinfo_x86 *c)
@@ -941,6 +913,19 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
clear_rdrand_cpuid_bit(c);
}
+static void fix_erratum_1386(struct cpuinfo_x86 *c)
+{
+ /*
+ * Work around Erratum 1386. The XSAVES instruction malfunctions in
+ * certain circumstances on Zen1/2 uarch, and not all parts have had
+ * updated microcode at the time of writing (March 2023).
+ *
+ * Affected parts all have no supervisor XSAVE states, meaning that
+ * the XSAVEC instruction (which works fine) is equivalent.
+ */
+ clear_cpu_cap(c, X86_FEATURE_XSAVES);
+}
+
void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_CPU_UNRET_ENTRY
@@ -951,34 +936,28 @@ void init_spectral_chicken(struct cpuinfo_x86 *c)
*
* This suppresses speculation from the middle of a basic block, i.e. it
* suppresses non-branch predictions.
- *
- * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H
*/
- if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) {
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
}
}
#endif
- /*
- * Work around Erratum 1386. The XSAVES instruction malfunctions in
- * certain circumstances on Zen1/2 uarch, and not all parts have had
- * updated microcode at the time of writing (March 2023).
- *
- * Affected parts all have no supervisor XSAVE states, meaning that
- * the XSAVEC instruction (which works fine) is equivalent.
- */
- clear_cpu_cap(c, X86_FEATURE_XSAVES);
}
-static void init_amd_zn(struct cpuinfo_x86 *c)
+static void init_amd_zen_common(void)
{
- set_cpu_cap(c, X86_FEATURE_ZEN);
-
+ setup_force_cpu_cap(X86_FEATURE_ZEN);
#ifdef CONFIG_NUMA
node_reclaim_distance = 32;
#endif
+}
+
+static void init_amd_zen1(struct cpuinfo_x86 *c)
+{
+ init_amd_zen_common();
+ fix_erratum_1386(c);
/* Fix up CPUID bits, but only if not virtualised. */
if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
@@ -986,15 +965,10 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
/* Erratum 1076: CPB feature bit not being set in CPUID. */
if (!cpu_has(c, X86_FEATURE_CPB))
set_cpu_cap(c, X86_FEATURE_CPB);
-
- /*
- * Zen3 (Fam19 model < 0x10) parts are not susceptible to
- * Branch Type Confusion, but predate the allocation of the
- * BTC_NO bit.
- */
- if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
- set_cpu_cap(c, X86_FEATURE_BTC_NO);
}
+
+ pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
+ setup_force_cpu_bug(X86_BUG_DIV0);
}
static bool cpu_has_zenbleed_microcode(void)
@@ -1018,11 +992,8 @@ static bool cpu_has_zenbleed_microcode(void)
return true;
}
-static void zenbleed_check(struct cpuinfo_x86 *c)
+static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
{
- if (!cpu_has_amd_erratum(c, amd_zenbleed))
- return;
-
if (cpu_has(c, X86_FEATURE_HYPERVISOR))
return;
@@ -1037,6 +1008,37 @@ static void zenbleed_check(struct cpuinfo_x86 *c)
}
}
+static void init_amd_zen2(struct cpuinfo_x86 *c)
+{
+ init_amd_zen_common();
+ init_spectral_chicken(c);
+ fix_erratum_1386(c);
+ zen2_zenbleed_check(c);
+}
+
+static void init_amd_zen3(struct cpuinfo_x86 *c)
+{
+ init_amd_zen_common();
+
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
+ /*
+ * Zen3 (Fam19 model < 0x10) parts are not susceptible to
+ * Branch Type Confusion, but predate the allocation of the
+ * BTC_NO bit.
+ */
+ if (!cpu_has(c, X86_FEATURE_BTC_NO))
+ set_cpu_cap(c, X86_FEATURE_BTC_NO);
+ }
+}
+
+static void init_amd_zen4(struct cpuinfo_x86 *c)
+{
+ init_amd_zen_common();
+
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
+ msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
+}
+
static void init_amd(struct cpuinfo_x86 *c)
{
u64 vm_cr;
@@ -1072,11 +1074,17 @@ static void init_amd(struct cpuinfo_x86 *c)
case 0x12: init_amd_ln(c); break;
case 0x15: init_amd_bd(c); break;
case 0x16: init_amd_jg(c); break;
- case 0x17: init_spectral_chicken(c);
- fallthrough;
- case 0x19: init_amd_zn(c); break;
}
+ if (boot_cpu_has(X86_FEATURE_ZEN1))
+ init_amd_zen1(c);
+ else if (boot_cpu_has(X86_FEATURE_ZEN2))
+ init_amd_zen2(c);
+ else if (boot_cpu_has(X86_FEATURE_ZEN3))
+ init_amd_zen3(c);
+ else if (boot_cpu_has(X86_FEATURE_ZEN4))
+ init_amd_zen4(c);
+
/*
* Enable workaround for FXSAVE leak on CPUs
* without a XSaveErPtr feature
@@ -1136,7 +1144,7 @@ static void init_amd(struct cpuinfo_x86 *c)
* Counter May Be Inaccurate".
*/
if (cpu_has(c, X86_FEATURE_IRPERF) &&
- !cpu_has_amd_erratum(c, amd_erratum_1054))
+ (boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
check_null_seg_clears_base(c);
@@ -1152,16 +1160,8 @@ static void init_amd(struct cpuinfo_x86 *c)
cpu_has(c, X86_FEATURE_AUTOIBRS))
WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
- zenbleed_check(c);
-
- if (cpu_has_amd_erratum(c, amd_div0)) {
- pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
- setup_force_cpu_bug(X86_BUG_DIV0);
- }
-
- if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
- cpu_has_amd_erratum(c, amd_erratum_1485))
- msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
+ /* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
+ clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
}
#ifdef CONFIG_X86_32
@@ -1315,7 +1315,7 @@ static void zenbleed_check_cpu(void *unused)
{
struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
- zenbleed_check(c);
+ zen2_zenbleed_check(c);
}
void amd_check_microcode(void)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index b14fc8c1c953..94bff381ef20 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -188,45 +188,37 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
* TLS descriptors are currently at a different place compared to i386.
* Hopefully nobody expects them at a fixed place (Wine?)
*/
- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(DESC_CODE64, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(DESC_DATA64, 0, 0xfffff),
+ [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
+ [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(DESC_DATA64 | DESC_USER, 0, 0xfffff),
+ [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(DESC_CODE64 | DESC_USER, 0, 0xfffff),
#else
- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
+ [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
+ [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(DESC_DATA32 | DESC_USER, 0, 0xfffff),
/*
* Segments used for calling PnP BIOS have byte granularity.
 * Their code segments and data segments have fixed 64k limits,
* the transfer segment sizes are set at run time.
*/
- /* 32-bit code */
- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
- /* 16-bit code */
- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
- /* 16-bit data */
- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
- /* 16-bit data */
- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
- /* 16-bit data */
- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
+ [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
+ [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
+ [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(DESC_DATA16, 0, 0xffff),
+ [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
+ [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
/*
* The APM segments have byte granularity and their bases
* are set at run time. All have 64k limits.
*/
- /* 32-bit code */
- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
- /* 16-bit code */
- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
- /* data */
- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
-
- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+ [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
+ [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
+ [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(DESC_DATA32_BIOS, 0, 0xffff),
+
+ [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
+ [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
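The DESC_* names replace 16-bit magic flag values such as 0xc09b. A small userspace sketch (not kernel code) that decodes those old magic values into the descriptor fields the named macros are assumed to spell out; the authoritative bit definitions live in the kernel's descriptor headers, so treat the field layout here as an illustration of the legacy GDT_ENTRY_INIT() flags argument only.

/* Illustrative sketch only; decodes the legacy 16-bit flags argument. */
#include <stdio.h>

static void decode_gdt_flags(unsigned int flags)
{
        printf("flags 0x%04x: type=%#x S=%u DPL=%u P=%u AVL=%u L=%u D/B=%u G=%u\n",
               flags,
               flags & 0xf,             /* segment type (code/data, R/W, accessed) */
               (flags >> 4) & 1,        /* descriptor type: 1 = code/data          */
               (flags >> 5) & 3,        /* privilege level                         */
               (flags >> 7) & 1,        /* present                                 */
               (flags >> 12) & 1,       /* available for software                  */
               (flags >> 13) & 1,       /* 64-bit code segment                     */
               (flags >> 14) & 1,       /* default operand size (32-bit)           */
               (flags >> 15) & 1);      /* granularity (4K)                        */
}

int main(void)
{
        decode_gdt_flags(0xc09b);       /* old GDT_ENTRY_KERNEL32_CS flags */
        decode_gdt_flags(0xa09b);       /* old GDT_ENTRY_KERNEL_CS flags   */
        decode_gdt_flags(0xc093);       /* old GDT_ENTRY_KERNEL_DS flags   */
        return 0;
}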
@@ -1856,6 +1848,13 @@ static void identify_cpu(struct cpuinfo_x86 *c)
c->topo.apicid = apic->phys_pkg_id(c->topo.initial_apicid, 0);
#endif
+
+ /*
+ * Set default APIC and TSC_DEADLINE MSR fencing flag. AMD and
+ * Hygon will clear it in ->c_init() below.
+ */
+ set_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
+
/*
* Vendor-specific initialization. In this section we
* canonicalize the feature flags, meaning if there are
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index 6f247d66758d..f0cd95502faa 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -354,6 +354,9 @@ static void init_hygon(struct cpuinfo_x86 *c)
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
check_null_seg_clears_base(c);
+
+ /* Hygon CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
+ clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
}
static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel_epb.c b/arch/x86/kernel/cpu/intel_epb.c
index e4c3ba91321c..f18d35fe27a9 100644
--- a/arch/x86/kernel/cpu/intel_epb.c
+++ b/arch/x86/kernel/cpu/intel_epb.c
@@ -237,4 +237,4 @@ err_out_online:
cpuhp_remove_state(CPUHP_AP_X86_INTEL_EPB_ONLINE);
return ret;
}
-subsys_initcall(intel_epb_init);
+late_initcall(intel_epb_init);
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index f3517b8a8e91..2b46eb0fdf3a 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -87,42 +87,40 @@ struct smca_bank {
static DEFINE_PER_CPU_READ_MOSTLY(struct smca_bank[MAX_NR_BANKS], smca_banks);
static DEFINE_PER_CPU_READ_MOSTLY(u8[N_SMCA_BANK_TYPES], smca_bank_counts);
-struct smca_bank_name {
- const char *name; /* Short name for sysfs */
- const char *long_name; /* Long name for pretty-printing */
-};
-
-static struct smca_bank_name smca_names[] = {
- [SMCA_LS ... SMCA_LS_V2] = { "load_store", "Load Store Unit" },
- [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" },
- [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" },
- [SMCA_DE] = { "decode_unit", "Decode Unit" },
- [SMCA_RESERVED] = { "reserved", "Reserved" },
- [SMCA_EX] = { "execution_unit", "Execution Unit" },
- [SMCA_FP] = { "floating_point", "Floating Point Unit" },
- [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" },
- [SMCA_CS ... SMCA_CS_V2] = { "coherent_slave", "Coherent Slave" },
- [SMCA_PIE] = { "pie", "Power, Interrupts, etc." },
+static const char * const smca_names[] = {
+ [SMCA_LS ... SMCA_LS_V2] = "load_store",
+ [SMCA_IF] = "insn_fetch",
+ [SMCA_L2_CACHE] = "l2_cache",
+ [SMCA_DE] = "decode_unit",
+ [SMCA_RESERVED] = "reserved",
+ [SMCA_EX] = "execution_unit",
+ [SMCA_FP] = "floating_point",
+ [SMCA_L3_CACHE] = "l3_cache",
+ [SMCA_CS ... SMCA_CS_V2] = "coherent_slave",
+ [SMCA_PIE] = "pie",
/* UMC v2 is separate because both of them can exist in a single system. */
- [SMCA_UMC] = { "umc", "Unified Memory Controller" },
- [SMCA_UMC_V2] = { "umc_v2", "Unified Memory Controller v2" },
- [SMCA_PB] = { "param_block", "Parameter Block" },
- [SMCA_PSP ... SMCA_PSP_V2] = { "psp", "Platform Security Processor" },
- [SMCA_SMU ... SMCA_SMU_V2] = { "smu", "System Management Unit" },
- [SMCA_MP5] = { "mp5", "Microprocessor 5 Unit" },
- [SMCA_MPDMA] = { "mpdma", "MPDMA Unit" },
- [SMCA_NBIO] = { "nbio", "Northbridge IO Unit" },
- [SMCA_PCIE ... SMCA_PCIE_V2] = { "pcie", "PCI Express Unit" },
- [SMCA_XGMI_PCS] = { "xgmi_pcs", "Ext Global Memory Interconnect PCS Unit" },
- [SMCA_NBIF] = { "nbif", "NBIF Unit" },
- [SMCA_SHUB] = { "shub", "System Hub Unit" },
- [SMCA_SATA] = { "sata", "SATA Unit" },
- [SMCA_USB] = { "usb", "USB Unit" },
- [SMCA_GMI_PCS] = { "gmi_pcs", "Global Memory Interconnect PCS Unit" },
- [SMCA_XGMI_PHY] = { "xgmi_phy", "Ext Global Memory Interconnect PHY Unit" },
- [SMCA_WAFL_PHY] = { "wafl_phy", "WAFL PHY Unit" },
- [SMCA_GMI_PHY] = { "gmi_phy", "Global Memory Interconnect PHY Unit" },
+ [SMCA_UMC] = "umc",
+ [SMCA_UMC_V2] = "umc_v2",
+ [SMCA_MA_LLC] = "ma_llc",
+ [SMCA_PB] = "param_block",
+ [SMCA_PSP ... SMCA_PSP_V2] = "psp",
+ [SMCA_SMU ... SMCA_SMU_V2] = "smu",
+ [SMCA_MP5] = "mp5",
+ [SMCA_MPDMA] = "mpdma",
+ [SMCA_NBIO] = "nbio",
+ [SMCA_PCIE ... SMCA_PCIE_V2] = "pcie",
+ [SMCA_XGMI_PCS] = "xgmi_pcs",
+ [SMCA_NBIF] = "nbif",
+ [SMCA_SHUB] = "shub",
+ [SMCA_SATA] = "sata",
+ [SMCA_USB] = "usb",
+ [SMCA_USR_DP] = "usr_dp",
+ [SMCA_USR_CP] = "usr_cp",
+ [SMCA_GMI_PCS] = "gmi_pcs",
+ [SMCA_XGMI_PHY] = "xgmi_phy",
+ [SMCA_WAFL_PHY] = "wafl_phy",
+ [SMCA_GMI_PHY] = "gmi_phy",
};
static const char *smca_get_name(enum smca_bank_types t)
@@ -130,17 +128,8 @@ static const char *smca_get_name(enum smca_bank_types t)
if (t >= N_SMCA_BANK_TYPES)
return NULL;
- return smca_names[t].name;
-}
-
-const char *smca_get_long_name(enum smca_bank_types t)
-{
- if (t >= N_SMCA_BANK_TYPES)
- return NULL;
-
- return smca_names[t].long_name;
+ return smca_names[t];
}
-EXPORT_SYMBOL_GPL(smca_get_long_name);
enum smca_bank_types smca_get_bank_type(unsigned int cpu, unsigned int bank)
{
@@ -178,6 +167,7 @@ static const struct smca_hwid smca_hwid_mcatypes[] = {
{ SMCA_CS, HWID_MCATYPE(0x2E, 0x0) },
{ SMCA_PIE, HWID_MCATYPE(0x2E, 0x1) },
{ SMCA_CS_V2, HWID_MCATYPE(0x2E, 0x2) },
+ { SMCA_MA_LLC, HWID_MCATYPE(0x2E, 0x4) },
/* Unified Memory Controller MCA type */
{ SMCA_UMC, HWID_MCATYPE(0x96, 0x0) },
@@ -212,6 +202,8 @@ static const struct smca_hwid smca_hwid_mcatypes[] = {
{ SMCA_SHUB, HWID_MCATYPE(0x80, 0x0) },
{ SMCA_SATA, HWID_MCATYPE(0xA8, 0x0) },
{ SMCA_USB, HWID_MCATYPE(0xAA, 0x0) },
+ { SMCA_USR_DP, HWID_MCATYPE(0x170, 0x0) },
+ { SMCA_USR_CP, HWID_MCATYPE(0x180, 0x0) },
{ SMCA_GMI_PCS, HWID_MCATYPE(0x241, 0x0) },
{ SMCA_XGMI_PHY, HWID_MCATYPE(0x259, 0x0) },
{ SMCA_WAFL_PHY, HWID_MCATYPE(0x267, 0x0) },
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 7b397370b4d6..fd5ce12c4f9a 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -44,6 +44,7 @@
#include <linux/sync_core.h>
#include <linux/task_work.h>
#include <linux/hardirq.h>
+#include <linux/kexec.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
@@ -233,6 +234,7 @@ static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
struct llist_node *pending;
struct mce_evt_llist *l;
int apei_err = 0;
+ struct page *p;
/*
* Allow instrumentation around external facilities usage. Not that it
@@ -286,6 +288,20 @@ static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
if (!fake_panic) {
if (panic_timeout == 0)
panic_timeout = mca_cfg.panic_timeout;
+
+ /*
+ * Kdump skips the poisoned page in order to avoid
+ * touching the error bits again. Poison the page even
+ * if the error is fatal and the machine is about to
+ * panic.
+ */
+ if (kexec_crash_loaded()) {
+ if (final && (final->status & MCI_STATUS_ADDRV)) {
+ p = pfn_to_online_page(final->addr >> PAGE_SHIFT);
+ if (p)
+ SetPageHWPoison(p);
+ }
+ }
panic(msg);
} else
pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
@@ -670,6 +686,16 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
barrier();
m.status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
+ /*
+ * Update storm tracking here, before checking for the
+ * MCI_STATUS_VAL bit. Valid corrected errors count
+ * towards declaring, or maintaining, storm status. No
+ * error in a bank counts towards avoiding, or ending,
+ * storm status.
+ */
+ if (!mca_cfg.cmci_disabled)
+ mce_track_storm(&m);
+
/* If this entry is not valid, ignore it */
if (!(m.status & MCI_STATUS_VAL))
continue;
@@ -1601,13 +1627,6 @@ static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);
-static unsigned long mce_adjust_timer_default(unsigned long interval)
-{
- return interval;
-}
-
-static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
-
static void __start_timer(struct timer_list *t, unsigned long interval)
{
unsigned long when = jiffies + interval;
@@ -1637,15 +1656,9 @@ static void mce_timer_fn(struct timer_list *t)
iv = __this_cpu_read(mce_next_interval);
- if (mce_available(this_cpu_ptr(&cpu_info))) {
+ if (mce_available(this_cpu_ptr(&cpu_info)))
mc_poll_banks();
- if (mce_intel_cmci_poll()) {
- iv = mce_adjust_timer(iv);
- goto done;
- }
- }
-
/*
* Alert userspace if needed. If we logged an MCE, reduce the polling
* interval, otherwise increase the polling interval.
@@ -1655,23 +1668,29 @@ static void mce_timer_fn(struct timer_list *t)
else
iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
-done:
- __this_cpu_write(mce_next_interval, iv);
- __start_timer(t, iv);
+ if (mce_get_storm_mode()) {
+ __start_timer(t, HZ);
+ } else {
+ __this_cpu_write(mce_next_interval, iv);
+ __start_timer(t, iv);
+ }
}
/*
- * Ensure that the timer is firing in @interval from now.
+ * When a storm starts on any bank on this CPU, switch to polling
+ * once per second. When the storm ends, revert to the default
+ * polling interval.
*/
-void mce_timer_kick(unsigned long interval)
+void mce_timer_kick(bool storm)
{
struct timer_list *t = this_cpu_ptr(&mce_timer);
- unsigned long iv = __this_cpu_read(mce_next_interval);
- __start_timer(t, interval);
+ mce_set_storm_mode(storm);
- if (interval < iv)
- __this_cpu_write(mce_next_interval, interval);
+ if (storm)
+ __start_timer(t, HZ);
+ else
+ __this_cpu_write(mce_next_interval, check_interval * HZ);
}
/* Must not be called in IRQ context where del_timer_sync() can deadlock */
@@ -1995,7 +2014,6 @@ static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c)
intel_init_cmci();
intel_init_lmce();
- mce_adjust_timer = cmci_intel_adjust_timer;
}
static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c)
@@ -2008,7 +2026,6 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:
mce_intel_feature_init(c);
- mce_adjust_timer = cmci_intel_adjust_timer;
break;
case X86_VENDOR_AMD: {
@@ -2568,9 +2585,6 @@ static int mce_device_create(unsigned int cpu)
int err;
int i, j;
- if (!mce_available(&boot_cpu_data))
- return -EIO;
-
dev = per_cpu(mce_device, cpu);
if (dev)
return 0;
@@ -2665,8 +2679,6 @@ static void mce_reenable_cpu(void)
static int mce_cpu_dead(unsigned int cpu)
{
- mce_intel_hcpu_update(cpu);
-
/* intentionally ignoring frozen here */
if (!cpuhp_tasks_frozen)
cmci_rediscover();
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 4d8d4bcf915d..72f0695c3dc1 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -746,6 +746,7 @@ static void check_hw_inj_possible(void)
wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), status);
rdmsrl_safe(mca_msr_reg(bank, MCA_STATUS), &status);
+ wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), 0);
if (!status) {
hw_injection_possible = false;
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index 52bce533ddcc..399b62e223d2 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -42,15 +42,6 @@
static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
/*
- * CMCI storm detection backoff counter
- *
- * During storm, we reset this counter to INITIAL_CHECK_INTERVAL in case we've
- * encountered an error. If not, we decrement it by one. We signal the end of
- * the CMCI storm when it reaches 0.
- */
-static DEFINE_PER_CPU(int, cmci_backoff_cnt);
-
-/*
* cmci_discover_lock protects against parallel discovery attempts
* which could race against each other.
*/
@@ -63,22 +54,26 @@ static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
*/
static DEFINE_SPINLOCK(cmci_poll_lock);
+/* Linux non-storm CMCI threshold (may be overridden by BIOS) */
#define CMCI_THRESHOLD 1
-#define CMCI_POLL_INTERVAL (30 * HZ)
-#define CMCI_STORM_INTERVAL (HZ)
-#define CMCI_STORM_THRESHOLD 15
-static DEFINE_PER_CPU(unsigned long, cmci_time_stamp);
-static DEFINE_PER_CPU(unsigned int, cmci_storm_cnt);
-static DEFINE_PER_CPU(unsigned int, cmci_storm_state);
-
-enum {
- CMCI_STORM_NONE,
- CMCI_STORM_ACTIVE,
- CMCI_STORM_SUBSIDED,
-};
+/*
+ * MCi_CTL2 threshold for each bank when there is no storm.
+ * Default value for each bank may have been set by BIOS.
+ */
+static u16 cmci_threshold[MAX_NR_BANKS];
-static atomic_t cmci_storm_on_cpus;
+/*
+ * High threshold to limit CMCI rate during storms. Max supported is
+ * 0x7FFF. Use this slightly smaller value so it has a distinctive
+ * signature when someone asks "Why am I not seeing all corrected errors?"
+ * A high threshold is used instead of just disabling CMCI for a
+ * bank because both corrected and uncorrected errors may be logged
+ * in the same bank and signalled with CMCI. The threshold only applies
+ * to corrected errors, so keeping CMCI enabled means that uncorrected
+ * errors will still be processed in a timely fashion.
+ */
+#define CMCI_STORM_THRESHOLD 32749
static int cmci_supported(int *banks)
{
@@ -134,204 +129,166 @@ static bool lmce_supported(void)
return tmp & FEAT_CTL_LMCE_ENABLED;
}
-bool mce_intel_cmci_poll(void)
+/*
+ * Set a new CMCI threshold value. Preserve the state of the
+ * MCI_CTL2_CMCI_EN bit in case this happens during a
+ * cmci_rediscover() operation.
+ */
+static void cmci_set_threshold(int bank, int thresh)
{
- if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
- return false;
-
- /*
- * Reset the counter if we've logged an error in the last poll
- * during the storm.
- */
- if (machine_check_poll(0, this_cpu_ptr(&mce_banks_owned)))
- this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);
- else
- this_cpu_dec(cmci_backoff_cnt);
+ unsigned long flags;
+ u64 val;
- return true;
+ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+ rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
+ wrmsrl(MSR_IA32_MCx_CTL2(bank), val | thresh);
+ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
-void mce_intel_hcpu_update(unsigned long cpu)
+void mce_intel_handle_storm(int bank, bool on)
{
- if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE)
- atomic_dec(&cmci_storm_on_cpus);
+ if (on)
+ cmci_set_threshold(bank, CMCI_STORM_THRESHOLD);
+ else
+ cmci_set_threshold(bank, cmci_threshold[bank]);
+}
- per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
+/*
+ * The interrupt handler. This is called on every event.
+ * Just call the poller directly to log any events.
+ * This could in theory increase the threshold under high load,
+ * but doesn't for now.
+ */
+static void intel_threshold_interrupt(void)
+{
+ machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
}
-static void cmci_toggle_interrupt_mode(bool on)
+/*
+ * Check all the reasons why current CPU cannot claim
+ * ownership of a bank.
+ * 1: CPU already owns this bank
+ * 2: BIOS owns this bank
+ * 3: Some other CPU owns this bank
+ */
+static bool cmci_skip_bank(int bank, u64 *val)
{
- unsigned long flags, *owned;
- int bank;
- u64 val;
+ unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
- raw_spin_lock_irqsave(&cmci_discover_lock, flags);
- owned = this_cpu_ptr(mce_banks_owned);
- for_each_set_bit(bank, owned, MAX_NR_BANKS) {
- rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ if (test_bit(bank, owned))
+ return true;
- if (on)
- val |= MCI_CTL2_CMCI_EN;
- else
- val &= ~MCI_CTL2_CMCI_EN;
+ /* Skip banks in firmware first mode */
+ if (test_bit(bank, mce_banks_ce_disabled))
+ return true;
- wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
- }
- raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
-}
+ rdmsrl(MSR_IA32_MCx_CTL2(bank), *val);
-unsigned long cmci_intel_adjust_timer(unsigned long interval)
-{
- if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
- (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) {
- mce_notify_irq();
- return CMCI_STORM_INTERVAL;
+ /* Already owned by someone else? */
+ if (*val & MCI_CTL2_CMCI_EN) {
+ clear_bit(bank, owned);
+ __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
+ return true;
}
- switch (__this_cpu_read(cmci_storm_state)) {
- case CMCI_STORM_ACTIVE:
-
- /*
- * We switch back to interrupt mode once the poll timer has
- * silenced itself. That means no events recorded and the timer
- * interval is back to our poll interval.
- */
- __this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
- if (!atomic_sub_return(1, &cmci_storm_on_cpus))
- pr_notice("CMCI storm subsided: switching to interrupt mode\n");
+ return false;
+}
- fallthrough;
+/*
+ * Decide which CMCI interrupt threshold to use:
+ * 1: If this bank is in storm mode from whichever CPU was
+ * the previous owner, stay in storm mode.
+ * 2: If ignoring any threshold set by BIOS, set Linux default
+ * 3: Try to honor BIOS threshold (unless buggy BIOS set it at zero).
+ */
+static u64 cmci_pick_threshold(u64 val, int *bios_zero_thresh)
+{
+ if ((val & MCI_CTL2_CMCI_THRESHOLD_MASK) == CMCI_STORM_THRESHOLD)
+ return val;
- case CMCI_STORM_SUBSIDED:
+ if (!mca_cfg.bios_cmci_threshold) {
+ val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
+ val |= CMCI_THRESHOLD;
+ } else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
/*
- * We wait for all CPUs to go back to SUBSIDED state. When that
- * happens we switch back to interrupt mode.
+ * If bios_cmci_threshold boot option was specified
+ * but the threshold is zero, we'll try to initialize
+ * it to 1.
*/
- if (!atomic_read(&cmci_storm_on_cpus)) {
- __this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
- cmci_toggle_interrupt_mode(true);
- cmci_recheck();
- }
- return CMCI_POLL_INTERVAL;
- default:
-
- /* We have shiny weather. Let the poll do whatever it thinks. */
- return interval;
+ *bios_zero_thresh = 1;
+ val |= CMCI_THRESHOLD;
}
+
+ return val;
}
-static bool cmci_storm_detect(void)
+/*
+ * Try to claim ownership of a bank.
+ */
+static void cmci_claim_bank(int bank, u64 val, int bios_zero_thresh, int *bios_wrong_thresh)
{
- unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
- unsigned long ts = __this_cpu_read(cmci_time_stamp);
- unsigned long now = jiffies;
- int r;
+ struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);
- if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE)
- return true;
+ val |= MCI_CTL2_CMCI_EN;
+ wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
- if (time_before_eq(now, ts + CMCI_STORM_INTERVAL)) {
- cnt++;
- } else {
- cnt = 1;
- __this_cpu_write(cmci_time_stamp, now);
+ /* If the enable bit did not stick, this bank should be polled. */
+ if (!(val & MCI_CTL2_CMCI_EN)) {
+ WARN_ON(!test_bit(bank, this_cpu_ptr(mce_poll_banks)));
+ storm->banks[bank].poll_only = true;
+ return;
}
- __this_cpu_write(cmci_storm_cnt, cnt);
- if (cnt <= CMCI_STORM_THRESHOLD)
- return false;
-
- cmci_toggle_interrupt_mode(false);
- __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
- r = atomic_add_return(1, &cmci_storm_on_cpus);
- mce_timer_kick(CMCI_STORM_INTERVAL);
- this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);
+ /* This CPU successfully set the enable bit. */
+ set_bit(bank, (void *)this_cpu_ptr(&mce_banks_owned));
- if (r == 1)
- pr_notice("CMCI storm detected: switching to poll mode\n");
- return true;
-}
+ if ((val & MCI_CTL2_CMCI_THRESHOLD_MASK) == CMCI_STORM_THRESHOLD) {
+ pr_notice("CPU%d BANK%d CMCI inherited storm\n", smp_processor_id(), bank);
+ mce_inherit_storm(bank);
+ cmci_storm_begin(bank);
+ } else {
+ __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
+ }
-/*
- * The interrupt handler. This is called on every event.
- * Just call the poller directly to log any events.
- * This could in theory increase the threshold under high load,
- * but doesn't for now.
- */
-static void intel_threshold_interrupt(void)
-{
- if (cmci_storm_detect())
- return;
+ /*
+ * We are able to set thresholds for some banks that
+ * had a threshold of 0. This means the BIOS has not
+ * set the thresholds properly or does not work with
+ * this boot option. Note down now and report later.
+ */
+ if (mca_cfg.bios_cmci_threshold && bios_zero_thresh &&
+ (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
+ *bios_wrong_thresh = 1;
- machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
+ /* Save default threshold for each bank */
+ if (cmci_threshold[bank] == 0)
+ cmci_threshold[bank] = val & MCI_CTL2_CMCI_THRESHOLD_MASK;
}
/*
* Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
* on this CPU. Use the algorithm recommended in the SDM to discover shared
- * banks.
+ * banks. Called during initial bootstrap, and also for hotplug CPU operations
+ * to rediscover/reassign machine check banks.
*/
static void cmci_discover(int banks)
{
- unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
+ int bios_wrong_thresh = 0;
unsigned long flags;
int i;
- int bios_wrong_thresh = 0;
raw_spin_lock_irqsave(&cmci_discover_lock, flags);
for (i = 0; i < banks; i++) {
u64 val;
int bios_zero_thresh = 0;
- if (test_bit(i, owned))
+ if (cmci_skip_bank(i, &val))
continue;
- /* Skip banks in firmware first mode */
- if (test_bit(i, mce_banks_ce_disabled))
- continue;
-
- rdmsrl(MSR_IA32_MCx_CTL2(i), val);
-
- /* Already owned by someone else? */
- if (val & MCI_CTL2_CMCI_EN) {
- clear_bit(i, owned);
- __clear_bit(i, this_cpu_ptr(mce_poll_banks));
- continue;
- }
-
- if (!mca_cfg.bios_cmci_threshold) {
- val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
- val |= CMCI_THRESHOLD;
- } else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
- /*
- * If bios_cmci_threshold boot option was specified
- * but the threshold is zero, we'll try to initialize
- * it to 1.
- */
- bios_zero_thresh = 1;
- val |= CMCI_THRESHOLD;
- }
-
- val |= MCI_CTL2_CMCI_EN;
- wrmsrl(MSR_IA32_MCx_CTL2(i), val);
- rdmsrl(MSR_IA32_MCx_CTL2(i), val);
-
- /* Did the enable bit stick? -- the bank supports CMCI */
- if (val & MCI_CTL2_CMCI_EN) {
- set_bit(i, owned);
- __clear_bit(i, this_cpu_ptr(mce_poll_banks));
- /*
- * We are able to set thresholds for some banks that
- * had a threshold of 0. This means the BIOS has not
- * set the thresholds properly or does not work with
- * this boot option. Note down now and report later.
- */
- if (mca_cfg.bios_cmci_threshold && bios_zero_thresh &&
- (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
- bios_wrong_thresh = 1;
- } else {
- WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
- }
+ val = cmci_pick_threshold(val, &bios_zero_thresh);
+ cmci_claim_bank(i, val, bios_zero_thresh, &bios_wrong_thresh);
}
raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
@@ -370,6 +327,9 @@ static void __cmci_disable_bank(int bank)
val &= ~MCI_CTL2_CMCI_EN;
wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
__clear_bit(bank, this_cpu_ptr(mce_banks_owned));
+
+ if ((val & MCI_CTL2_CMCI_THRESHOLD_MASK) == CMCI_STORM_THRESHOLD)
+ cmci_storm_end(bank);
}
/*
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index e13a26c9c0ac..01f8f03969e6 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -41,9 +41,7 @@ struct dentry *mce_get_debugfs_dir(void);
extern mce_banks_t mce_banks_ce_disabled;
#ifdef CONFIG_X86_MCE_INTEL
-unsigned long cmci_intel_adjust_timer(unsigned long interval);
-bool mce_intel_cmci_poll(void);
-void mce_intel_hcpu_update(unsigned long cpu);
+void mce_intel_handle_storm(int bank, bool on);
void cmci_disable_bank(int bank);
void intel_init_cmci(void);
void intel_init_lmce(void);
@@ -51,9 +49,7 @@ void intel_clear_lmce(void);
bool intel_filter_mce(struct mce *m);
bool intel_mce_usable_address(struct mce *m);
#else
-# define cmci_intel_adjust_timer mce_adjust_timer_default
-static inline bool mce_intel_cmci_poll(void) { return false; }
-static inline void mce_intel_hcpu_update(unsigned long cpu) { }
+static inline void mce_intel_handle_storm(int bank, bool on) { }
static inline void cmci_disable_bank(int bank) { }
static inline void intel_init_cmci(void) { }
static inline void intel_init_lmce(void) { }
@@ -62,7 +58,63 @@ static inline bool intel_filter_mce(struct mce *m) { return false; }
static inline bool intel_mce_usable_address(struct mce *m) { return false; }
#endif
-void mce_timer_kick(unsigned long interval);
+void mce_timer_kick(bool storm);
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+void cmci_storm_begin(unsigned int bank);
+void cmci_storm_end(unsigned int bank);
+void mce_track_storm(struct mce *mce);
+void mce_inherit_storm(unsigned int bank);
+bool mce_get_storm_mode(void);
+void mce_set_storm_mode(bool storm);
+#else
+static inline void cmci_storm_begin(unsigned int bank) {}
+static inline void cmci_storm_end(unsigned int bank) {}
+static inline void mce_track_storm(struct mce *mce) {}
+static inline void mce_inherit_storm(unsigned int bank) {}
+static inline bool mce_get_storm_mode(void) { return false; }
+static inline void mce_set_storm_mode(bool storm) {}
+#endif
+
+/*
+ * history: Bitmask tracking error occurrences. Each set bit
+ * represents an error seen.
+ *
+ * timestamp: Last time (in jiffies) that the bank was polled.
+ * in_storm_mode: Is this bank in storm mode?
+ * poll_only: Bank does not support CMCI, skip storm tracking.
+ */
+struct storm_bank {
+ u64 history;
+ u64 timestamp;
+ bool in_storm_mode;
+ bool poll_only;
+};
+
+#define NUM_HISTORY_BITS (sizeof(u64) * BITS_PER_BYTE)
+
+/* How many errors within the history buffer mark the start of a storm. */
+#define STORM_BEGIN_THRESHOLD 5
+
+/*
+ * How many polls of machine check bank without an error before declaring
+ * the storm is over. Since it is tracked by the bitmasks in the history
+ * field of struct storm_bank the mask is 30 bits [0 ... 29].
+ */
+#define STORM_END_POLL_THRESHOLD 29
+
+/*
+ * banks: per-cpu, per-bank details
+ * stormy_bank_count: count of MC banks in storm state
+ * poll_mode: CPU is in poll mode
+ */
+struct mca_storm_desc {
+ struct storm_bank banks[MAX_NR_BANKS];
+ u8 stormy_bank_count;
+ bool poll_mode;
+};
+
+DECLARE_PER_CPU(struct mca_storm_desc, storm_desc);
#ifdef CONFIG_ACPI_APEI
int apei_write_mce(struct mce *m);
diff --git a/arch/x86/kernel/cpu/mce/threshold.c b/arch/x86/kernel/cpu/mce/threshold.c
index ef4e7bb5fd88..89e31e1e5c9c 100644
--- a/arch/x86/kernel/cpu/mce/threshold.c
+++ b/arch/x86/kernel/cpu/mce/threshold.c
@@ -29,3 +29,118 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_threshold)
trace_threshold_apic_exit(THRESHOLD_APIC_VECTOR);
apic_eoi();
}
+
+DEFINE_PER_CPU(struct mca_storm_desc, storm_desc);
+
+void mce_inherit_storm(unsigned int bank)
+{
+ struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);
+
+ /*
+ * Previous CPU owning this bank had put it into storm mode,
+ * but the precise history of that storm is unknown. Assume
+ * the worst (all recent polls of the bank found a valid error
+ * logged). This will avoid the new owner prematurely declaring
+ * the storm has ended.
+ */
+ storm->banks[bank].history = ~0ull;
+ storm->banks[bank].timestamp = jiffies;
+}
+
+bool mce_get_storm_mode(void)
+{
+ return __this_cpu_read(storm_desc.poll_mode);
+}
+
+void mce_set_storm_mode(bool storm)
+{
+ __this_cpu_write(storm_desc.poll_mode, storm);
+}
+
+static void mce_handle_storm(unsigned int bank, bool on)
+{
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ mce_intel_handle_storm(bank, on);
+ break;
+ }
+}
+
+void cmci_storm_begin(unsigned int bank)
+{
+ struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);
+
+ __set_bit(bank, this_cpu_ptr(mce_poll_banks));
+ storm->banks[bank].in_storm_mode = true;
+
+ /*
+ * If this is the first bank on this CPU to enter storm mode
+ * start polling.
+ */
+ if (++storm->stormy_bank_count == 1)
+ mce_timer_kick(true);
+}
+
+void cmci_storm_end(unsigned int bank)
+{
+ struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);
+
+ __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
+ storm->banks[bank].history = 0;
+ storm->banks[bank].in_storm_mode = false;
+
+ /* If no banks left in storm mode, stop polling. */
+ if (!this_cpu_dec_return(storm_desc.stormy_bank_count))
+ mce_timer_kick(false);
+}
+
+void mce_track_storm(struct mce *mce)
+{
+ struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);
+ unsigned long now = jiffies, delta;
+ unsigned int shift = 1;
+ u64 history = 0;
+
+ /* No tracking needed for banks that do not support CMCI */
+ if (storm->banks[mce->bank].poll_only)
+ return;
+
+ /*
+ * When a bank is in storm mode it is polled once per second and
+ * the history mask will record about the last minute of poll results.
+ * If it is not in storm mode, then the bank is only checked when
+ * there is a CMCI interrupt. Check how long it has been since
+ * this bank was last checked, and adjust the amount of "shift"
+ * to apply to history.
+ */
+ if (!storm->banks[mce->bank].in_storm_mode) {
+ delta = now - storm->banks[mce->bank].timestamp;
+ shift = (delta + HZ) / HZ;
+ }
+
+ /* If it has been a long time since the last poll, clear history. */
+ if (shift < NUM_HISTORY_BITS)
+ history = storm->banks[mce->bank].history << shift;
+
+ storm->banks[mce->bank].timestamp = now;
+
+ /* History keeps track of corrected errors. VAL=1 && UC=0 */
+ if ((mce->status & MCI_STATUS_VAL) && mce_is_correctable(mce))
+ history |= 1;
+
+ storm->banks[mce->bank].history = history;
+
+ if (storm->banks[mce->bank].in_storm_mode) {
+ if (history & GENMASK_ULL(STORM_END_POLL_THRESHOLD, 0))
+ return;
+ printk_deferred(KERN_NOTICE "CPU%d BANK%d CMCI storm subsided\n", smp_processor_id(), mce->bank);
+ mce_handle_storm(mce->bank, false);
+ cmci_storm_end(mce->bank);
+ } else {
+ if (hweight64(history) < STORM_BEGIN_THRESHOLD)
+ return;
+ printk_deferred(KERN_NOTICE "CPU%d BANK%d CMCI storm detected\n", smp_processor_id(), mce->bank);
+ mce_handle_storm(mce->bank, true);
+ cmci_storm_begin(mce->bank);
+ }
+}
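A reduced userspace sketch (not kernel code) of the history bookkeeping above: each poll shifts the 64-bit history left and records whether a corrected error was seen, a storm begins once five bits are set (STORM_BEGIN_THRESHOLD), and it ends once the low 30 bits (STORM_END_POLL_THRESHOLD = 29) are all clear. The elapsed-time shift applied to banks outside storm mode is omitted for brevity, and __builtin_popcountll stands in for hweight64().

/* Illustrative sketch only; single bank, no per-CPU or timestamp handling. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STORM_BEGIN_THRESHOLD           5
#define STORM_END_POLL_THRESHOLD        29

static uint64_t history;
static bool in_storm;

static void poll(bool saw_corrected_error)
{
        history = (history << 1) | (saw_corrected_error ? 1 : 0);

        if (!in_storm && __builtin_popcountll(history) >= STORM_BEGIN_THRESHOLD) {
                in_storm = true;
                printf("storm begins, history=%#llx\n", (unsigned long long)history);
        } else if (in_storm &&
                   !(history & ((1ull << (STORM_END_POLL_THRESHOLD + 1)) - 1))) {
                in_storm = false;
                printf("storm ends after 30 clean polls\n");
        }
}

int main(void)
{
        int i;

        for (i = 0; i < 5; i++)         /* five errors in a row -> storm begins  */
                poll(true);
        for (i = 0; i < 30; i++)        /* thirty clean polls -> storm is over   */
                poll(false);
        return 0;
}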
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 070426b9895f..857e608af641 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -370,14 +370,14 @@ static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info *
{
struct cpio_data cp;
+ intel_collect_cpu_info(&uci->cpu_sig);
+
if (!load_builtin_intel_microcode(&cp))
cp = find_microcode_in_initrd(ucode_path);
if (!(cp.data && cp.size))
return NULL;
- intel_collect_cpu_info(&uci->cpu_sig);
-
return scan_microcode(cp.data, cp.size, uci, save);
}
@@ -410,13 +410,13 @@ void __init load_ucode_intel_bsp(struct early_load_data *ed)
{
struct ucode_cpu_info uci;
- ed->old_rev = intel_get_microcode_revision();
-
uci.mc = get_microcode_blob(&uci, false);
- if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED)
- ucode_patch_va = UCODE_BSP_LOADED;
+ ed->old_rev = uci.cpu_sig.rev;
- ed->new_rev = uci.cpu_sig.rev;
+ if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED) {
+ ucode_patch_va = UCODE_BSP_LOADED;
+ ed->new_rev = uci.cpu_sig.rev;
+ }
}
void load_ucode_intel_ap(void)
@@ -457,12 +457,6 @@ static enum ucode_state apply_microcode_late(int cpu)
if (ret != UCODE_UPDATED && ret != UCODE_OK)
return ret;
- if (!cpu && uci->cpu_sig.rev != cur_rev) {
- pr_info("Updated to revision 0x%x, date = %04x-%02x-%02x\n",
- uci->cpu_sig.rev, mc->hdr.date & 0xffff, mc->hdr.date >> 24,
- (mc->hdr.date >> 16) & 0xff);
- }
-
cpu_data(cpu).microcode = uci->cpu_sig.rev;
if (!cpu)
boot_cpu_data.microcode = uci->cpu_sig.rev;
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 2d6aa5d2e3d7..d3524778a545 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -428,6 +428,10 @@ void __init mtrr_copy_map(void)
* from the x86_init.hyper.init_platform() hook. It can be called only once.
* The MTRR state can't be changed afterwards. To ensure that, X86_FEATURE_MTRR
* is cleared.
+ *
+ * @var: MTRR variable range array to use
+ * @num_var: length of the @var array
+ * @def_type: default caching type
*/
void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
mtrr_type def_type)
@@ -492,13 +496,15 @@ static u8 type_merge(u8 type, u8 new_type, u8 *uniform)
/**
* mtrr_type_lookup - look up memory type in MTRR
*
+ * @start: Begin of the physical address range
+ * @end: End of the physical address range
+ * @uniform: output argument:
+ * - 1: the returned MTRR type is valid for the whole region
+ * - 0: otherwise
+ *
* Return Values:
* MTRR_TYPE_(type) - The effective MTRR type for the region
* MTRR_TYPE_INVALID - MTRR is disabled
- *
- * Output Argument:
- * uniform - Set to 1 when the returned MTRR type is valid for the whole
- * region, set to 0 else.
*/
u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
{
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index 5d390df21440..b65ab214bdf5 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -581,7 +581,7 @@ err_out:
*
* Flush any outstanding enqueued EADD operations and perform EINIT. The
* Launch Enclave Public Key Hash MSRs are rewritten as necessary to match
- * the enclave's MRSIGNER, which is caculated from the provided sigstruct.
+ * the enclave's MRSIGNER, which is calculated from the provided sigstruct.
*
* Return:
* - 0: Success.
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index c92d88680dbf..b6b044356f1b 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -170,7 +170,7 @@ static int elf_header_exclude_ranges(struct crash_mem *cmem)
int ret = 0;
/* Exclude the low 1M because it is always reserved */
- ret = crash_exclude_mem_range(cmem, 0, (1<<20)-1);
+ ret = crash_exclude_mem_range(cmem, 0, SZ_1M - 1);
if (ret)
return ret;
@@ -198,8 +198,8 @@ static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
}
/* Prepare elf headers. Return addr and size */
-static int prepare_elf_headers(struct kimage *image, void **addr,
- unsigned long *sz, unsigned long *nr_mem_ranges)
+static int prepare_elf_headers(void **addr, unsigned long *sz,
+ unsigned long *nr_mem_ranges)
{
struct crash_mem *cmem;
int ret;
@@ -221,7 +221,7 @@ static int prepare_elf_headers(struct kimage *image, void **addr,
*nr_mem_ranges = cmem->nr_ranges;
/* By default prepare 64bit headers */
- ret = crash_prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);
+ ret = crash_prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);
out:
vfree(cmem);
@@ -349,7 +349,7 @@ int crash_load_segments(struct kimage *image)
.buf_max = ULONG_MAX, .top_down = false };
/* Prepare elf headers and add a segment */
- ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz, &pnum);
+ ret = prepare_elf_headers(&kbuf.buffer, &kbuf.bufsz, &pnum);
if (ret)
return ret;
@@ -386,8 +386,8 @@ int crash_load_segments(struct kimage *image)
if (ret)
return ret;
image->elf_load_addr = kbuf.mem;
- pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
- image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
+ kexec_dprintk("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
return ret;
}
@@ -452,7 +452,7 @@ void arch_crash_handle_hotplug_event(struct kimage *image)
* Create the new elfcorehdr reflecting the changes to CPU and/or
* memory resources.
*/
- if (prepare_elf_headers(image, &elfbuf, &elfsz, &nr_mem_ranges)) {
+ if (prepare_elf_headers(&elfbuf, &elfsz, &nr_mem_ranges)) {
pr_err("unable to create new elfcorehdr");
goto out;
}
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index a21a4d0ecc34..520deb411a70 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -308,7 +308,7 @@ EXPORT_SYMBOL_GPL(fpu_update_guest_xfd);
* Must be invoked from KVM after a VMEXIT before enabling interrupts when
* XFD write emulation is disabled. This is required because the guest can
* freely modify XFD and the state at VMEXIT is not guaranteed to be the
- * same as the state on VMENTER. So software state has to be udpated before
+ * same as the state on VMENTER. So software state has to be updated before
* any operation which depends on it can take place.
*
* Note: It can be invoked unconditionally even when write emulation is
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 05a110c97111..dc0956067944 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -71,9 +71,9 @@ EXPORT_SYMBOL(vmemmap_base);
* GDT used on the boot CPU before switching to virtual addresses.
*/
static struct desc_struct startup_gdt[GDT_ENTRIES] __initdata = {
- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(DESC_CODE64, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(DESC_DATA64, 0, 0xfffff),
};
/*
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 0f8103240fda..d4918d03efb4 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -114,6 +114,28 @@ SYM_CODE_START_NOALIGN(startup_64)
/* Form the CR3 value being sure to include the CR3 modifier */
addq $(early_top_pgt - __START_KERNEL_map), %rax
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ mov %rax, %rdi
+ mov %rax, %r14
+
+ addq phys_base(%rip), %rdi
+
+ /*
+ * For SEV guests: Verify that the C-bit is correct. A malicious
+ * hypervisor could lie about the C-bit position to perform a ROP
+ * attack on the guest by writing to the unencrypted stack and wait for
+ * the next RET instruction.
+ */
+ call sev_verify_cbit
+
+ /*
+ * Restore CR3 value without the phys_base which will be added
+ * below, before writing %cr3.
+ */
+ mov %r14, %rax
+#endif
+
jmp 1f
SYM_CODE_END(startup_64)
@@ -182,7 +204,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
/* Enable PAE mode, PSE, PGE and LA57 */
orl $(X86_CR4_PAE | X86_CR4_PSE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
- testl $1, __pgtable_l5_enabled(%rip)
+ testb $1, __pgtable_l5_enabled(%rip)
jz 1f
orl $X86_CR4_LA57, %ecx
1:
@@ -193,21 +215,12 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
addq phys_base(%rip), %rax
/*
- * For SEV guests: Verify that the C-bit is correct. A malicious
- * hypervisor could lie about the C-bit position to perform a ROP
- * attack on the guest by writing to the unencrypted stack and wait for
- * the next RET instruction.
- */
- movq %rax, %rdi
- call sev_verify_cbit
-
- /*
* Switch to new page-table
*
* For the boot CPU this switches to early_top_pgt which still has the
- * indentity mappings present. The secondary CPUs will switch to the
+ * identity mappings present. The secondary CPUs will switch to the
* init_top_pgt here, away from the trampoline_pgd and unmap the
- * indentity mapped ranges.
+ * identity mapped ranges.
*/
movq %rax, %cr3
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 41eecf180b7f..8ff2bf921519 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -707,7 +707,7 @@ static void __init hpet_select_clockevents(void)
hpet_base.nr_clockevents = 0;
- /* No point if MSI is disabled or CPU has an Always Runing APIC Timer */
+ /* No point if MSI is disabled or CPU has an Always Running APIC Timer */
if (hpet_msi_disable || boot_cpu_has(X86_FEATURE_ARAT))
return;
@@ -965,7 +965,7 @@ static bool __init mwait_pc10_supported(void)
* and per CPU timer interrupts.
*
* The probability that this problem is going to be solved in the
- * forseeable future is close to zero, so the kernel has to be cluttered
+ * foreseeable future is close to zero, so the kernel has to be cluttered
* with heuristics to keep up with the ever growing amount of hardware and
* firmware trainwrecks. Hopefully some day hardware people will understand
* that the approach of "This can be fixed in software" is not sustainable.
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index a61c12c01270..2a422e00ed4b 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -82,7 +82,7 @@ static int setup_cmdline(struct kimage *image, struct boot_params *params,
cmdline_ptr[cmdline_len - 1] = '\0';
- pr_debug("Final command line is: %s\n", cmdline_ptr);
+ kexec_dprintk("Final command line is: %s\n", cmdline_ptr);
cmdline_ptr_phys = bootparams_load_addr + cmdline_offset;
cmdline_low_32 = cmdline_ptr_phys & 0xffffffffUL;
cmdline_ext_32 = cmdline_ptr_phys >> 32;
@@ -272,7 +272,12 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
nr_e820_entries = params->e820_entries;
+ kexec_dprintk("E820 memmap:\n");
for (i = 0; i < nr_e820_entries; i++) {
+ kexec_dprintk("%016llx-%016llx (%d)\n",
+ params->e820_table[i].addr,
+ params->e820_table[i].addr + params->e820_table[i].size - 1,
+ params->e820_table[i].type);
if (params->e820_table[i].type != E820_TYPE_RAM)
continue;
start = params->e820_table[i].addr;
@@ -424,7 +429,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
* command line. Make sure it does not overflow
*/
if (cmdline_len + MAX_ELFCOREHDR_STR_LEN > header->cmdline_size) {
- pr_debug("Appending elfcorehdr=<addr> to command line exceeds maximum allowed length\n");
+ pr_err("Appending elfcorehdr=<addr> to command line exceeds maximum allowed length\n");
return ERR_PTR(-EINVAL);
}
@@ -445,7 +450,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
return ERR_PTR(ret);
}
- pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);
+ kexec_dprintk("Loaded purgatory at 0x%lx\n", pbuf.mem);
/*
@@ -490,8 +495,8 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
if (ret)
goto out_free_params;
bootparam_load_addr = kbuf.mem;
- pr_debug("Loaded boot_param, command line and misc at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
- bootparam_load_addr, kbuf.bufsz, kbuf.bufsz);
+ kexec_dprintk("Loaded boot_param, command line and misc at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ bootparam_load_addr, kbuf.bufsz, kbuf.memsz);
/* Load kernel */
kbuf.buffer = kernel + kern16_size;
@@ -505,8 +510,8 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
goto out_free_params;
kernel_load_addr = kbuf.mem;
- pr_debug("Loaded 64bit kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
- kernel_load_addr, kbuf.bufsz, kbuf.memsz);
+ kexec_dprintk("Loaded 64bit kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ kernel_load_addr, kbuf.bufsz, kbuf.memsz);
/* Load initrd high */
if (initrd) {
@@ -520,8 +525,8 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
goto out_free_params;
initrd_load_addr = kbuf.mem;
- pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
- initrd_load_addr, initrd_len, initrd_len);
+ kexec_dprintk("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ initrd_load_addr, initrd_len, initrd_len);
setup_initrd(params, initrd_load_addr, initrd_len);
}
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index e8babebad7b8..a0ce46c0a2d8 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -576,7 +576,8 @@ static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
{
unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];
- int3_emulate_call(regs, regs_get_register(regs, offs));
+ int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + p->ainsn.size);
+ int3_emulate_jmp(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);
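
The change above stops using int3_emulate_call(), which bakes in a CALL_INSN_SIZE (5-byte) return address, and instead pushes the address just past the probed instruction using its real length, p->ainsn.size, before jumping to the target. A runnable worked example of the arithmetic, with addresses and instruction length assumed for illustration (an indirect call such as "call *%rax" is only 2 bytes):

    #include <stdio.h>

    #define INT3_INSN_SIZE  1
    #define CALL_INSN_SIZE  5

    int main(void)
    {
            unsigned long probe_addr = 0x1000; /* assumed address of the probed insn */
            unsigned long insn_size  = 2;      /* e.g. "call *%rax" is 2 bytes */
            unsigned long trap_ip    = probe_addr + INT3_INSN_SIZE; /* ip after INT3 */

            unsigned long old_ret = trap_ip - INT3_INSN_SIZE + CALL_INSN_SIZE; /* 0x1005 */
            unsigned long new_ret = trap_ip - INT3_INSN_SIZE + insn_size;      /* 0x1002 */

            printf("old return address: %#lx, corrected return address: %#lx\n",
                   old_ret, new_ret);
            return 0;
    }
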
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 0ddb3bd0f1aa..dfe9945b9bec 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -803,8 +803,8 @@ extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t" \
"setne %al\n\t"
-DEFINE_PARAVIRT_ASM(__raw_callee_save___kvm_vcpu_is_preempted,
- PV_VCPU_PREEMPTED_ASM, .text);
+DEFINE_ASM_FUNC(__raw_callee_save___kvm_vcpu_is_preempted,
+ PV_VCPU_PREEMPTED_ASM, .text);
#endif
static void __init kvm_guest_init(void)
@@ -942,7 +942,7 @@ static void __init kvm_init_platform(void)
* Reset the host's shared pages list related to kernel
* specific page encryption status settings before we load a
* new kernel by kexec. Reset the page encryption status
- * during early boot intead of just before kexec to avoid SMP
+ * during early boot instead of just before kexec to avoid SMP
* races during kvm_pv_guest_cpu_reboot().
* NOTE: We cannot reset the complete shared pages list
* here as we need to retain the UEFI/OVMF firmware
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index fb8f52149be9..a95d0900e8c6 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -42,7 +42,7 @@ static int __init parse_no_kvmclock_vsyscall(char *arg)
}
early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
-/* Aligned to page sizes to match whats mapped via vsyscalls to userspace */
+/* Aligned to page sizes to match what's mapped via vsyscalls to userspace */
#define HVC_BOOT_ARRAY_SIZE \
(PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index adc67f98819a..7a814b41402d 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -7,7 +7,7 @@
* This handles calls from both 32bit and 64bit mode.
*
* Lock order:
- * contex.ldt_usr_sem
+ * context.ldt_usr_sem
* mmap_lock
* context.lock
*/
@@ -49,7 +49,7 @@ void load_mm_ldt(struct mm_struct *mm)
/*
* Any change to mm->context.ldt is followed by an IPI to all
* CPUs with the mm active. The LDT will not be freed until
- * after the IPI is handled by all such CPUs. This means that,
+ * after the IPI is handled by all such CPUs. This means that
* if the ldt_struct changes before we return, the values we see
* will be safe, and the new values will be loaded before we run
* any user code.
@@ -685,7 +685,7 @@ SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
}
/*
* The SYSCALL_DEFINE() macros give us an 'unsigned long'
- * return type, but tht ABI for sys_modify_ldt() expects
+ * return type, but the ABI for sys_modify_ldt() expects
* 'int'. This cast gives us an int-sized value in %rax
* for the return code. The 'unsigned' is necessary so
* the compiler does not try to sign-extend the negative
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 1a3e2c05a8a5..bc0a5348b4a6 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -42,12 +42,9 @@ struct init_pgtable_data {
static int mem_region_callback(struct resource *res, void *arg)
{
struct init_pgtable_data *data = arg;
- unsigned long mstart, mend;
-
- mstart = res->start;
- mend = mstart + resource_size(res) - 1;
- return kernel_ident_mapping_init(data->info, data->level4p, mstart, mend);
+ return kernel_ident_mapping_init(data->info, data->level4p,
+ res->start, res->end + 1);
}
static int
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 5f71a0cf4399..e18914c0e38a 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -276,7 +276,7 @@ int module_finalize(const Elf_Ehdr *hdr,
struct module *me)
{
const Elf_Shdr *s, *alt = NULL, *locks = NULL,
- *para = NULL, *orc = NULL, *orc_ip = NULL,
+ *orc = NULL, *orc_ip = NULL,
*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL,
*calls = NULL, *cfi = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
@@ -286,8 +286,6 @@ int module_finalize(const Elf_Ehdr *hdr,
alt = s;
if (!strcmp(".smp_locks", secstrings + s->sh_name))
locks = s;
- if (!strcmp(".parainstructions", secstrings + s->sh_name))
- para = s;
if (!strcmp(".orc_unwind", secstrings + s->sh_name))
orc = s;
if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
@@ -304,14 +302,6 @@ int module_finalize(const Elf_Ehdr *hdr,
ibt_endbr = s;
}
- /*
- * See alternative_instructions() for the ordering rules between the
- * various patching types.
- */
- if (para) {
- void *pseg = (void *)para->sh_addr;
- apply_paravirt(pseg, pseg + para->sh_size);
- }
if (retpolines || cfi) {
void *rseg = NULL, *cseg = NULL;
unsigned int rsize = 0, csize = 0;
@@ -341,7 +331,7 @@ int module_finalize(const Elf_Ehdr *hdr,
void *aseg = (void *)alt->sh_addr;
apply_alternatives(aseg, aseg + alt->sh_size);
}
- if (calls || para) {
+ if (calls || alt) {
struct callthunk_sites cs = {};
if (calls) {
@@ -349,9 +339,9 @@ int module_finalize(const Elf_Ehdr *hdr,
cs.call_end = (void *)calls->sh_addr + calls->sh_size;
}
- if (para) {
- cs.pv_start = (void *)para->sh_addr;
- cs.pv_end = (void *)para->sh_addr + para->sh_size;
+ if (alt) {
+ cs.alt_start = (void *)alt->sh_addr;
+ cs.alt_end = (void *)alt->sh_addr + alt->sh_size;
}
callthunks_patch_module_calls(&cs, me);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 97f1436c1a20..5358d43886ad 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -34,14 +34,8 @@
#include <asm/io_bitmap.h>
#include <asm/gsseg.h>
-/*
- * nop stub, which must not clobber anything *including the stack* to
- * avoid confusing the entry prologues.
- */
-DEFINE_PARAVIRT_ASM(_paravirt_nop, "", .entry.text);
-
/* stub always returning 0. */
-DEFINE_PARAVIRT_ASM(paravirt_ret0, "xor %eax,%eax", .entry.text);
+DEFINE_ASM_FUNC(paravirt_ret0, "xor %eax,%eax", .entry.text);
void __init default_banner(void)
{
@@ -49,26 +43,12 @@ void __init default_banner(void)
pv_info.name);
}
-/* Undefined instruction for dealing with missing ops pointers. */
-noinstr void paravirt_BUG(void)
-{
- BUG();
-}
-
-static unsigned paravirt_patch_call(void *insn_buff, const void *target,
- unsigned long addr, unsigned len)
-{
- __text_gen_insn(insn_buff, CALL_INSN_OPCODE,
- (void *)addr, target, CALL_INSN_SIZE);
- return CALL_INSN_SIZE;
-}
-
#ifdef CONFIG_PARAVIRT_XXL
-DEFINE_PARAVIRT_ASM(_paravirt_ident_64, "mov %rdi, %rax", .text);
-DEFINE_PARAVIRT_ASM(pv_native_save_fl, "pushf; pop %rax", .noinstr.text);
-DEFINE_PARAVIRT_ASM(pv_native_irq_disable, "cli", .noinstr.text);
-DEFINE_PARAVIRT_ASM(pv_native_irq_enable, "sti", .noinstr.text);
-DEFINE_PARAVIRT_ASM(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
+DEFINE_ASM_FUNC(_paravirt_ident_64, "mov %rdi, %rax", .text);
+DEFINE_ASM_FUNC(pv_native_save_fl, "pushf; pop %rax", .noinstr.text);
+DEFINE_ASM_FUNC(pv_native_irq_disable, "cli", .noinstr.text);
+DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
+DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
#endif
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
@@ -85,28 +65,6 @@ static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
tlb_remove_page(tlb, table);
}
-unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr,
- unsigned int len)
-{
- /*
- * Neat trick to map patch type back to the call within the
- * corresponding structure.
- */
- void *opfunc = *((void **)&pv_ops + type);
- unsigned ret;
-
- if (opfunc == NULL)
- /* If there's no function, patch it with paravirt_BUG() */
- ret = paravirt_patch_call(insn_buff, paravirt_BUG, addr, len);
- else if (opfunc == _paravirt_nop)
- ret = 0;
- else
- /* Otherwise call the function. */
- ret = paravirt_patch_call(insn_buff, opfunc, addr, len);
-
- return ret;
-}
-
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index b6f4e8399fca..ab49ade31b0d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -477,7 +477,7 @@ void native_tss_update_io_bitmap(void)
/*
* Make sure that the TSS limit is covering the IO bitmap. It might have
* been cut down by a VMEXIT to 0x67 which would cause a subsequent I/O
- * access from user space to trigger a #GP because tbe bitmap is outside
+ * access from user space to trigger a #GP because the bitmap is outside
* the TSS limit.
*/
refresh_tss_limit();
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 1526747bedf2..ec2c21a1844e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -226,8 +226,6 @@ static void __init reserve_brk(void)
_brk_start = 0;
}
-u64 relocated_ramdisk;
-
#ifdef CONFIG_BLK_DEV_INITRD
static u64 __init get_ramdisk_image(void)
@@ -261,7 +259,7 @@ static void __init relocate_initrd(void)
u64 area_size = PAGE_ALIGN(ramdisk_size);
/* We need to move the initrd down into directly mapped mem */
- relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0,
+ u64 relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0,
PFN_PHYS(max_pfn_mapped));
if (!relocated_ramdisk)
panic("Cannot find place for new RAMDISK of size %lld\n",
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 2c97bf7b56ae..b30d6e180df7 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -106,8 +106,8 @@ void __init pcpu_populate_pte(unsigned long addr)
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
- struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
- 0xFFFFF);
+ struct desc_struct d = GDT_ENTRY_INIT(DESC_DATA32,
+ per_cpu_offset(cpu), 0xFFFFF);
write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index ccb0915e84e1..1d24ec679915 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -96,7 +96,7 @@ static void __noreturn sev_es_terminate(unsigned int set, unsigned int reason)
/* Tell the hypervisor what went wrong. */
val |= GHCB_SEV_TERM_REASON(set, reason);
- /* Request Guest Termination from Hypvervisor */
+ /* Request Guest Termination from Hypervisor */
sev_es_wr_ghcb_msr(val);
VMGEXIT();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2cc2aa120b4b..3f57ce68a3f1 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -757,6 +757,7 @@ const struct cpumask *cpu_clustergroup_mask(int cpu)
{
return cpu_l2c_shared_mask(cpu);
}
+EXPORT_SYMBOL_GPL(cpu_clustergroup_mask);
static void impress_friends(void)
{
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 54a5596adaa6..a349dbfc6d5a 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -267,19 +267,6 @@ SECTIONS
}
#endif
- /*
- * start address and size of operations which during runtime
- * can be patched with virtualization friendly instructions or
- * baremetal native ones. Think page table operations.
- * Details in paravirt_types.h
- */
- . = ALIGN(8);
- .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
- __parainstructions = .;
- *(.parainstructions)
- __parainstructions_end = .;
- }
-
#ifdef CONFIG_RETPOLINE
/*
* List of instructions that call/jmp/jcc to retpoline thunks
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index dda6fc4cfae8..42d3f47f4c07 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -105,7 +105,7 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
/*
* If the index isn't significant, use the first entry with a
- * matching function. It's userspace's responsibilty to not
+ * matching function. It's userspace's responsibility to not
* provide "duplicate" entries in all cases.
*/
if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 238afd7335e4..4943f6b2bbee 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -2388,7 +2388,7 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *h
if (!eventfd)
return HV_STATUS_INVALID_PORT_ID;
- eventfd_signal(eventfd, 1);
+ eventfd_signal(eventfd);
return HV_STATUS_SUCCESS;
}
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index c57e181bba21..0b1f991b9a31 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -987,7 +987,7 @@ static void pte_list_desc_remove_entry(struct kvm *kvm,
/*
* The head descriptor is empty. If there are no tail descriptors,
- * nullify the rmap head to mark the list as emtpy, else point the rmap
+ * nullify the rmap head to mark the list as empty, else point the rmap
* head at the next descriptor, i.e. the new head.
*/
if (!head_desc->more)
@@ -6544,7 +6544,7 @@ void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, false);
/*
- * A TLB flush is unnecessary at this point for the same resons as in
+ * A TLB flush is unnecessary at this point for the same reasons as in
* kvm_mmu_slot_try_split_huge_pages().
*/
}
diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
index bd30ebfb2f2c..04c247bfe318 100644
--- a/arch/x86/kvm/mmu/tdp_iter.c
+++ b/arch/x86/kvm/mmu/tdp_iter.c
@@ -146,7 +146,7 @@ static bool try_step_up(struct tdp_iter *iter)
* Step to the next SPTE in a pre-order traversal of the paging structure.
* To get to the next SPTE, the iterator either steps down towards the goal
* GFN, if at a present, non-last-level SPTE, or over to a SPTE mapping a
- * highter GFN.
+ * higher GFN.
*
* The basic algorithm is as follows:
* 1. If the current SPTE is a non-last-level SPTE, step down into the page
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index a8bd4e909a1e..7fb51424fc74 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4744,7 +4744,7 @@ static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
* Emulation is possible for SEV guests if and only if a prefilled
* buffer containing the bytes of the intercepted instruction is
* available. SEV guest memory is encrypted with a guest specific key
- * and cannot be decrypted by KVM, i.e. KVM would read cyphertext and
+ * and cannot be decrypted by KVM, i.e. KVM would read ciphertext and
* decode garbage.
*
* If KVM is NOT trying to simply skip an instruction, inject #UD if
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index c5ec0ef51ff7..65826fe23f33 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -6561,7 +6561,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
* code was changed such that flag signals vmcs12 should
* be copied into eVMCS in guest memory.
*
- * To preserve backwards compatability, allow user
+ * To preserve backwards compatibility, allow user
* to set this flag even when there is no VMXON region.
*/
if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index be20a60047b1..e0f86f11c345 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1809,7 +1809,7 @@ static void vmx_inject_exception(struct kvm_vcpu *vcpu)
* do generate error codes with bits 31:16 set, and so KVM's
* ABI lets userspace shove in arbitrary 32-bit values. Drop
* the upper bits to avoid VM-Fail, losing information that
- * does't really exist is preferable to killing the VM.
+ * doesn't really exist is preferable to killing the VM.
*/
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)ex->error_code);
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1a3aaa7dafae..cec0fc2a4b1c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10165,7 +10165,7 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
*
* But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip
* the instruction or inject an exception, then KVM can incorrecty inject a new
- * asynchrounous event if the event became pending after the CPU fetched the
+ * asynchronous event if the event became pending after the CPU fetched the
* instruction (in the guest). E.g. if a page fault (#PF, #NPF, EPT violation)
* occurs and is resolved by KVM, a coincident NMI, SMI, IRQ, etc... can be
* injected on the restarted instruction instead of being deferred until the
@@ -10186,7 +10186,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
int r;
/*
- * Process nested events first, as nested VM-Exit supercedes event
+ * Process nested events first, as nested VM-Exit supersedes event
* re-injection. If there's an event queued for re-injection, it will
* be saved into the appropriate vmc{b,s}12 fields on nested VM-Exit.
*/
@@ -10884,7 +10884,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
/*
* Assert that vCPU vs. VM APICv state is consistent. An APICv
* update must kick and wait for all vCPUs before toggling the
- * per-VM state, and responsing vCPUs must wait for the update
+ * per-VM state, and responding vCPUs must wait for the update
* to complete before servicing KVM_REQ_APICV_UPDATE.
*/
WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index e53fad915a62..523bb6df5ac9 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -2088,7 +2088,7 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
if (ret < 0 && ret != -ENOTCONN)
return false;
} else {
- eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1);
+ eventfd_signal(evtchnfd->deliver.eventfd.ctx);
}
*r = 0;
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index cea25ca8b8cf..c9dae65ac01b 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -11,26 +11,23 @@
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>
-static inline unsigned short from32to16(unsigned a)
+static inline __wsum csum_finalize_sum(u64 temp64)
{
- unsigned short b = a >> 16;
- asm("addw %w2,%w0\n\t"
- "adcw $0,%w0\n"
- : "=r" (b)
- : "0" (b), "r" (a));
- return b;
+ return (__force __wsum)((temp64 + ror64(temp64, 32)) >> 32);
}
-static inline __wsum csum_tail(u64 temp64, int odd)
+static inline unsigned long update_csum_40b(unsigned long sum, const unsigned long m[5])
{
- unsigned int result;
-
- result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
- if (unlikely(odd)) {
- result = from32to16(result);
- result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
- }
- return (__force __wsum)result;
+ asm("addq %1,%0\n\t"
+ "adcq %2,%0\n\t"
+ "adcq %3,%0\n\t"
+ "adcq %4,%0\n\t"
+ "adcq %5,%0\n\t"
+ "adcq $0,%0"
+ :"+r" (sum)
+ :"m" (m[0]), "m" (m[1]), "m" (m[2]),
+ "m" (m[3]), "m" (m[4]));
+ return sum;
}
/*
@@ -47,64 +44,32 @@ static inline __wsum csum_tail(u64 temp64, int odd)
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
u64 temp64 = (__force u64)sum;
- unsigned odd;
- odd = 1 & (unsigned long) buff;
- if (unlikely(odd)) {
- if (unlikely(len == 0))
- return sum;
- temp64 = ror32((__force u32)sum, 8);
- temp64 += (*(unsigned char *)buff << 8);
- len--;
- buff++;
+ /* Do two 40-byte chunks in parallel to get better ILP */
+ if (likely(len >= 80)) {
+ u64 temp64_2 = 0;
+ do {
+ temp64 = update_csum_40b(temp64, buff);
+ temp64_2 = update_csum_40b(temp64_2, buff + 40);
+ buff += 80;
+ len -= 80;
+ } while (len >= 80);
+
+ asm("addq %1,%0\n\t"
+ "adcq $0,%0"
+ :"+r" (temp64): "r" (temp64_2));
}
/*
- * len == 40 is the hot case due to IPv6 headers, but annotating it likely()
- * has noticeable negative affect on codegen for all other cases with
- * minimal performance benefit here.
+ * len == 40 is the hot case due to IPv6 headers, so return
+ * early for that exact case without checking the tail bytes.
*/
- if (len == 40) {
- asm("addq 0*8(%[src]),%[res]\n\t"
- "adcq 1*8(%[src]),%[res]\n\t"
- "adcq 2*8(%[src]),%[res]\n\t"
- "adcq 3*8(%[src]),%[res]\n\t"
- "adcq 4*8(%[src]),%[res]\n\t"
- "adcq $0,%[res]"
- : [res] "+r"(temp64)
- : [src] "r"(buff), "m"(*(const char(*)[40])buff));
- return csum_tail(temp64, odd);
- }
- if (unlikely(len >= 64)) {
- /*
- * Extra accumulators for better ILP in the loop.
- */
- u64 tmp_accum, tmp_carries;
-
- asm("xorl %k[tmp_accum],%k[tmp_accum]\n\t"
- "xorl %k[tmp_carries],%k[tmp_carries]\n\t"
- "subl $64, %[len]\n\t"
- "1:\n\t"
- "addq 0*8(%[src]),%[res]\n\t"
- "adcq 1*8(%[src]),%[res]\n\t"
- "adcq 2*8(%[src]),%[res]\n\t"
- "adcq 3*8(%[src]),%[res]\n\t"
- "adcl $0,%k[tmp_carries]\n\t"
- "addq 4*8(%[src]),%[tmp_accum]\n\t"
- "adcq 5*8(%[src]),%[tmp_accum]\n\t"
- "adcq 6*8(%[src]),%[tmp_accum]\n\t"
- "adcq 7*8(%[src]),%[tmp_accum]\n\t"
- "adcl $0,%k[tmp_carries]\n\t"
- "addq $64, %[src]\n\t"
- "subl $64, %[len]\n\t"
- "jge 1b\n\t"
- "addq %[tmp_accum],%[res]\n\t"
- "adcq %[tmp_carries],%[res]\n\t"
- "adcq $0,%[res]"
- : [tmp_accum] "=&r"(tmp_accum),
- [tmp_carries] "=&r"(tmp_carries), [res] "+r"(temp64),
- [len] "+r"(len), [src] "+r"(buff)
- : "m"(*(const char *)buff));
+ if (len >= 40) {
+ temp64 = update_csum_40b(temp64, buff);
+ len -= 40;
+ if (!len)
+ return csum_finalize_sum(temp64);
+ buff += 40;
}
if (len & 32) {
@@ -143,7 +108,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
: [res] "+r"(temp64)
: [trail] "r"(trail));
}
- return csum_tail(temp64, odd);
+ return csum_finalize_sum(temp64);
}
EXPORT_SYMBOL(csum_partial);
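
csum_finalize_sum() above folds the 64-bit accumulator in a single step: adding the value to itself rotated by 32 bits performs the end-around carry, so the top half of the result is the folded 32-bit partial checksum. A small standalone sketch of that fold (names are illustrative, not the kernel's):

    #include <stdint.h>

    /* Equivalent of (temp64 + ror64(temp64, 32)) >> 32 from the hunk above. */
    static uint32_t fold64(uint64_t acc)
    {
            uint64_t rot = (acc >> 32) | (acc << 32);   /* rotate by 32 bits */

            /*
             * The low 32 bits of acc + rot generate the carry of (hi + lo);
             * the high 32 bits are then hi + lo + that carry, i.e. the
             * one's-complement fold of the accumulator into 32 bits.
             */
            return (uint32_t)((acc + rot) >> 32);
    }
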
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 0e65d00e2339..23f81ca3f06b 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -128,7 +128,7 @@ static void delay_halt_mwaitx(u64 unused, u64 cycles)
delay = min_t(u64, MWAITX_MAX_WAIT_CYCLES, cycles);
/*
- * Use cpu_tss_rw as a cacheline-aligned, seldomly accessed per-cpu
+ * Use cpu_tss_rw as a cacheline-aligned, seldom accessed per-cpu
* variable as the monitor target.
*/
__monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
diff --git a/arch/x86/lib/misc.c b/arch/x86/lib/misc.c
index 92cd8ecc3a2c..40b81c338ae5 100644
--- a/arch/x86/lib/misc.c
+++ b/arch/x86/lib/misc.c
@@ -8,7 +8,7 @@
*/
int num_digits(int val)
{
- int m = 10;
+ long long m = 10;
int d = 1;
if (val < 0) {
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index ab778eac1952..679b09cfe241 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1370,6 +1370,8 @@ void do_user_addr_fault(struct pt_regs *regs,
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ if (fault & VM_FAULT_MAJOR)
+ flags |= FAULT_FLAG_TRIED;
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a190aae8ceaf..a0dffaca6d2b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1013,7 +1013,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
return;
}
- /* free a pte talbe */
+ /* free a pte table */
free_pagetable(pmd_page(*pmd), 0);
spin_lock(&init_mm.page_table_lock);
pmd_clear(pmd);
@@ -1031,7 +1031,7 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
return;
}
- /* free a pmd talbe */
+ /* free a pmd table */
free_pagetable(pud_page(*pud), 0);
spin_lock(&init_mm.page_table_lock);
pud_clear(pud);
@@ -1049,7 +1049,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
return;
}
- /* free a pud talbe */
+ /* free a pud table */
free_pagetable(p4d_page(*p4d), 0);
spin_lock(&init_mm.page_table_lock);
p4d_clear(p4d);
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index b29ceb19e46e..adc497b93f03 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -450,37 +450,6 @@ int __node_distance(int from, int to)
EXPORT_SYMBOL(__node_distance);
/*
- * Sanity check to catch more bad NUMA configurations (they are amazingly
- * common). Make sure the nodes cover all memory.
- */
-static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
-{
- u64 numaram, e820ram;
- int i;
-
- numaram = 0;
- for (i = 0; i < mi->nr_blks; i++) {
- u64 s = mi->blk[i].start >> PAGE_SHIFT;
- u64 e = mi->blk[i].end >> PAGE_SHIFT;
- numaram += e - s;
- numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
- if ((s64)numaram < 0)
- numaram = 0;
- }
-
- e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
-
- /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
- if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
- printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
- (numaram << PAGE_SHIFT) >> 20,
- (e820ram << PAGE_SHIFT) >> 20);
- return false;
- }
- return true;
-}
-
-/*
* Mark all currently memblock-reserved physical memory (which covers the
* kernel's own memory ranges) as hot-unswappable.
*/
@@ -585,7 +554,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
return -EINVAL;
}
}
- if (!numa_meminfo_cover_memory(mi))
+
+ if (!memblock_validate_numa_coverage(SZ_1M))
return -EINVAL;
/* Finally register nodes. */
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index de10800cd4dd..0904d7e8e126 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -14,7 +14,7 @@
* memory ranges: uncached, write-combining, write-through, write-protected,
* and the most commonly used and default attribute: write-back caching.
*
- * PAT support supercedes and augments MTRR support in a compatible fashion: MTRR is
+ * PAT support supersedes and augments MTRR support in a compatible fashion: MTRR is
* a hardware interface to enumerate a limited number of physical memory ranges
* and set their caching attributes explicitly, programmed into the CPU via MSRs.
* Even modern CPUs have MTRRs enabled - but these are typically not touched
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index bda9f129835e..e9b448d1b1b7 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1621,7 +1621,7 @@ repeat:
/*
* We need to keep the pfn from the existing PTE,
- * after all we're only going to change it's attributes
+ * after all we're only going to change its attributes
* not the memory it points to
*/
new_pte = pfn_pte(pfn, new_prot);
@@ -2447,7 +2447,7 @@ int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
/*
* The typical sequence for unmapping is to find a pte through
* lookup_address_in_pgd() (ideally, it should never return NULL because
- * the address is already mapped) and change it's protections. As pfn is
+ * the address is already mapped) and change its protections. As pfn is
* the *target* of a mapping, it's not useful while unmapping.
*/
struct cpa_data cpa = {
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 5dd733944629..669ba1c345b3 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -6,7 +6,7 @@
*
* https://github.com/IAIK/KAISER
*
- * The original work was written by and and signed off by for the Linux
+ * The original work was written by and signed off by for the Linux
* kernel by:
*
* Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 453ea95b667d..5768d386efab 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -355,7 +355,7 @@ static void l1d_flush_evaluate(unsigned long prev_mm, unsigned long next_mm,
/*
* Validate that it is not running on an SMT sibling as this would
- * make the excercise pointless because the siblings share L1D. If
+ * make the exercise pointless because the siblings share L1D. If
* it runs on a SMT sibling, notify it with SIGBUS on return to
* user/guest
*/
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index e89e415aa743..144a5839acfe 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -2143,7 +2143,7 @@ static void save_args(const struct btf_func_model *m, u8 **prog,
} else {
/* Only copy the arguments on-stack to current
* 'stack_size' and ignore the regs, used to
- * prepare the arguments on-stack for orign call.
+ * prepare the arguments on-stack for origin call.
*/
if (for_call_origin) {
nr_regs += arg_regs;
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 429a89c5468b..b18ce19981ec 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -1194,7 +1194,7 @@ struct jit_context {
#define PROLOGUE_SIZE 35
/*
- * Emit prologue code for BPF program and check it's size.
+ * Emit prologue code for BPF program and check its size.
* bpf_tail_call helper will skip it while jumping into another program.
*/
static void emit_prologue(u8 **pprog, u32 stack_depth)
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index 7368afc03998..8c8ddc4dcc08 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -14,6 +14,7 @@
#include <linux/dma-map-ops.h>
#include <linux/swiotlb.h>
#include <asm/iommu.h>
+#include <asm/sta2x11.h>
#define STA2X11_SWIOTLB_SIZE (4*1024*1024)
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
index 761f3689f60a..84ba715f44d1 100644
--- a/arch/x86/platform/intel-quark/imr_selftest.c
+++ b/arch/x86/platform/intel-quark/imr_selftest.c
@@ -6,7 +6,7 @@
* Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
*
* IMR self test. The purpose of this module is to run a set of tests on the
- * IMR API to validate it's sanity. We check for overlapping, reserved
+ * IMR API to validate its sanity. We check for overlapping, reserved
* addresses and setup/teardown sanity.
*
*/
diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index c4365a05ab83..f7235ef87bc3 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -11,6 +11,7 @@
#include <linux/elfnote.h>
#include <linux/init.h>
#include <linux/linkage.h>
+#include <asm/desc_defs.h>
#include <asm/segment.h>
#include <asm/asm.h>
#include <asm/boot.h>
@@ -41,7 +42,7 @@
* Bit 8 (TF) must be cleared. Other bits are all unspecified.
*
* All other processor registers and flag bits are unspecified. The OS is in
- * charge of setting up it's own stack, GDT and IDT.
+ * charge of setting up its own stack, GDT and IDT.
*/
#define PVH_GDT_ENTRY_CS 1
@@ -148,11 +149,11 @@ SYM_DATA_END(gdt)
SYM_DATA_START_LOCAL(gdt_start)
.quad 0x0000000000000000 /* NULL descriptor */
#ifdef CONFIG_X86_64
- .quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */
+ .quad GDT_ENTRY(DESC_CODE64, 0, 0xfffff) /* PVH_CS_SEL */
#else
- .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */
+ .quad GDT_ENTRY(DESC_CODE32, 0, 0xfffff) /* PVH_CS_SEL */
#endif
- .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */
+ .quad GDT_ENTRY(DESC_DATA32, 0, 0xfffff) /* PVH_DS_SEL */
SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)
.balign 16
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index 4221259a5870..a379501b7a69 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -35,7 +35,7 @@ static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
mmr_value = 0;
entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
entry->vector = cfg->vector;
- entry->delivery_mode = apic->delivery_mode;
+ entry->delivery_mode = APIC_DELIVERY_MODE_FIXED;
entry->dest_mode = apic->dest_mode_logical;
entry->polarity = 0;
entry->trigger = 0;
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index e03207de2880..5c50e550ab63 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -741,7 +741,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
}
-/* Trigger a slave CPU to dump it's state */
+/* Trigger a slave CPU to dump its state */
static void uv_nmi_trigger_dump(int cpu)
{
int retry = uv_nmi_trigger_delay;
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index ff5afc8a5a41..3712afc3534d 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -270,7 +270,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
* Read the RTC.
*
* Starting with HUB rev 2.0, the UV RTC register is replicated across all
- * cachelines of it's own page. This allows faster simultaneous reads
+ * cachelines of its own page. This allows faster simultaneous reads
* from a given socket.
*/
static u64 uv_read_rtc(struct clocksource *cs)
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 788e5559549f..f9bc444a3064 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -61,7 +61,7 @@ void __init reserve_real_mode(void)
set_real_mode_mem(mem);
/*
- * Unconditionally reserve the entire fisrt 1M, see comment in
+ * Unconditionally reserve the entire first 1M, see comment in
* setup_arch().
*/
memblock_reserve(0, SZ_1M);
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
index f10515b10e0a..e714b4624e36 100644
--- a/arch/x86/realmode/rm/reboot.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
+#include <asm/desc_defs.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/processor-flags.h>
@@ -153,5 +154,5 @@ SYM_DATA_START(machine_real_restart_gdt)
* base value 0x100; since this is consistent with real mode
* semantics we don't have to reload the segments once CR0.PE = 0.
*/
- .quad GDT_ENTRY(0x0093, 0x100, 0xffff)
+ .quad GDT_ENTRY(DESC_DATA16, 0x100, 0xffff)
SYM_DATA_END(machine_real_restart_gdt)
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index 90e820ac9771..7278e2545c35 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -17,7 +17,7 @@ reformatter = $(srctree)/arch/x86/tools/objdump_reformat.awk
chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk
quiet_cmd_posttest = TEST $@
- cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(reformatter) | $(obj)/insn_decoder_test $(posttest_64bit) $(posttest_verbose)
+ cmd_posttest = $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(reformatter) | $(obj)/insn_decoder_test $(posttest_64bit) $(posttest_verbose)
quiet_cmd_sanitytest = TEST $@
cmd_sanitytest = $(obj)/insn_sanity $(posttest_64bit) -m 1000000
diff --git a/arch/x86/tools/chkobjdump.awk b/arch/x86/tools/chkobjdump.awk
deleted file mode 100644
index a4cf678cf5c8..000000000000
--- a/arch/x86/tools/chkobjdump.awk
+++ /dev/null
@@ -1,34 +0,0 @@
-# GNU objdump version checker
-#
-# Usage:
-# objdump -v | awk -f chkobjdump.awk
-BEGIN {
- # objdump version 2.19 or later is OK for the test.
- od_ver = 2;
- od_sver = 19;
-}
-
-/^GNU objdump/ {
- verstr = ""
- gsub(/\(.*\)/, "");
- for (i = 3; i <= NF; i++)
- if (match($(i), "^[0-9]")) {
- verstr = $(i);
- break;
- }
- if (verstr == "") {
- printf("Warning: Failed to find objdump version number.\n");
- exit 0;
- }
- split(verstr, ver, ".");
- if (ver[1] > od_ver ||
- (ver[1] == od_ver && ver[2] >= od_sver)) {
- exit 1;
- } else {
- printf("Warning: objdump version %s is older than %d.%d\n",
- verstr, od_ver, od_sver);
- print("Warning: Skipping posttest.");
- # Logic is inverted, because we just skip test without error.
- exit 0;
- }
-}
diff --git a/arch/x86/tools/objdump_reformat.awk b/arch/x86/tools/objdump_reformat.awk
index f418c91b71f0..20b08a6c4d33 100644
--- a/arch/x86/tools/objdump_reformat.awk
+++ b/arch/x86/tools/objdump_reformat.awk
@@ -11,8 +11,8 @@ BEGIN {
prev_addr = ""
prev_hex = ""
prev_mnemonic = ""
- bad_expr = "(\\(bad\\)|^rex|^.byte|^rep(z|nz)$|^lock$|^es$|^cs$|^ss$|^ds$|^fs$|^gs$|^data(16|32)$|^addr(16|32|64))"
- fwait_expr = "^9b "
+ bad_expr = "(\\(bad\\)|<unknown>|^rex|^.byte|^rep(z|nz)$|^lock$|^es$|^cs$|^ss$|^ds$|^fs$|^gs$|^data(16|32)$|^addr(16|32|64))"
+ fwait_expr = "^9b[ \t]*fwait"
fwait_str="9b\tfwait"
}
@@ -22,7 +22,7 @@ BEGIN {
}
/^ *[0-9a-f]+:/ {
- if (split($0, field, "\t") < 3) {
+ if (split($0, field, /: |\t/) < 3) {
# This is a continuation of the same insn.
prev_hex = prev_hex field[2]
} else {
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index d30949e25ebd..a3bae2b24626 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -66,7 +66,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
[S_REL] =
"^(__init_(begin|end)|"
"__x86_cpu_dev_(start|end)|"
- "(__parainstructions|__alt_instructions)(_end)?|"
+ "__alt_instructions(_end)?|"
"(__iommu_table|__apicdrivers|__smp_locks)(_end)?|"
"__(start|end)_pci_.*|"
#if CONFIG_FW_LOADER
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 6092fea7d651..39982f955cfe 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -45,7 +45,7 @@ static const typeof(pv_ops) xen_irq_ops __initconst = {
/* Initial interrupt flag handling only called while interrupts off. */
.save_fl = __PV_IS_CALLEE_SAVE(paravirt_ret0),
.irq_disable = __PV_IS_CALLEE_SAVE(paravirt_nop),
- .irq_enable = __PV_IS_CALLEE_SAVE(paravirt_BUG),
+ .irq_enable = __PV_IS_CALLEE_SAVE(BUG_func),
.safe_halt = xen_safe_halt,
.halt = xen_halt,
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index b6830554ff69..72af496a160c 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -34,7 +34,7 @@
* would need to validate the whole pagetable before going on.
* Naturally, this is quite slow. The solution is to "pin" a
* pagetable, which enforces all the constraints on the pagetable even
- * when it is not actively in use. This menas that Xen can be assured
+ * when it is not actively in use. This means that Xen can be assured
* that it is still valid when you do load it into %cr3, and doesn't
* need to revalidate it.
*
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 7d792077e5fd..e031eaf36c99 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -793,7 +793,7 @@ config ARCH_FORCE_MAX_ORDER
default "10"
help
The kernel page allocator limits the size of maximal physically
- contiguous allocations. The limit is called MAX_ORDER and it
+ contiguous allocations. The limit is called MAX_PAGE_ORDER and it
defines the maximal power of two of number of pages that can be
allocated as a single contiguous block. This option allows
overriding the default setting when ability to allocate very
diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h
index 216b6f32c375..8d2b4248466f 100644
--- a/arch/xtensa/include/asm/kasan.h
+++ b/arch/xtensa/include/asm/kasan.h
@@ -18,6 +18,8 @@
#define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE)
/* Size of the shadow map */
#define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)
+/* End of the shadow map */
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
/* Offset for mem to shadow address transformation */
#define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET)
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 06eefa9c1458..dd116598fb25 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -427,3 +427,8 @@
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
diff --git a/block/Kconfig b/block/Kconfig
index 55ae2286a4de..1de4682d48cc 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -78,6 +78,26 @@ config BLK_DEV_INTEGRITY_T10
select CRC_T10DIF
select CRC64_ROCKSOFT
+config BLK_DEV_WRITE_MOUNTED
+ bool "Allow writing to mounted block devices"
+ default y
+ help
+ When a block device is mounted, writing to its buffer cache is very
+ likely going to cause filesystem corruption. It is also rather easy to
+ crash the kernel in this way since the filesystem has no practical way
+ of detecting these writes to buffer cache and verifying its metadata
+ integrity. However there are some setups that need this capability
+ like running fsck on read-only mounted root device, modifying some
+ features on mounted ext4 filesystem, and similar. If you say N, the
+ kernel will prevent processes from writing to block devices that are
+ mounted by filesystems which provides some more protection from runaway
+ privileged processes and generally makes it much harder to crash
+ filesystem drivers. Note however that this does not prevent
+ underlying device(s) from being modified by other means, e.g. by
+ directly submitting SCSI commands or through access to lower layers of
+ storage stack. If in doubt, say Y. The configuration can be overridden
+ with the bdev_allow_write_mounted boot option.
+
config BLK_DEV_ZONED
bool "Zoned block device support"
select MQ_IOSCHED_DEADLINE
diff --git a/block/bdev.c b/block/bdev.c
index 750aec178b6a..e9f1b12bd75c 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -30,6 +30,9 @@
#include "../fs/internal.h"
#include "blk.h"
+/* Should we allow writing to mounted block devices? */
+static bool bdev_allow_write_mounted = IS_ENABLED(CONFIG_BLK_DEV_WRITE_MOUNTED);
+
struct bdev_inode {
struct block_device bdev;
struct inode vfs_inode;
@@ -207,85 +210,88 @@ int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
EXPORT_SYMBOL(sync_blockdev_range);
/**
- * freeze_bdev - lock a filesystem and force it into a consistent state
+ * bdev_freeze - lock a filesystem and force it into a consistent state
* @bdev: blockdevice to lock
*
* If a superblock is found on this device, we take the s_umount semaphore
* on it to make sure nobody unmounts until the snapshot creation is done.
* The reference counter (bd_fsfreeze_count) guarantees that only the last
* unfreeze process can unfreeze the frozen filesystem actually when multiple
- * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
- * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
+ * freeze requests arrive simultaneously. It counts up in bdev_freeze() and
+ * count down in bdev_thaw(). When it becomes 0, thaw_bdev() will unfreeze
* actually.
+ *
+ * Return: On success zero is returned, negative error code on failure.
*/
-int freeze_bdev(struct block_device *bdev)
+int bdev_freeze(struct block_device *bdev)
{
- struct super_block *sb;
int error = 0;
mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (++bdev->bd_fsfreeze_count > 1)
- goto done;
-
- sb = get_active_super(bdev);
- if (!sb)
- goto sync;
- if (sb->s_op->freeze_super)
- error = sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE);
- else
- error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
- deactivate_super(sb);
- if (error) {
- bdev->bd_fsfreeze_count--;
- goto done;
+ if (atomic_inc_return(&bdev->bd_fsfreeze_count) > 1) {
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return 0;
}
- bdev->bd_fsfreeze_sb = sb;
-sync:
- sync_blockdev(bdev);
-done:
+ mutex_lock(&bdev->bd_holder_lock);
+ if (bdev->bd_holder_ops && bdev->bd_holder_ops->freeze) {
+ error = bdev->bd_holder_ops->freeze(bdev);
+ lockdep_assert_not_held(&bdev->bd_holder_lock);
+ } else {
+ mutex_unlock(&bdev->bd_holder_lock);
+ error = sync_blockdev(bdev);
+ }
+
+ if (error)
+ atomic_dec(&bdev->bd_fsfreeze_count);
+
mutex_unlock(&bdev->bd_fsfreeze_mutex);
return error;
}
-EXPORT_SYMBOL(freeze_bdev);
+EXPORT_SYMBOL(bdev_freeze);
/**
- * thaw_bdev - unlock filesystem
+ * bdev_thaw - unlock filesystem
* @bdev: blockdevice to unlock
*
- * Unlocks the filesystem and marks it writeable again after freeze_bdev().
+ * Unlocks the filesystem and marks it writeable again after bdev_freeze().
+ *
+ * Return: On success zero is returned, negative error code on failure.
*/
-int thaw_bdev(struct block_device *bdev)
+int bdev_thaw(struct block_device *bdev)
{
- struct super_block *sb;
- int error = -EINVAL;
+ int error = -EINVAL, nr_freeze;
mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (!bdev->bd_fsfreeze_count)
+
+ /*
+ * If this returns < 0 it means that @bd_fsfreeze_count was
+ * already 0 and no decrement was performed.
+ */
+ nr_freeze = atomic_dec_if_positive(&bdev->bd_fsfreeze_count);
+ if (nr_freeze < 0)
goto out;
error = 0;
- if (--bdev->bd_fsfreeze_count > 0)
+ if (nr_freeze > 0)
goto out;
- sb = bdev->bd_fsfreeze_sb;
- if (!sb)
- goto out;
+ mutex_lock(&bdev->bd_holder_lock);
+ if (bdev->bd_holder_ops && bdev->bd_holder_ops->thaw) {
+ error = bdev->bd_holder_ops->thaw(bdev);
+ lockdep_assert_not_held(&bdev->bd_holder_lock);
+ } else {
+ mutex_unlock(&bdev->bd_holder_lock);
+ }
- if (sb->s_op->thaw_super)
- error = sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE);
- else
- error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
if (error)
- bdev->bd_fsfreeze_count++;
- else
- bdev->bd_fsfreeze_sb = NULL;
+ atomic_inc(&bdev->bd_fsfreeze_count);
out:
mutex_unlock(&bdev->bd_fsfreeze_mutex);
return error;
}
-EXPORT_SYMBOL(thaw_bdev);
+EXPORT_SYMBOL(bdev_thaw);
/*
* pseudo-fs
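
The rename above accompanies the switch from superblock lookup to bd_holder_ops dispatch; from a caller's point of view the conversion is mechanical. A minimal sketch of a migrated caller (illustrative only, assuming the usual snapshot-style user):

    #include <linux/blkdev.h>

    /* Sketch: freeze around a consistent snapshot, then thaw; both calls can fail. */
    static int snapshot_bdev(struct block_device *bdev)
    {
            int err;

            err = bdev_freeze(bdev);        /* was: freeze_bdev(bdev) */
            if (err)
                    return err;

            /* ... take the snapshot while the filesystem is quiesced ... */

            return bdev_thaw(bdev);         /* was: thaw_bdev(bdev) */
    }
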
@@ -729,9 +735,60 @@ void blkdev_put_no_open(struct block_device *bdev)
{
put_device(&bdev->bd_device);
}
-
+
+static bool bdev_writes_blocked(struct block_device *bdev)
+{
+ return bdev->bd_writers == -1;
+}
+
+static void bdev_block_writes(struct block_device *bdev)
+{
+ bdev->bd_writers = -1;
+}
+
+static void bdev_unblock_writes(struct block_device *bdev)
+{
+ bdev->bd_writers = 0;
+}
+
+static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode)
+{
+ if (bdev_allow_write_mounted)
+ return true;
+ /* Writes blocked? */
+ if (mode & BLK_OPEN_WRITE && bdev_writes_blocked(bdev))
+ return false;
+ if (mode & BLK_OPEN_RESTRICT_WRITES && bdev->bd_writers > 0)
+ return false;
+ return true;
+}
+
+static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode)
+{
+ if (bdev_allow_write_mounted)
+ return;
+
+ /* Claim exclusive or shared write access. */
+ if (mode & BLK_OPEN_RESTRICT_WRITES)
+ bdev_block_writes(bdev);
+ else if (mode & BLK_OPEN_WRITE)
+ bdev->bd_writers++;
+}
+
+static void bdev_yield_write_access(struct block_device *bdev, blk_mode_t mode)
+{
+ if (bdev_allow_write_mounted)
+ return;
+
+ /* Yield exclusive or shared write access. */
+ if (mode & BLK_OPEN_RESTRICT_WRITES)
+ bdev_unblock_writes(bdev);
+ else if (mode & BLK_OPEN_WRITE)
+ bdev->bd_writers--;
+}
+
/**
- * blkdev_get_by_dev - open a block device by device number
+ * bdev_open_by_dev - open a block device by device number
* @dev: device number of block device to open
* @mode: open mode (BLK_OPEN_*)
* @holder: exclusive holder identifier
@@ -743,32 +800,46 @@ void blkdev_put_no_open(struct block_device *bdev)
*
* Use this interface ONLY if you really do not have anything better - i.e. when
* you are behind a truly sucky interface and all you are given is a device
- * number. Everything else should use blkdev_get_by_path().
+ * number. Everything else should use bdev_open_by_path().
*
* CONTEXT:
* Might sleep.
*
* RETURNS:
- * Reference to the block_device on success, ERR_PTR(-errno) on failure.
+ * Handle with a reference to the block_device on success, ERR_PTR(-errno) on
+ * failure.
*/
-struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
- const struct blk_holder_ops *hops)
+struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
+ const struct blk_holder_ops *hops)
{
- bool unblock_events = true;
+ struct bdev_handle *handle = kmalloc(sizeof(struct bdev_handle),
+ GFP_KERNEL);
struct block_device *bdev;
+ bool unblock_events = true;
struct gendisk *disk;
int ret;
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+
ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
MAJOR(dev), MINOR(dev),
((mode & BLK_OPEN_READ) ? DEVCG_ACC_READ : 0) |
((mode & BLK_OPEN_WRITE) ? DEVCG_ACC_WRITE : 0));
if (ret)
- return ERR_PTR(ret);
+ goto free_handle;
+
+ /* Blocking writes requires exclusive opener */
+ if (mode & BLK_OPEN_RESTRICT_WRITES && !holder) {
+ ret = -EINVAL;
+ goto free_handle;
+ }
bdev = blkdev_get_no_open(dev);
- if (!bdev)
- return ERR_PTR(-ENXIO);
+ if (!bdev) {
+ ret = -ENXIO;
+ goto free_handle;
+ }
disk = bdev->bd_disk;
if (holder) {
@@ -791,12 +862,16 @@ struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
goto abort_claiming;
if (!try_module_get(disk->fops->owner))
goto abort_claiming;
+ ret = -EBUSY;
+ if (!bdev_may_open(bdev, mode))
+ goto abort_claiming;
if (bdev_is_partition(bdev))
ret = blkdev_get_part(bdev, mode);
else
ret = blkdev_get_whole(bdev, mode);
if (ret)
goto put_module;
+ bdev_claim_write_access(bdev, mode);
if (holder) {
bd_finish_claiming(bdev, holder, hops);
@@ -817,7 +892,10 @@ struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
if (unblock_events)
disk_unblock_events(disk);
- return bdev;
+ handle->bdev = bdev;
+ handle->holder = holder;
+ handle->mode = mode;
+ return handle;
put_module:
module_put(disk->fops->owner);
abort_claiming:
@@ -827,34 +905,14 @@ abort_claiming:
disk_unblock_events(disk);
put_blkdev:
blkdev_put_no_open(bdev);
+free_handle:
+ kfree(handle);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(blkdev_get_by_dev);
-
-struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
- const struct blk_holder_ops *hops)
-{
- struct bdev_handle *handle = kmalloc(sizeof(*handle), GFP_KERNEL);
- struct block_device *bdev;
-
- if (!handle)
- return ERR_PTR(-ENOMEM);
- bdev = blkdev_get_by_dev(dev, mode, holder, hops);
- if (IS_ERR(bdev)) {
- kfree(handle);
- return ERR_CAST(bdev);
- }
- handle->bdev = bdev;
- handle->holder = holder;
- if (holder)
- mode |= BLK_OPEN_EXCL;
- handle->mode = mode;
- return handle;
-}
EXPORT_SYMBOL(bdev_open_by_dev);
/**
- * blkdev_get_by_path - open a block device by name
+ * bdev_open_by_path - open a block device by name
* @path: path to the block device to open
* @mode: open mode (BLK_OPEN_*)
* @holder: exclusive holder identifier
@@ -868,29 +926,9 @@ EXPORT_SYMBOL(bdev_open_by_dev);
* Might sleep.
*
* RETURNS:
- * Reference to the block_device on success, ERR_PTR(-errno) on failure.
+ * Handle with a reference to the block_device on success, ERR_PTR(-errno) on
+ * failure.
*/
-struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
- void *holder, const struct blk_holder_ops *hops)
-{
- struct block_device *bdev;
- dev_t dev;
- int error;
-
- error = lookup_bdev(path, &dev);
- if (error)
- return ERR_PTR(error);
-
- bdev = blkdev_get_by_dev(dev, mode, holder, hops);
- if (!IS_ERR(bdev) && (mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
- blkdev_put(bdev, holder);
- return ERR_PTR(-EACCES);
- }
-
- return bdev;
-}
-EXPORT_SYMBOL(blkdev_get_by_path);
-
struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
void *holder, const struct blk_holder_ops *hops)
{
@@ -913,8 +951,9 @@ struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
}
EXPORT_SYMBOL(bdev_open_by_path);
-void blkdev_put(struct block_device *bdev, void *holder)
+void bdev_release(struct bdev_handle *handle)
{
+ struct block_device *bdev = handle->bdev;
struct gendisk *disk = bdev->bd_disk;
/*
@@ -928,8 +967,10 @@ void blkdev_put(struct block_device *bdev, void *holder)
sync_blockdev(bdev);
mutex_lock(&disk->open_mutex);
- if (holder)
- bd_end_claim(bdev, holder);
+ bdev_yield_write_access(bdev, handle->mode);
+
+ if (handle->holder)
+ bd_end_claim(bdev, handle->holder);
/*
* Trigger event checking and tell drivers to flush MEDIA_CHANGE
@@ -946,12 +987,6 @@ void blkdev_put(struct block_device *bdev, void *holder)
module_put(disk->fops->owner);
blkdev_put_no_open(bdev);
-}
-EXPORT_SYMBOL(blkdev_put);
-
-void bdev_release(struct bdev_handle *handle)
-{
- blkdev_put(handle->bdev, handle->holder);
kfree(handle);
}
EXPORT_SYMBOL(bdev_release);
@@ -1102,3 +1137,12 @@ void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
blkdev_put_no_open(bdev);
}
+
+static int __init setup_bdev_allow_write_mounted(char *str)
+{
+ if (kstrtobool(str, &bdev_allow_write_mounted))
+ pr_warn("Invalid option string for bdev_allow_write_mounted:"
+ " '%s'\n", str);
+ return 1;
+}
+__setup("bdev_allow_write_mounted=", setup_bdev_allow_write_mounted);
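For orientation, a minimal caller sketch (not part of the patch) combining the renamed freeze/thaw helpers with the bdev_handle based open API above; the device path, the snapshot step and the function name are made up, error handling is abbreviated, and the usual block layer headers (<linux/blkdev.h>, <linux/err.h>) are assumed.

static int snapshot_example(void)
{
	struct bdev_handle *handle;
	int error;

	/* Non-exclusive open; the handle remembers holder and mode for release. */
	handle = bdev_open_by_path("/dev/sda1",
				   BLK_OPEN_READ | BLK_OPEN_WRITE,
				   NULL, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Quiesce writes; nested callers are handled via bd_fsfreeze_count. */
	error = bdev_freeze(handle->bdev);
	if (!error) {
		/* ... take a consistent snapshot here ... */
		error = bdev_thaw(handle->bdev);
	}

	bdev_release(handle);
	return error;
}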
diff --git a/block/fops.c b/block/fops.c
index 0abaac705daf..0cf8cf72cdfa 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -410,9 +410,24 @@ static int blkdev_get_block(struct inode *inode, sector_t iblock,
return 0;
}
-static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
+/*
+ * We cannot call mpage_writepages() as it does not take the buffer lock.
+ * We must use block_write_full_folio() directly which holds the buffer
+ * lock. The buffer lock provides the synchronisation with writeback
+ * that filesystems rely on when they use the blockdev's mapping.
+ */
+static int blkdev_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- return block_write_full_page(page, blkdev_get_block, wbc);
+ struct blk_plug plug;
+ int err;
+
+ blk_start_plug(&plug);
+ err = write_cache_pages(mapping, wbc, block_write_full_folio,
+ blkdev_get_block);
+ blk_finish_plug(&plug);
+
+ return err;
}
static int blkdev_read_folio(struct file *file, struct folio *folio)
@@ -449,7 +464,7 @@ const struct address_space_operations def_blk_aops = {
.invalidate_folio = block_invalidate_folio,
.read_folio = blkdev_read_folio,
.readahead = blkdev_readahead,
- .writepage = blkdev_writepage,
+ .writepages = blkdev_writepages,
.write_begin = blkdev_write_begin,
.write_end = blkdev_write_end,
.migrate_folio = buffer_migrate_folio_norefs,
@@ -500,7 +515,7 @@ const struct address_space_operations def_blk_aops = {
.readahead = blkdev_readahead,
.writepages = blkdev_writepages,
.is_partially_uptodate = iomap_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
.migrate_folio = filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */
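As an aside on how the converted hook is reached (a sketch assuming the standard writeback path, not code from this patch): writeback of the block device's page cache, for example via sync_blockdev(), ends up calling the address_space operation installed above.

/*
 * Illustration only:
 *   sync_blockdev() -> filemap_write_and_wait() -> do_writepages()
 *     -> mapping->a_ops->writepages() == blkdev_writepages()
 */
static int flush_bdev_pagecache_example(struct block_device *bdev)
{
	return sync_blockdev(bdev);
}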
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index 9711e8fc979d..3a89644f087c 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -2044,7 +2044,7 @@ static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64
notifier_event->events_mask |= event_mask;
if (notifier_event->eventfd)
- eventfd_signal(notifier_event->eventfd, 1);
+ eventfd_signal(notifier_event->eventfd);
mutex_unlock(&notifier_event->lock);
}
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index 5036e58e7235..1405623b03e4 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -404,8 +404,21 @@ static struct mhi_controller_config aic100_config = {
static int mhi_read_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 *out)
{
- u32 tmp = readl_relaxed(addr);
+ u32 tmp;
+ /*
+ * SOC_HW_VERSION quirk
+ * The SOC_HW_VERSION register (offset 0x224) is not reliable and
+ * may contain uninitialized values, including 0xFFFFFFFF. This could
+ * cause a false positive link down error. Instead, intercept any
+ * reads and provide the correct value of the register.
+ */
+ if (addr - mhi_cntrl->regs == 0x224) {
+ *out = 0x60110200;
+ return 0;
+ }
+
+ tmp = readl_relaxed(addr);
if (tmp == U32_MAX)
return -EIO;
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 4a8e43a7a6a4..24e886f857d5 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -451,7 +451,7 @@ static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 s
* later
*/
buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE;
- max_order = min(MAX_ORDER - 1, get_order(size));
+ max_order = min(MAX_PAGE_ORDER - 1, get_order(size));
} else {
/* allocate a single page for book keeping */
nr_pages = 1;
@@ -777,7 +777,6 @@ struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_
struct dma_buf_attachment *attach;
struct drm_gem_object *obj;
struct qaic_bo *bo;
- size_t size;
int ret;
bo = qaic_alloc_init_bo();
@@ -795,13 +794,12 @@ struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_
goto attach_fail;
}
- size = PAGE_ALIGN(attach->dmabuf->size);
- if (size == 0) {
+ if (!attach->dmabuf->size) {
ret = -EINVAL;
goto size_align_fail;
}
- drm_gem_private_object_init(dev, obj, size);
+ drm_gem_private_object_init(dev, obj, attach->dmabuf->size);
/*
* skipping dma_buf_map_attachment() as we do not know the direction
* just yet. Once the direction is known in the subsequent IOCTL to
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index f819e760ff19..6f2bfcf7645c 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -61,6 +61,10 @@ config ACPI_CCA_REQUIRED
config ACPI_TABLE_LIB
bool
+config ACPI_THERMAL_LIB
+ depends on THERMAL
+ bool
+
config ACPI_DEBUGGER
bool "AML debugger interface"
select ACPI_DEBUG
@@ -327,6 +331,7 @@ config ACPI_THERMAL
tristate "Thermal Zone"
depends on ACPI_PROCESSOR
select THERMAL
+ select ACPI_THERMAL_LIB
default y
help
This driver supports ACPI thermal zones. Most mobile and
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index eaa09bf52f17..12ef8180d272 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -37,7 +37,7 @@ acpi-$(CONFIG_ACPI_SLEEP) += proc.o
# ACPI Bus and Device Drivers
#
acpi-y += bus.o glue.o
-acpi-y += scan.o
+acpi-y += scan.o mipi-disco-img.o
acpi-y += resource.o
acpi-y += acpi_processor.o
acpi-y += processor_core.o
@@ -89,6 +89,7 @@ obj-$(CONFIG_ACPI_TAD) += acpi_tad.o
obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
obj-$(CONFIG_ACPI) += container.o
+obj-$(CONFIG_ACPI_THERMAL_LIB) += thermal_lib.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
obj-$(CONFIG_ACPI_PLATFORM_PROFILE) += platform_profile.o
obj-$(CONFIG_ACPI_NFIT) += nfit/
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index e120a96e1eae..ca87a0939135 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -145,9 +145,14 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
static u32 err_seq;
estatus = extlog_elog_entry_check(cpu, bank);
- if (estatus == NULL || (mce->kflags & MCE_HANDLED_CEC))
+ if (!estatus)
return NOTIFY_DONE;
+ if (mce->kflags & MCE_HANDLED_CEC) {
+ estatus->block_status = 0;
+ return NOTIFY_DONE;
+ }
+
memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN);
/* clear record status to enable BIOS to update it again */
estatus->block_status = 0;
@@ -303,9 +308,10 @@ err:
static void __exit extlog_exit(void)
{
mce_unregister_decode_chain(&extlog_mce_dec);
- ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
- if (extlog_l1_addr)
+ if (extlog_l1_addr) {
+ ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
acpi_os_unmap_iomem(extlog_l1_addr, l1_size);
+ }
if (elog_addr)
acpi_os_unmap_iomem(elog_addr, elog_size);
release_mem_region(elog_base, elog_size);
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index c5598b6d5db8..794962c5c88e 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -105,7 +105,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
return;
info->frequency = lpit_native->counter_frequency ?
- lpit_native->counter_frequency : tsc_khz * 1000;
+ lpit_native->counter_frequency : mul_u32_u32(tsc_khz, 1000U);
if (!info->frequency)
info->frequency = 1;
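A short worked example of the 32-bit overflow the mul_u32_u32() change above avoids; the 5 GHz TSC rate is made up for illustration.

/*
 * tsc_khz is 32-bit, so "tsc_khz * 1000" used to be evaluated in 32 bits
 * before the assignment to the 64-bit frequency field. For tsc_khz above
 * 4294967 (~4.29 GHz) the product wraps: 5000000 * 1000 = 5000000000,
 * which becomes 705032704 in u32. mul_u32_u32() widens to 64 bits first.
 */
static u64 lpit_frequency_example(void)
{
	u32 khz = 5000000;			/* hypothetical 5 GHz TSC */

	return mul_u32_u32(khz, 1000U);		/* 5000000000, no wrap */
}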
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 875de44961bf..04e273167e92 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -167,13 +167,9 @@ static struct pwm_lookup byt_pwm_lookup[] = {
static void byt_pwm_setup(struct lpss_private_data *pdata)
{
- u64 uid;
-
/* Only call pwm_add_table for the first PWM controller */
- if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1)
- return;
-
- pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
+ if (acpi_dev_uid_match(pdata->adev, 1))
+ pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}
#define LPSS_I2C_ENABLE 0x6c
@@ -218,13 +214,9 @@ static struct pwm_lookup bsw_pwm_lookup[] = {
static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
- u64 uid;
-
/* Only call pwm_add_table for the first PWM controller */
- if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1)
- return;
-
- pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
+ if (acpi_dev_uid_match(pdata->adev, 1))
+ pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}
static const struct property_entry lpt_spi_properties[] = {
@@ -461,8 +453,9 @@ static int register_device_clock(struct acpi_device *adev,
if (!clk_name)
return -ENOMEM;
clk = clk_register_fractional_divider(NULL, clk_name, parent,
+ 0, prv_base, 1, 15, 16, 15,
CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
- prv_base, 1, 15, 16, 15, 0, NULL);
+ NULL);
parent = clk_name;
clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
@@ -570,34 +563,6 @@ static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
}
-static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
-{
- struct acpi_handle_list dep_devices;
- acpi_status status;
- bool ret = false;
- int i;
-
- if (!acpi_has_method(adev->handle, "_DEP"))
- return false;
-
- status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
- &dep_devices);
- if (ACPI_FAILURE(status)) {
- dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
- return false;
- }
-
- for (i = 0; i < dep_devices.count; i++) {
- if (dep_devices.handles[i] == handle) {
- ret = true;
- break;
- }
- }
-
- acpi_handle_list_free(&dep_devices);
- return ret;
-}
-
static void acpi_lpss_link_consumer(struct device *dev1,
const struct lpss_device_links *link)
{
@@ -608,7 +573,7 @@ static void acpi_lpss_link_consumer(struct device *dev1,
return;
if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
- || acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
+ || acpi_device_dep(ACPI_HANDLE(dev2), ACPI_HANDLE(dev1)))
device_link_add(dev2, dev1, link->flags);
put_device(dev2);
@@ -624,7 +589,7 @@ static void acpi_lpss_link_supplier(struct device *dev1,
return;
if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
- || acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
+ || acpi_device_dep(ACPI_HANDLE(dev1), ACPI_HANDLE(dev2)))
device_link_add(dev1, dev2, link->flags);
put_device(dev2);
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 6cee536c229a..4afdda9db019 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -67,7 +67,7 @@ MODULE_PARM_DESC(hw_changes_brightness,
static bool device_id_scheme = false;
module_param(device_id_scheme, bool, 0444);
-static int only_lcd = -1;
+static int only_lcd;
module_param(only_lcd, int, 0444);
static bool may_report_brightness_keys;
@@ -500,6 +500,15 @@ static const struct dmi_system_id video_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
},
},
+ {
+ .callback = video_set_report_key_events,
+ .driver_data = (void *)((uintptr_t)REPORT_BRIGHTNESS_KEY_EVENTS),
+ .ident = "COLORFUL X15 AT 23",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "COLORFUL"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X15 AT 23"),
+ },
+ },
/*
* Some machines change the brightness themselves when a brightness
* hotkey gets pressed, despite us telling them not to. In this case
@@ -1713,12 +1722,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
return;
count++;
- acpi_get_parent(device->dev->handle, &acpi_parent);
-
- pdev = acpi_get_pci_dev(acpi_parent);
- if (pdev) {
- parent = &pdev->dev;
- pci_dev_put(pdev);
+ if (ACPI_SUCCESS(acpi_get_parent(device->dev->handle, &acpi_parent))) {
+ pdev = acpi_get_pci_dev(acpi_parent);
+ if (pdev) {
+ parent = &pdev->dev;
+ pci_dev_put(pdev);
+ }
}
memset(&props, 0, sizeof(struct backlight_properties));
@@ -2137,57 +2146,6 @@ static int __init intel_opregion_present(void)
return opregion;
}
-/* Check if the chassis-type indicates there is no builtin LCD panel */
-static bool dmi_is_desktop(void)
-{
- const char *chassis_type;
- unsigned long type;
-
- chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
- if (!chassis_type)
- return false;
-
- if (kstrtoul(chassis_type, 10, &type) != 0)
- return false;
-
- switch (type) {
- case 0x03: /* Desktop */
- case 0x04: /* Low Profile Desktop */
- case 0x05: /* Pizza Box */
- case 0x06: /* Mini Tower */
- case 0x07: /* Tower */
- case 0x10: /* Lunch Box */
- case 0x11: /* Main Server Chassis */
- return true;
- }
-
- return false;
-}
-
-/*
- * We're seeing a lot of bogus backlight interfaces on newer machines
- * without a LCD such as desktops, servers and HDMI sticks. Checking the
- * lcd flag fixes this, enable this by default on any machines which are:
- * 1. Win8 ready (where we also prefer the native backlight driver, so
- * normally the acpi_video code should not register there anyways); *and*
- * 2.1 Report a desktop/server DMI chassis-type, or
- * 2.2 Are an ACPI-reduced-hardware platform (and thus won't use the EC for
- backlight control)
- */
-static bool should_check_lcd_flag(void)
-{
- if (!acpi_osi_is_win8())
- return false;
-
- if (dmi_is_desktop())
- return true;
-
- if (acpi_reduced_hardware())
- return true;
-
- return false;
-}
-
int acpi_video_register(void)
{
int ret = 0;
@@ -2201,9 +2159,6 @@ int acpi_video_register(void)
goto leave;
}
- if (only_lcd == -1)
- only_lcd = should_check_lcd_flag();
-
dmi_check_system(video_dmi_table);
ret = acpi_bus_register_driver(&acpi_video_bus);
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index ca28183f4d13..8e9e001da38f 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -81,7 +81,7 @@ static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
return wdat;
}
-/**
+/*
* Returns true if this system should prefer ACPI based watchdog instead of
* the native one (which are typically the same hardware).
*/
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 013eb621dc92..89fb9331c611 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -73,6 +73,7 @@ static u32 notrigger;
static u32 vendor_flags;
static struct debugfs_blob_wrapper vendor_blob;
+static struct debugfs_blob_wrapper vendor_errors;
static char vendor_dev[64];
/*
@@ -182,6 +183,21 @@ static int einj_timedout(u64 *t)
return 0;
}
+static void get_oem_vendor_struct(u64 paddr, int offset,
+ struct vendor_error_type_extension *v)
+{
+ unsigned long vendor_size;
+ u64 target_pa = paddr + offset + sizeof(struct vendor_error_type_extension);
+
+ vendor_size = v->length - sizeof(struct vendor_error_type_extension);
+
+ if (vendor_size)
+ vendor_errors.data = acpi_os_map_memory(target_pa, vendor_size);
+
+ if (vendor_errors.data)
+ vendor_errors.size = vendor_size;
+}
+
static void check_vendor_extension(u64 paddr,
struct set_error_type_with_address *v5param)
{
@@ -194,6 +210,7 @@ static void check_vendor_extension(u64 paddr,
v = acpi_os_map_iomem(paddr + offset, sizeof(*v));
if (!v)
return;
+ get_oem_vendor_struct(paddr, offset, v);
sbdf = v->pcie_sbdf;
sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
sbdf >> 24, (sbdf >> 16) & 0xff,
@@ -577,38 +594,40 @@ static u64 error_param2;
static u64 error_param3;
static u64 error_param4;
static struct dentry *einj_debug_dir;
-static const char * const einj_error_type_string[] = {
- "0x00000001\tProcessor Correctable\n",
- "0x00000002\tProcessor Uncorrectable non-fatal\n",
- "0x00000004\tProcessor Uncorrectable fatal\n",
- "0x00000008\tMemory Correctable\n",
- "0x00000010\tMemory Uncorrectable non-fatal\n",
- "0x00000020\tMemory Uncorrectable fatal\n",
- "0x00000040\tPCI Express Correctable\n",
- "0x00000080\tPCI Express Uncorrectable non-fatal\n",
- "0x00000100\tPCI Express Uncorrectable fatal\n",
- "0x00000200\tPlatform Correctable\n",
- "0x00000400\tPlatform Uncorrectable non-fatal\n",
- "0x00000800\tPlatform Uncorrectable fatal\n",
- "0x00001000\tCXL.cache Protocol Correctable\n",
- "0x00002000\tCXL.cache Protocol Uncorrectable non-fatal\n",
- "0x00004000\tCXL.cache Protocol Uncorrectable fatal\n",
- "0x00008000\tCXL.mem Protocol Correctable\n",
- "0x00010000\tCXL.mem Protocol Uncorrectable non-fatal\n",
- "0x00020000\tCXL.mem Protocol Uncorrectable fatal\n",
+static struct { u32 mask; const char *str; } const einj_error_type_string[] = {
+ { BIT(0), "Processor Correctable" },
+ { BIT(1), "Processor Uncorrectable non-fatal" },
+ { BIT(2), "Processor Uncorrectable fatal" },
+ { BIT(3), "Memory Correctable" },
+ { BIT(4), "Memory Uncorrectable non-fatal" },
+ { BIT(5), "Memory Uncorrectable fatal" },
+ { BIT(6), "PCI Express Correctable" },
+ { BIT(7), "PCI Express Uncorrectable non-fatal" },
+ { BIT(8), "PCI Express Uncorrectable fatal" },
+ { BIT(9), "Platform Correctable" },
+ { BIT(10), "Platform Uncorrectable non-fatal" },
+ { BIT(11), "Platform Uncorrectable fatal" },
+ { BIT(12), "CXL.cache Protocol Correctable" },
+ { BIT(13), "CXL.cache Protocol Uncorrectable non-fatal" },
+ { BIT(14), "CXL.cache Protocol Uncorrectable fatal" },
+ { BIT(15), "CXL.mem Protocol Correctable" },
+ { BIT(16), "CXL.mem Protocol Uncorrectable non-fatal" },
+ { BIT(17), "CXL.mem Protocol Uncorrectable fatal" },
+ { BIT(31), "Vendor Defined Error Types" },
};
static int available_error_type_show(struct seq_file *m, void *v)
{
int rc;
- u32 available_error_type = 0;
+ u32 error_type = 0;
- rc = einj_get_available_error_type(&available_error_type);
+ rc = einj_get_available_error_type(&error_type);
if (rc)
return rc;
for (int pos = 0; pos < ARRAY_SIZE(einj_error_type_string); pos++)
- if (available_error_type & BIT(pos))
- seq_puts(m, einj_error_type_string[pos]);
+ if (error_type & einj_error_type_string[pos].mask)
+ seq_printf(m, "0x%08x\t%s\n", einj_error_type_string[pos].mask,
+ einj_error_type_string[pos].str);
return 0;
}
@@ -767,6 +786,10 @@ static int __init einj_init(void)
einj_debug_dir, &vendor_flags);
}
+ if (vendor_errors.size)
+ debugfs_create_blob("oem_error", 0600, einj_debug_dir,
+ &vendor_errors);
+
pr_info("Error INJection is initialized.\n");
return 0;
@@ -792,6 +815,8 @@ static void __exit einj_exit(void)
sizeof(struct einj_parameter);
acpi_os_unmap_iomem(einj_param, size);
+ if (vendor_errors.size)
+ acpi_os_unmap_memory(vendor_errors.data, vendor_errors.size);
}
einj_exec_ctx_init(&ctx);
apei_exec_post_unmap_gars(&ctx);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 63ad0541db38..ab2a82cb1b0b 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -102,6 +102,20 @@ static inline bool is_hest_type_generic_v2(struct ghes *ghes)
}
/*
+ * A platform may describe one error source for the handling of synchronous
+ * errors (e.g. MCE or SEA), or for handling asynchronous errors (e.g. SCI
+ * or External Interrupt). On x86, the HEST notifications are always
+ * asynchronous, so only SEA on ARM is delivered as a synchronous
+ * notification.
+ */
+static inline bool is_hest_sync_notify(struct ghes *ghes)
+{
+ u8 notify_type = ghes->generic->notify.type;
+
+ return notify_type == ACPI_HEST_NOTIFY_SEA;
+}
+
+/*
* This driver isn't really modular, however for the time being,
* continuing to use module_param is the easiest way to remain
* compatible with existing boot arg use cases.
@@ -489,7 +503,7 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags)
}
static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
- int sev)
+ int sev, bool sync)
{
int flags = -1;
int sec_sev = ghes_severity(gdata->error_severity);
@@ -503,7 +517,7 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
(gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
flags = MF_SOFT_OFFLINE;
if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
- flags = 0;
+ flags = sync ? MF_ACTION_REQUIRED : 0;
if (flags != -1)
return ghes_do_memory_failure(mem_err->physical_addr, flags);
@@ -511,9 +525,11 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
return false;
}
-static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int sev)
+static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
+ int sev, bool sync)
{
struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
+ int flags = sync ? MF_ACTION_REQUIRED : 0;
bool queued = false;
int sec_sev, i;
char *p;
@@ -538,7 +554,7 @@ static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int s
* and don't filter out 'corrected' error here.
*/
if (is_cache && has_pa) {
- queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0);
+ queued = ghes_do_memory_failure(err_info->physical_fault_addr, flags);
p += err_info->length;
continue;
}
@@ -666,6 +682,7 @@ static bool ghes_do_proc(struct ghes *ghes,
const guid_t *fru_id = &guid_null;
char *fru_text = "";
bool queued = false;
+ bool sync = is_hest_sync_notify(ghes);
sev = ghes_severity(estatus->error_severity);
apei_estatus_for_each_section(estatus, gdata) {
@@ -683,13 +700,13 @@ static bool ghes_do_proc(struct ghes *ghes,
atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err);
arch_apei_report_mem_error(sev, mem_err);
- queued = ghes_handle_memory_failure(gdata, sev);
+ queued = ghes_handle_memory_failure(gdata, sev, sync);
}
else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
ghes_handle_aer(gdata);
}
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
- queued = ghes_handle_arm_hw_error(gdata, sev);
+ queued = ghes_handle_arm_hw_error(gdata, sev, sync);
} else {
void *err = acpi_hest_get_payload(gdata);
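Restating the flag selection that the hunks above spread across two handlers (a condensed sketch, not additional patch code; the "no action" default of -1 is elided):

/*
 * How the notification type now maps to memory_failure() flags: corrected
 * errors over threshold are soft-offlined, while recoverable errors
 * reported through a synchronous notification (SEA) demand immediate action.
 */
static int ghes_mf_flags_example(bool sync, bool threshold_exceeded)
{
	if (threshold_exceeded)
		return MF_SOFT_OFFLINE;
	return sync ? MF_ACTION_REQUIRED : 0;
}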
diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile
index 143debc1ba4a..726944648c9b 100644
--- a/drivers/acpi/arm64/Makefile
+++ b/drivers/acpi/arm64/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_ACPI_GTDT) += gtdt.o
obj-$(CONFIG_ACPI_APMT) += apmt.o
obj-$(CONFIG_ARM_AMBA) += amba.o
obj-y += dma.o init.o
+obj-y += thermal_cpufreq.o
diff --git a/drivers/acpi/arm64/thermal_cpufreq.c b/drivers/acpi/arm64/thermal_cpufreq.c
new file mode 100644
index 000000000000..582854914c5c
--- /dev/null
+++ b/drivers/acpi/arm64/thermal_cpufreq.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/acpi.h>
+#include <linux/export.h>
+
+#include "../internal.h"
+
+#define SMCCC_SOC_ID_T241 0x036b0241
+
+int acpi_arch_thermal_cpufreq_pctg(void)
+{
+ s32 soc_id = arm_smccc_get_soc_id_version();
+
+ /*
+ * Check JEP106 code for NVIDIA Tegra241 chip (036b:0241) and
+ * reduce the CPUFREQ Thermal reduction percentage to 5%.
+ */
+ if (soc_id == SMCCC_SOC_ID_T241)
+ return 5;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_arch_thermal_cpufreq_pctg);
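A hypothetical consumer of the new arch hook, for illustration only; the helper name and the 20% fallback are assumptions, not taken from this patch.

static int thermal_cpufreq_reduction_pctg_example(void)
{
	int pctg = acpi_arch_thermal_cpufreq_pctg();

	/* 0 means "no arch-specific override"; fall back to a generic default. */
	return pctg ? pctg : 20;
}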
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 72e64c0718c9..569bd15f211b 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -408,7 +408,7 @@ static void acpi_bus_decode_usb_osc(const char *msg, u32 bits)
static u8 sb_usb_uuid_str[] = "23A0D13A-26AB-486C-9C5F-0FFA525A575A";
static void acpi_bus_osc_negotiate_usb_control(void)
{
- u32 capbuf[3];
+ u32 capbuf[3], *capbuf_ret;
struct acpi_osc_context context = {
.uuid_str = sb_usb_uuid_str,
.rev = 1,
@@ -428,7 +428,12 @@ static void acpi_bus_osc_negotiate_usb_control(void)
control = OSC_USB_USB3_TUNNELING | OSC_USB_DP_TUNNELING |
OSC_USB_PCIE_TUNNELING | OSC_USB_XDOMAIN;
- capbuf[OSC_QUERY_DWORD] = 0;
+ /*
+ * Run _OSC first with query bit set, trying to get control over
+ * all tunneling. The platform can then clear out bits in the
+ * control dword that it does not want to grant to the OS.
+ */
+ capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
capbuf[OSC_SUPPORT_DWORD] = 0;
capbuf[OSC_CONTROL_DWORD] = control;
@@ -441,8 +446,29 @@ static void acpi_bus_osc_negotiate_usb_control(void)
goto out_free;
}
+ /*
+ * Run _OSC again now with query bit clear and the control dword
+ * matching what the platform granted (which may not have all
+ * the control bits set).
+ */
+ capbuf_ret = context.ret.pointer;
+
+ capbuf[OSC_QUERY_DWORD] = 0;
+ capbuf[OSC_CONTROL_DWORD] = capbuf_ret[OSC_CONTROL_DWORD];
+
+ kfree(context.ret.pointer);
+
+ status = acpi_run_osc(handle, &context);
+ if (ACPI_FAILURE(status))
+ return;
+
+ if (context.ret.length != sizeof(capbuf)) {
+ pr_info("USB4 _OSC: returned invalid length buffer\n");
+ goto out_free;
+ }
+
osc_sb_native_usb4_control =
- control & acpi_osc_ctx_get_pci_control(&context);
+ control & acpi_osc_ctx_get_pci_control(&context);
acpi_bus_decode_usb_osc("USB4 _OSC: OS supports", control);
acpi_bus_decode_usb_osc("USB4 _OSC: OS controls",
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 1e76a64cce0a..cc61020756be 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -480,6 +480,7 @@ static int acpi_button_suspend(struct device *dev)
static int acpi_button_resume(struct device *dev)
{
+ struct input_dev *input;
struct acpi_device *device = to_acpi_device(dev);
struct acpi_button *button = acpi_driver_data(device);
@@ -489,6 +490,14 @@ static int acpi_button_resume(struct device *dev)
button->last_time = ktime_get();
acpi_lid_initialize_state(device);
}
+
+ if (button->type == ACPI_BUTTON_TYPE_POWER) {
+ input = button->input;
+ input_report_key(input, KEY_WAKEUP, 1);
+ input_sync(input);
+ input_report_key(input, KEY_WAKEUP, 0);
+ input_sync(input);
+ }
return 0;
}
#endif
@@ -579,6 +588,7 @@ static int acpi_button_add(struct acpi_device *device)
switch (button->type) {
case ACPI_BUTTON_TYPE_POWER:
input_set_capability(input, EV_KEY, KEY_POWER);
+ input_set_capability(input, EV_KEY, KEY_WAKEUP);
break;
case ACPI_BUTTON_TYPE_SLEEP:
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 7ff269a78c20..d155a86a8614 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -39,6 +39,9 @@
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/topology.h>
+#include <linux/dmi.h>
+#include <linux/units.h>
+#include <asm/unaligned.h>
#include <acpi/cppc_acpi.h>
@@ -1760,3 +1763,104 @@ unsigned int cppc_get_transition_latency(int cpu_num)
return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
+
+/* Minimum struct length needed for the DMI processor entry we want */
+#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
+
+/* Offset in the DMI processor structure for the max frequency */
+#define DMI_PROCESSOR_MAX_SPEED 0x14
+
+/* Callback function used to retrieve the max frequency from DMI */
+static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
+{
+ const u8 *dmi_data = (const u8 *)dm;
+ u16 *mhz = (u16 *)private;
+
+ if (dm->type == DMI_ENTRY_PROCESSOR &&
+ dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
+ u16 val = (u16)get_unaligned((const u16 *)
+ (dmi_data + DMI_PROCESSOR_MAX_SPEED));
+ *mhz = val > *mhz ? val : *mhz;
+ }
+}
+
+/* Look up the max frequency in DMI */
+static u64 cppc_get_dmi_max_khz(void)
+{
+ u16 mhz = 0;
+
+ dmi_walk(cppc_find_dmi_mhz, &mhz);
+
+ /*
+ * Real stupid fallback value, just in case there is no
+ * actual value set.
+ */
+ mhz = mhz ? mhz : 1;
+
+ return KHZ_PER_MHZ * mhz;
+}
+
+/*
+ * If CPPC lowest_freq and nominal_freq registers are exposed then we can
+ * use them to convert perf to freq and vice versa. The conversion is
+ * extrapolated as an affine function passing by the 2 points:
+ * - (Low perf, Low freq)
+ * - (Nominal perf, Nominal freq)
+ */
+unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
+{
+ s64 retval, offset = 0;
+ static u64 max_khz;
+ u64 mul, div;
+
+ if (caps->lowest_freq && caps->nominal_freq) {
+ mul = caps->nominal_freq - caps->lowest_freq;
+ mul *= KHZ_PER_MHZ;
+ div = caps->nominal_perf - caps->lowest_perf;
+ offset = caps->nominal_freq * KHZ_PER_MHZ -
+ div64_u64(caps->nominal_perf * mul, div);
+ } else {
+ if (!max_khz)
+ max_khz = cppc_get_dmi_max_khz();
+ mul = max_khz;
+ div = caps->highest_perf;
+ }
+
+ retval = offset + div64_u64(perf * mul, div);
+ if (retval >= 0)
+ return retval;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_perf_to_khz);
+
+unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
+{
+ s64 retval, offset = 0;
+ static u64 max_khz;
+ u64 mul, div;
+
+ if (caps->lowest_freq && caps->nominal_freq) {
+ mul = caps->nominal_perf - caps->lowest_perf;
+ div = caps->nominal_freq - caps->lowest_freq;
+ /*
+ * We don't need to convert to kHz for computing offset and can
+ * directly use nominal_freq and lowest_freq as the div64_u64
+ * will remove the frequency unit.
+ */
+ offset = caps->nominal_perf -
+ div64_u64(caps->nominal_freq * mul, div);
+ /* But we need it for computing the perf level. */
+ div *= KHZ_PER_MHZ;
+ } else {
+ if (!max_khz)
+ max_khz = cppc_get_dmi_max_khz();
+ mul = caps->highest_perf;
+ div = max_khz;
+ }
+
+ retval = offset + div64_u64(freq * mul, div);
+ if (retval >= 0)
+ return retval;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_khz_to_perf);
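A worked example of the affine conversion implemented by cppc_perf_to_khz() above; the capability values are made up for illustration.

/*
 * With lowest_perf = 10, lowest_freq = 500 MHz, nominal_perf = 100 and
 * nominal_freq = 2000 MHz:
 *   mul    = (2000 - 500) * 1000           = 1500000
 *   div    = 100 - 10                      = 90
 *   offset = 2000 * 1000 - 100 * mul / div ~= 333334 kHz
 * so perf = 50 maps to offset + 50 * mul / div ~= 1166667 kHz (~1.17 GHz).
 */
static unsigned int cppc_conversion_example(struct cppc_perf_caps *caps)
{
	return cppc_perf_to_khz(caps, 50);
}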
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index a59c11df7375..dbdee2924594 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -525,12 +525,10 @@ static void acpi_ec_clear(struct acpi_ec *ec)
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ec->lock, flags);
+ spin_lock(&ec->lock);
if (acpi_ec_started(ec))
__acpi_ec_enable_event(ec);
- spin_unlock_irqrestore(&ec->lock, flags);
+ spin_unlock(&ec->lock);
/* Drain additional events if hardware requires that */
if (EC_FLAGS_CLEAR_ON_RESUME)
@@ -546,11 +544,9 @@ static void __acpi_ec_flush_work(void)
static void acpi_ec_disable_event(struct acpi_ec *ec)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ec->lock, flags);
+ spin_lock(&ec->lock);
__acpi_ec_disable_event(ec);
- spin_unlock_irqrestore(&ec->lock, flags);
+ spin_unlock(&ec->lock);
/*
* When ec_freeze_events is true, we need to flush events in
@@ -571,10 +567,9 @@ void acpi_ec_flush_work(void)
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
- unsigned long flags;
bool guarded;
- spin_lock_irqsave(&ec->lock, flags);
+ spin_lock(&ec->lock);
/*
* If firmware SCI_EVT clearing timing is "event", we actually
* don't know when the SCI_EVT will be cleared by firmware after
@@ -590,31 +585,29 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec)
guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
ec->event_state != EC_EVENT_READY &&
(!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
- spin_unlock_irqrestore(&ec->lock, flags);
+ spin_unlock(&ec->lock);
return guarded;
}
static int ec_transaction_polled(struct acpi_ec *ec)
{
- unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&ec->lock, flags);
+ spin_lock(&ec->lock);
if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
ret = 1;
- spin_unlock_irqrestore(&ec->lock, flags);
+ spin_unlock(&ec->lock);
return ret;
}
static int ec_transaction_completed(struct acpi_ec *ec)
{
- unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&ec->lock, flags);
+ spin_lock(&ec->lock);
if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
ret = 1;
- spin_unlock_irqrestore(&ec->lock, flags);
+ spin_unlock(&ec->lock);
return ret;
}
@@ -756,7 +749,6 @@ static int ec_guard(struct acpi_ec *ec)
static int ec_poll(struct acpi_ec *ec)
{
- unsigned long flags;
int repeat = 5; /* number of command restarts */
while (repeat--) {
@@ -765,14 +757,14 @@ static int ec_poll(struct acpi_ec *ec)
do {
if (!ec_guard(ec))
return 0;
- spin_lock_irqsave(&ec->lock, flags);
+ spin_lock(&ec->lock);
advance_transaction(ec, false);
- spin_unlock_irqrestore(&ec->lock, flags);
+ spin_unlock(&ec->lock);
} while (time_before(jiffies, delay));
pr_debug("controller reset, restart transaction\n");
- spin_lock_irqsave(&ec->lock, flags);
+ spin_lock(&ec->lock);
start_transaction(ec);
- spin_unlock_irqrestore(&ec->lock, flags);
+ spin_unlock(&ec->lock);
}
return -ETIME;
}
@@ -780,11 +772,10 @@ static int ec_poll(struct acpi_ec *ec)
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
struct transaction *t)
{
- unsigned long tmp;
int ret = 0;
/* start transaction */
- spin_lock_irqsave(&ec->lock, tmp);
+ spin_lock(&ec->lock);
/* Enable GPE for command processing (IBF=0/OBF=1) */
if (!acpi_ec_submit_flushable_request(ec)) {
ret = -EINVAL;
@@ -795,11 +786,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
ec->curr = t;
ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
start_transaction(ec);
- spin_unlock_irqrestore(&ec->lock, tmp);
+ spin_unlock(&ec->lock);
ret = ec_poll(ec);
- spin_lock_irqsave(&ec->lock, tmp);
+ spin_lock(&ec->lock);
if (t->irq_count == ec_storm_threshold)
acpi_ec_unmask_events(ec);
ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
@@ -808,7 +799,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
acpi_ec_complete_request(ec);
ec_dbg_ref(ec, "Decrease command");
unlock:
- spin_unlock_irqrestore(&ec->lock, tmp);
+ spin_unlock(&ec->lock);
return ret;
}
@@ -936,9 +927,7 @@ EXPORT_SYMBOL(ec_get_handle);
static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ec->lock, flags);
+ spin_lock(&ec->lock);
if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
ec_dbg_drv("Starting EC");
/* Enable GPE for event processing (SCI_EVT=1) */
@@ -948,31 +937,28 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
}
ec_log_drv("EC started");
}
- spin_unlock_irqrestore(&ec->lock, flags);
+ spin_unlock(&ec->lock);
}
static bool acpi_ec_stopped(struct acpi_ec *ec)
{
- unsigned long flags;
bool flushed;
- spin_lock_irqsave(&ec->lock, flags);
+ spin_lock(&ec->lock);
flushed = acpi_ec_flushed(ec);
- spin_unlock_irqrestore(&ec->lock, flags);
+ spin_unlock(&ec->lock);
return flushed;
}
static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ec->lock, flags);
+ spin_lock(&ec->lock);
if (acpi_ec_started(ec)) {
ec_dbg_drv("Stopping EC");
set_bit(EC_FLAGS_STOPPED, &ec->flags);
- spin_unlock_irqrestore(&ec->lock, flags);
+ spin_unlock(&ec->lock);
wait_event(ec->wait, acpi_ec_stopped(ec));
- spin_lock_irqsave(&ec->lock, flags);
+ spin_lock(&ec->lock);
/* Disable GPE for event processing (SCI_EVT=1) */
if (!suspending) {
acpi_ec_complete_request(ec);
@@ -983,29 +969,25 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
clear_bit(EC_FLAGS_STOPPED, &ec->flags);
ec_log_drv("EC stopped");
}
- spin_unlock_irqrestore(&ec->lock, flags);
+ spin_unlock(&ec->lock);
}
static void acpi_ec_enter_noirq(struct acpi_ec *ec)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ec->lock, flags);
+ spin_lock(&ec->lock);
ec->busy_polling = true;
ec->polling_guard = 0;
ec_log_drv("interrupt blocked");
- spin_unlock_irqrestore(&ec->lock, flags);
+ spin_unlock(&ec->lock);
}
static void acpi_ec_leave_noirq(struct acpi_ec *ec)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ec->lock, flags);
+ spin_lock(&ec->lock);
ec->busy_polling = ec_busy_polling;
ec->polling_guard = ec_polling_guard;
ec_log_drv("interrupt unblocked");
- spin_unlock_irqrestore(&ec->lock, flags);
+ spin_unlock(&ec->lock);
}
void acpi_ec_block_transactions(void)
@@ -1137,9 +1119,9 @@ static void acpi_ec_event_processor(struct work_struct *work)
ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
- spin_lock_irq(&ec->lock);
+ spin_lock(&ec->lock);
ec->queries_in_progress--;
- spin_unlock_irq(&ec->lock);
+ spin_unlock(&ec->lock);
acpi_ec_put_query_handler(handler);
kfree(q);
@@ -1202,12 +1184,12 @@ static int acpi_ec_submit_query(struct acpi_ec *ec)
*/
ec_dbg_evt("Query(0x%02x) scheduled", value);
- spin_lock_irq(&ec->lock);
+ spin_lock(&ec->lock);
ec->queries_in_progress++;
queue_work(ec_query_wq, &q->work);
- spin_unlock_irq(&ec->lock);
+ spin_unlock(&ec->lock);
return 0;
@@ -1223,14 +1205,14 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec_dbg_evt("Event started");
- spin_lock_irq(&ec->lock);
+ spin_lock(&ec->lock);
while (ec->events_to_process) {
- spin_unlock_irq(&ec->lock);
+ spin_unlock(&ec->lock);
acpi_ec_submit_query(ec);
- spin_lock_irq(&ec->lock);
+ spin_lock(&ec->lock);
ec->events_to_process--;
}
@@ -1247,11 +1229,11 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec_dbg_evt("Event stopped");
- spin_unlock_irq(&ec->lock);
+ spin_unlock(&ec->lock);
guard_timeout = !!ec_guard(ec);
- spin_lock_irq(&ec->lock);
+ spin_lock(&ec->lock);
/* Take care of SCI_EVT unless someone else is doing that. */
if (guard_timeout && !ec->curr)
@@ -1264,7 +1246,7 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec->events_in_progress--;
- spin_unlock_irq(&ec->lock);
+ spin_unlock(&ec->lock);
}
static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
@@ -1289,13 +1271,11 @@ static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt
static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ec->lock, flags);
+ spin_lock(&ec->lock);
clear_gpe_and_advance_transaction(ec, true);
- spin_unlock_irqrestore(&ec->lock, flags);
+ spin_unlock(&ec->lock);
}
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -1458,8 +1438,8 @@ static bool install_gpe_event_handler(struct acpi_ec *ec)
static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
{
- return request_irq(ec->irq, acpi_ec_irq_handler, IRQF_SHARED,
- "ACPI EC", ec) >= 0;
+ return request_threaded_irq(ec->irq, NULL, acpi_ec_irq_handler,
+ IRQF_SHARED | IRQF_ONESHOT, "ACPI EC", ec) >= 0;
}
/**
@@ -2105,7 +2085,7 @@ bool acpi_ec_dispatch_gpe(void)
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
*/
- spin_lock_irq(&first_ec->lock);
+ spin_lock(&first_ec->lock);
if (acpi_ec_gpe_status_set(first_ec)) {
pm_pr_dbg("ACPI EC GPE status set\n");
@@ -2114,7 +2094,7 @@ bool acpi_ec_dispatch_gpe(void)
work_in_progress = acpi_ec_work_in_progress(first_ec);
}
- spin_unlock_irq(&first_ec->lock);
+ spin_unlock(&first_ec->lock);
if (!work_in_progress)
return false;
@@ -2127,11 +2107,11 @@ bool acpi_ec_dispatch_gpe(void)
pm_pr_dbg("ACPI EC work flushed\n");
- spin_lock_irq(&first_ec->lock);
+ spin_lock(&first_ec->lock);
work_in_progress = acpi_ec_work_in_progress(first_ec);
- spin_unlock_irq(&first_ec->lock);
+ spin_unlock(&first_ec->lock);
} while (work_in_progress && !pm_wakeup_pending());
return false;
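One plausible reading of the spin_lock_irqsave() -> spin_lock() conversions above, stated here as an assumption rather than as the changelog rationale: together with the switch of the dedicated EC IRQ to a threaded handler, ec->lock is intended to be taken from process or threaded-IRQ context only, in which case the interrupt-disabling lock variants are unnecessary.

/*
 * Sketch of the general rule only: a lock that is never acquired from
 * hard-IRQ context cannot deadlock against a hard-IRQ path on the same
 * CPU, so plain spin_lock()/spin_unlock() suffice.
 */
static void ec_locking_example(struct acpi_ec *ec)
{
	spin_lock(&ec->lock);
	/* ... inspect or advance the current transaction ... */
	spin_unlock(&ec->lock);
}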
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 866c7c4ed233..6588525c45ef 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -85,6 +85,20 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent);
acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context);
void acpi_scan_table_notify(void);
+int acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp);
+int acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp);
+int acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
+int acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
+
+#ifdef CONFIG_ARM64
+int acpi_arch_thermal_cpufreq_pctg(void);
+#else
+static inline int acpi_arch_thermal_cpufreq_pctg(void)
+{
+ return 0;
+}
+#endif
+
/* --------------------------------------------------------------------------
Device Node Initialization / Removal
-------------------------------------------------------------------------- */
@@ -148,8 +162,11 @@ int acpi_wakeup_device_init(void);
#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
void acpi_early_processor_control_setup(void);
void acpi_early_processor_set_pdc(void);
-
+#ifdef CONFIG_X86
void acpi_proc_quirk_mwait_check(void);
+#else
+static inline void acpi_proc_quirk_mwait_check(void) {}
+#endif
bool processor_physically_present(acpi_handle handle);
#else
static inline void acpi_early_processor_control_setup(void) {}
@@ -276,4 +293,13 @@ void acpi_init_lpit(void);
static inline void acpi_init_lpit(void) { }
#endif
+/*--------------------------------------------------------------------------
+ ACPI _CRS CSI-2 and MIPI DisCo for Imaging
+ -------------------------------------------------------------------------- */
+
+void acpi_mipi_check_crs_csi2(acpi_handle handle);
+void acpi_mipi_scan_crs_csi2(void);
+void acpi_mipi_init_crs_csi2_swnodes(void);
+void acpi_mipi_crs_csi2_cleanup(void);
+
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c
new file mode 100644
index 000000000000..7286cf4579bc
--- /dev/null
+++ b/drivers/acpi/mipi-disco-img.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MIPI DisCo for Imaging support.
+ *
+ * Copyright (C) 2023 Intel Corporation
+ *
+ * Support MIPI DisCo for Imaging by parsing ACPI _CRS CSI-2 records defined in
+ * Section 6.4.3.8.2.4 "Camera Serial Interface (CSI-2) Connection Resource
+ * Descriptor" of ACPI 6.5 and using device properties defined by the MIPI DisCo
+ * for Imaging specification.
+ *
+ * The implementation looks for the information in the ACPI namespace (CSI-2
+ * resource descriptors in _CRS) and constructs software nodes compatible with
+ * Documentation/firmware-guide/acpi/dsd/graph.rst to represent the CSI-2
+ * connection graph. The software nodes are then populated with the data
+ * extracted from the _CRS CSI-2 resource descriptors and the MIPI DisCo
+ * for Imaging device properties present in _DSD for the ACPI device objects
+ * with CSI-2 connections.
+ */
+
+#include <linux/acpi.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <media/v4l2-fwnode.h>
+
+#include "internal.h"
+
+static LIST_HEAD(acpi_mipi_crs_csi2_list);
+
+static void acpi_mipi_data_tag(acpi_handle handle, void *context)
+{
+}
+
+/* Connection data extracted from one _CRS CSI-2 resource descriptor. */
+struct crs_csi2_connection {
+ struct list_head entry;
+ struct acpi_resource_csi2_serialbus csi2_data;
+ acpi_handle remote_handle;
+ char remote_name[];
+};
+
+/* Data extracted from _CRS CSI-2 resource descriptors for one device. */
+struct crs_csi2 {
+ struct list_head entry;
+ acpi_handle handle;
+ struct acpi_device_software_nodes *swnodes;
+ struct list_head connections;
+ u32 port_count;
+};
+
+struct csi2_resources_walk_data {
+ acpi_handle handle;
+ struct list_head connections;
+};
+
+static acpi_status parse_csi2_resource(struct acpi_resource *res, void *context)
+{
+ struct csi2_resources_walk_data *crwd = context;
+ struct acpi_resource_csi2_serialbus *csi2_res;
+ struct acpi_resource_source *csi2_res_src;
+ u16 csi2_res_src_length;
+ struct crs_csi2_connection *conn;
+ acpi_handle remote_handle;
+
+ if (res->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+ return AE_OK;
+
+ csi2_res = &res->data.csi2_serial_bus;
+
+ if (csi2_res->type != ACPI_RESOURCE_SERIAL_TYPE_CSI2)
+ return AE_OK;
+
+ csi2_res_src = &csi2_res->resource_source;
+ if (ACPI_FAILURE(acpi_get_handle(NULL, csi2_res_src->string_ptr,
+ &remote_handle))) {
+ acpi_handle_debug(crwd->handle,
+ "unable to find resource source\n");
+ return AE_OK;
+ }
+ csi2_res_src_length = csi2_res_src->string_length;
+ if (!csi2_res_src_length) {
+ acpi_handle_debug(crwd->handle,
+ "invalid resource source string length\n");
+ return AE_OK;
+ }
+
+ conn = kmalloc(struct_size(conn, remote_name, csi2_res_src_length + 1),
+ GFP_KERNEL);
+ if (!conn)
+ return AE_OK;
+
+ conn->csi2_data = *csi2_res;
+ strscpy(conn->remote_name, csi2_res_src->string_ptr, csi2_res_src_length);
+ conn->csi2_data.resource_source.string_ptr = conn->remote_name;
+ conn->remote_handle = remote_handle;
+
+ list_add(&conn->entry, &crwd->connections);
+
+ return AE_OK;
+}
+
+static struct crs_csi2 *acpi_mipi_add_crs_csi2(acpi_handle handle,
+ struct list_head *list)
+{
+ struct crs_csi2 *csi2;
+
+ csi2 = kzalloc(sizeof(*csi2), GFP_KERNEL);
+ if (!csi2)
+ return NULL;
+
+ csi2->handle = handle;
+ INIT_LIST_HEAD(&csi2->connections);
+ csi2->port_count = 1;
+
+ if (ACPI_FAILURE(acpi_attach_data(handle, acpi_mipi_data_tag, csi2))) {
+ kfree(csi2);
+ return NULL;
+ }
+
+ list_add(&csi2->entry, list);
+
+ return csi2;
+}
+
+static struct crs_csi2 *acpi_mipi_get_crs_csi2(acpi_handle handle)
+{
+ struct crs_csi2 *csi2;
+
+ if (ACPI_FAILURE(acpi_get_data_full(handle, acpi_mipi_data_tag,
+ (void **)&csi2, NULL)))
+ return NULL;
+
+ return csi2;
+}
+
+static void csi_csr2_release_connections(struct list_head *list)
+{
+ struct crs_csi2_connection *conn, *conn_tmp;
+
+ list_for_each_entry_safe(conn, conn_tmp, list, entry) {
+ list_del(&conn->entry);
+ kfree(conn);
+ }
+}
+
+static void acpi_mipi_del_crs_csi2(struct crs_csi2 *csi2)
+{
+ list_del(&csi2->entry);
+ acpi_detach_data(csi2->handle, acpi_mipi_data_tag);
+ kfree(csi2->swnodes);
+ csi_csr2_release_connections(&csi2->connections);
+ kfree(csi2);
+}
+
+/**
+ * acpi_mipi_check_crs_csi2 - Look for CSI-2 resources in _CRS
+ * @handle: Device object handle to evaluate _CRS for.
+ *
+ * Find all CSI-2 resource descriptors in the given device's _CRS
+ * and collect them into a list.
+ */
+void acpi_mipi_check_crs_csi2(acpi_handle handle)
+{
+ struct csi2_resources_walk_data crwd = {
+ .handle = handle,
+ .connections = LIST_HEAD_INIT(crwd.connections),
+ };
+ struct crs_csi2 *csi2;
+
+ /*
+ * Avoid allocating _CRS CSI-2 objects for devices without any CSI-2
+ * resource descriptors in _CRS to reduce overhead.
+ */
+ acpi_walk_resources(handle, METHOD_NAME__CRS, parse_csi2_resource, &crwd);
+ if (list_empty(&crwd.connections))
+ return;
+
+ /*
+ * Create a _CRS CSI-2 entry to store the extracted connection
+ * information and add it to the global list.
+ */
+ csi2 = acpi_mipi_add_crs_csi2(handle, &acpi_mipi_crs_csi2_list);
+ if (!csi2) {
+ csi_csr2_release_connections(&crwd.connections);
+ return; /* Nothing really can be done about this. */
+ }
+
+ list_replace(&crwd.connections, &csi2->connections);
+}
+
+#define NO_CSI2_PORT (UINT_MAX - 1)
+
+static void alloc_crs_csi2_swnodes(struct crs_csi2 *csi2)
+{
+ size_t port_count = csi2->port_count;
+ struct acpi_device_software_nodes *swnodes;
+ size_t alloc_size;
+ unsigned int i;
+
+ /*
+ * Allocate memory for ports, node pointers (number of nodes +
+ * 1 (guardian), nodes (root + number of ports * 2 (because for
+ * every port there is an endpoint)).
+ */
+ if (check_mul_overflow(sizeof(*swnodes->ports) +
+ sizeof(*swnodes->nodes) * 2 +
+ sizeof(*swnodes->nodeptrs) * 2,
+ port_count, &alloc_size) ||
+ check_add_overflow(sizeof(*swnodes) +
+ sizeof(*swnodes->nodes) +
+ sizeof(*swnodes->nodeptrs) * 2,
+ alloc_size, &alloc_size)) {
+ acpi_handle_info(csi2->handle,
+ "too many _CRS CSI-2 resource handles (%zu)",
+ port_count);
+ return;
+ }
+
+ swnodes = kmalloc(alloc_size, GFP_KERNEL);
+ if (!swnodes)
+ return;
+
+ swnodes->ports = (struct acpi_device_software_node_port *)(swnodes + 1);
+ swnodes->nodes = (struct software_node *)(swnodes->ports + port_count);
+ swnodes->nodeptrs = (const struct software_node **)(swnodes->nodes + 1 +
+ 2 * port_count);
+ swnodes->num_ports = port_count;
+
+ for (i = 0; i < 2 * port_count + 1; i++)
+ swnodes->nodeptrs[i] = &swnodes->nodes[i];
+
+ swnodes->nodeptrs[i] = NULL;
+
+ for (i = 0; i < port_count; i++)
+ swnodes->ports[i].port_nr = NO_CSI2_PORT;
+
+ csi2->swnodes = swnodes;
+}
+
+#define ACPI_CRS_CSI2_PHY_TYPE_C 0
+#define ACPI_CRS_CSI2_PHY_TYPE_D 1
+
+static unsigned int next_csi2_port_index(struct acpi_device_software_nodes *swnodes,
+ unsigned int port_nr)
+{
+ unsigned int i;
+
+ for (i = 0; i < swnodes->num_ports; i++) {
+ struct acpi_device_software_node_port *port = &swnodes->ports[i];
+
+ if (port->port_nr == port_nr)
+ return i;
+
+ if (port->port_nr == NO_CSI2_PORT) {
+ port->port_nr = port_nr;
+ return i;
+ }
+ }
+
+ return NO_CSI2_PORT;
+}
+
+/* Print graph port name into a buffer, return non-zero on failure. */
+#define GRAPH_PORT_NAME(var, num) \
+ (snprintf((var), sizeof(var), SWNODE_GRAPH_PORT_NAME_FMT, (num)) >= \
+ sizeof(var))
+
+static void extract_crs_csi2_conn_info(acpi_handle local_handle,
+ struct acpi_device_software_nodes *local_swnodes,
+ struct crs_csi2_connection *conn)
+{
+ struct crs_csi2 *remote_csi2 = acpi_mipi_get_crs_csi2(conn->remote_handle);
+ struct acpi_device_software_nodes *remote_swnodes;
+ struct acpi_device_software_node_port *local_port, *remote_port;
+ struct software_node *local_node, *remote_node;
+ unsigned int local_index, remote_index;
+ unsigned int bus_type;
+
+ /*
+ * If the previous steps have failed to make room for a _CRS CSI-2
+ * representation for the remote end of the given connection, skip it.
+ */
+ if (!remote_csi2)
+ return;
+
+ remote_swnodes = remote_csi2->swnodes;
+ if (!remote_swnodes)
+ return;
+
+ switch (conn->csi2_data.phy_type) {
+ case ACPI_CRS_CSI2_PHY_TYPE_C:
+ bus_type = V4L2_FWNODE_BUS_TYPE_CSI2_CPHY;
+ break;
+
+ case ACPI_CRS_CSI2_PHY_TYPE_D:
+ bus_type = V4L2_FWNODE_BUS_TYPE_CSI2_DPHY;
+ break;
+
+ default:
+ acpi_handle_info(local_handle, "unknown CSI-2 PHY type %u\n",
+ conn->csi2_data.phy_type);
+ return;
+ }
+
+ local_index = next_csi2_port_index(local_swnodes,
+ conn->csi2_data.local_port_instance);
+ if (WARN_ON_ONCE(local_index >= local_swnodes->num_ports))
+ return;
+
+ remote_index = next_csi2_port_index(remote_swnodes,
+ conn->csi2_data.resource_source.index);
+ if (WARN_ON_ONCE(remote_index >= remote_swnodes->num_ports))
+ return;
+
+ local_port = &local_swnodes->ports[local_index];
+ local_node = &local_swnodes->nodes[ACPI_DEVICE_SWNODE_EP(local_index)];
+ local_port->crs_csi2_local = true;
+
+ remote_port = &remote_swnodes->ports[remote_index];
+ remote_node = &remote_swnodes->nodes[ACPI_DEVICE_SWNODE_EP(remote_index)];
+
+ local_port->remote_ep[0] = SOFTWARE_NODE_REFERENCE(remote_node);
+ remote_port->remote_ep[0] = SOFTWARE_NODE_REFERENCE(local_node);
+
+ local_port->ep_props[ACPI_DEVICE_SWNODE_EP_REMOTE_EP] =
+ PROPERTY_ENTRY_REF_ARRAY("remote-endpoint",
+ local_port->remote_ep);
+
+ local_port->ep_props[ACPI_DEVICE_SWNODE_EP_BUS_TYPE] =
+ PROPERTY_ENTRY_U32("bus-type", bus_type);
+
+ local_port->ep_props[ACPI_DEVICE_SWNODE_EP_REG] =
+ PROPERTY_ENTRY_U32("reg", 0);
+
+ local_port->port_props[ACPI_DEVICE_SWNODE_PORT_REG] =
+ PROPERTY_ENTRY_U32("reg", conn->csi2_data.local_port_instance);
+
+ if (GRAPH_PORT_NAME(local_port->port_name,
+ conn->csi2_data.local_port_instance))
+ acpi_handle_info(local_handle, "local port %u name too long",
+ conn->csi2_data.local_port_instance);
+
+ remote_port->ep_props[ACPI_DEVICE_SWNODE_EP_REMOTE_EP] =
+ PROPERTY_ENTRY_REF_ARRAY("remote-endpoint",
+ remote_port->remote_ep);
+
+ remote_port->ep_props[ACPI_DEVICE_SWNODE_EP_BUS_TYPE] =
+ PROPERTY_ENTRY_U32("bus-type", bus_type);
+
+ remote_port->ep_props[ACPI_DEVICE_SWNODE_EP_REG] =
+ PROPERTY_ENTRY_U32("reg", 0);
+
+ remote_port->port_props[ACPI_DEVICE_SWNODE_PORT_REG] =
+ PROPERTY_ENTRY_U32("reg", conn->csi2_data.resource_source.index);
+
+ if (GRAPH_PORT_NAME(remote_port->port_name,
+ conn->csi2_data.resource_source.index))
+ acpi_handle_info(local_handle, "remote port %u name too long\n",
+ conn->csi2_data.resource_source.index);
+}
+
+static void prepare_crs_csi2_swnodes(struct crs_csi2 *csi2)
+{
+ struct acpi_device_software_nodes *local_swnodes = csi2->swnodes;
+ acpi_handle local_handle = csi2->handle;
+ struct crs_csi2_connection *conn;
+
+ /* Bail out if the allocation of swnodes has failed. */
+ if (!local_swnodes)
+ return;
+
+ list_for_each_entry(conn, &csi2->connections, entry)
+ extract_crs_csi2_conn_info(local_handle, local_swnodes, conn);
+}
+
+/**
+ * acpi_mipi_scan_crs_csi2 - Create ACPI _CRS CSI-2 software nodes
+ *
+ * Note that this function must be called before any struct acpi_device objects
+ * are bound to any ACPI drivers or scan handlers, so it cannot assume the
+ * existence of struct acpi_device objects for every device present in the ACPI
+ * namespace.
+ *
+ * acpi_scan_lock in scan.c must be held when calling this function.
+ */
+void acpi_mipi_scan_crs_csi2(void)
+{
+ struct crs_csi2 *csi2;
+ LIST_HEAD(aux_list);
+
+ /* Count references to each ACPI handle in the CSI-2 connection graph. */
+ list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry) {
+ struct crs_csi2_connection *conn;
+
+ list_for_each_entry(conn, &csi2->connections, entry) {
+ struct crs_csi2 *remote_csi2;
+
+ csi2->port_count++;
+
+ remote_csi2 = acpi_mipi_get_crs_csi2(conn->remote_handle);
+ if (remote_csi2) {
+ remote_csi2->port_count++;
+ continue;
+ }
+ /*
+ * The remote endpoint has no _CRS CSI-2 list entry yet,
+ * so create one for it and add it to the list.
+ */
+ acpi_mipi_add_crs_csi2(conn->remote_handle, &aux_list);
+ }
+ }
+ list_splice(&aux_list, &acpi_mipi_crs_csi2_list);
+
+ /*
+ * Allocate software nodes for representing the CSI-2 information.
+ *
+ * This needs to be done for all of the list entries in one go, because
+ * they may point to each other without restrictions and the next step
+ * relies on the availability of swnodes memory for each list entry.
+ */
+ list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry)
+ alloc_crs_csi2_swnodes(csi2);
+
+ /*
+ * Set up software node properties using data from _CRS CSI-2 resource
+ * descriptors.
+ */
+ list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry)
+ prepare_crs_csi2_swnodes(csi2);
+}
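For reference, a minimal sketch of the call order this kerneldoc implies, mirroring the acpi_bus_scan() changes further down in this patch (acpi_scan_lock held throughout):

	acpi_mipi_scan_crs_csi2();		/* build the connection graph, allocate swnodes */
	acpi_mipi_init_crs_csi2_swnodes();	/* attach swnodes to already-enumerated devices */
	/* ... enumerate the postponed devices ... */
	acpi_mipi_init_crs_csi2_swnodes();	/* pick up devices added in the second pass */
	acpi_mipi_crs_csi2_cleanup();		/* free the temporary per-handle data */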
+
+/*
+ * Get the index of the next property in the property array, with a given
+ * maximum value.
+ */
+#define NEXT_PROPERTY(index, max) \
+ (WARN_ON((index) > ACPI_DEVICE_SWNODE_##max) ? \
+ ACPI_DEVICE_SWNODE_##max : (index)++)
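A short illustration with hypothetical state (a port pointer as used below): the macro yields the current index and post-increments it, clamping (with a warning) at the given limit so a miscounted property cannot be written past the end of the array.

	unsigned int i = 0;

	/* Returns 0 and advances i to 1 in the normal case. */
	port->ep_props[NEXT_PROPERTY(i, EP_CLOCK_LANES)] =
		PROPERTY_ENTRY_U32("clock-lanes", 0);
	/* Had i already exceeded ACPI_DEVICE_SWNODE_EP_CLOCK_LANES, the entry
	 * would go to that clamped slot (and WARN) instead of overflowing. */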
+
+static void init_csi2_port_local(struct acpi_device *adev,
+ struct acpi_device_software_node_port *port,
+ struct fwnode_handle *port_fwnode,
+ unsigned int index)
+{
+ acpi_handle handle = acpi_device_handle(adev);
+ unsigned int num_link_freqs;
+ int ret;
+
+ ret = fwnode_property_count_u64(port_fwnode, "mipi-img-link-frequencies");
+ if (ret <= 0)
+ return;
+
+ num_link_freqs = ret;
+ if (num_link_freqs > ACPI_DEVICE_CSI2_DATA_LANES) {
+ acpi_handle_info(handle, "Too many link frequencies: %u\n",
+ num_link_freqs);
+ num_link_freqs = ACPI_DEVICE_CSI2_DATA_LANES;
+ }
+
+ ret = fwnode_property_read_u64_array(port_fwnode,
+ "mipi-img-link-frequencies",
+ port->link_frequencies,
+ num_link_freqs);
+ if (ret) {
+ acpi_handle_info(handle, "Unable to get link frequencies (%d)\n",
+ ret);
+ return;
+ }
+
+ port->ep_props[NEXT_PROPERTY(index, EP_LINK_FREQUENCIES)] =
+ PROPERTY_ENTRY_U64_ARRAY_LEN("link-frequencies",
+ port->link_frequencies,
+ num_link_freqs);
+}
+
+static void init_csi2_port(struct acpi_device *adev,
+ struct acpi_device_software_nodes *swnodes,
+ struct acpi_device_software_node_port *port,
+ struct fwnode_handle *port_fwnode,
+ unsigned int port_index)
+{
+ unsigned int ep_prop_index = ACPI_DEVICE_SWNODE_EP_CLOCK_LANES;
+ acpi_handle handle = acpi_device_handle(adev);
+ u8 val[ACPI_DEVICE_CSI2_DATA_LANES];
+ int num_lanes = 0;
+ int ret;
+
+ if (GRAPH_PORT_NAME(port->port_name, port->port_nr))
+ return;
+
+ swnodes->nodes[ACPI_DEVICE_SWNODE_PORT(port_index)] =
+ SOFTWARE_NODE(port->port_name, port->port_props,
+ &swnodes->nodes[ACPI_DEVICE_SWNODE_ROOT]);
+
+ ret = fwnode_property_read_u8(port_fwnode, "mipi-img-clock-lane", val);
+ if (!ret)
+ port->ep_props[NEXT_PROPERTY(ep_prop_index, EP_CLOCK_LANES)] =
+ PROPERTY_ENTRY_U32("clock-lanes", val[0]);
+
+ ret = fwnode_property_count_u8(port_fwnode, "mipi-img-data-lanes");
+ if (ret > 0) {
+ num_lanes = ret;
+
+ if (num_lanes > ACPI_DEVICE_CSI2_DATA_LANES) {
+ acpi_handle_info(handle, "Too many data lanes: %u\n",
+ num_lanes);
+ num_lanes = ACPI_DEVICE_CSI2_DATA_LANES;
+ }
+
+ ret = fwnode_property_read_u8_array(port_fwnode,
+ "mipi-img-data-lanes",
+ val, num_lanes);
+ if (!ret) {
+ unsigned int i;
+
+ for (i = 0; i < num_lanes; i++)
+ port->data_lanes[i] = val[i];
+
+ port->ep_props[NEXT_PROPERTY(ep_prop_index, EP_DATA_LANES)] =
+ PROPERTY_ENTRY_U32_ARRAY_LEN("data-lanes",
+ port->data_lanes,
+ num_lanes);
+ }
+ }
+
+ ret = fwnode_property_count_u8(port_fwnode, "mipi-img-lane-polarities");
+ if (ret < 0) {
+ acpi_handle_debug(handle, "Lane polarity bytes missing\n");
+ } else if (ret * BITS_PER_TYPE(u8) < num_lanes + 1) {
+ acpi_handle_info(handle, "Too few lane polarity bits (%zu vs. %d)\n",
+ ret * BITS_PER_TYPE(u8), num_lanes + 1);
+ } else {
+ unsigned long mask = 0;
+ int byte_count = ret;
+ unsigned int i;
+
+ /*
+ * The total number of lanes is ACPI_DEVICE_CSI2_DATA_LANES + 1
+ * (data lanes + clock lane). It is not expected to ever be
+ * greater than the number of bits in an unsigned long
+ * variable, but ensure that this is the case.
+ */
+ BUILD_BUG_ON(BITS_PER_TYPE(unsigned long) <= ACPI_DEVICE_CSI2_DATA_LANES);
+
+ if (byte_count > sizeof(mask)) {
+ acpi_handle_info(handle, "Too many lane polarities: %d\n",
+ byte_count);
+ byte_count = sizeof(mask);
+ }
+ fwnode_property_read_u8_array(port_fwnode, "mipi-img-lane-polarities",
+ val, byte_count);
+
+ for (i = 0; i < byte_count; i++)
+ mask |= (unsigned long)val[i] << BITS_PER_TYPE(u8) * i;
+
+ for (i = 0; i <= num_lanes; i++)
+ port->lane_polarities[i] = test_bit(i, &mask);
+
+ port->ep_props[NEXT_PROPERTY(ep_prop_index, EP_LANE_POLARITIES)] =
+ PROPERTY_ENTRY_U32_ARRAY_LEN("lane-polarities",
+ port->lane_polarities,
+ num_lanes + 1);
+ }
+
+ swnodes->nodes[ACPI_DEVICE_SWNODE_EP(port_index)] =
+ SOFTWARE_NODE("endpoint@0", swnodes->ports[port_index].ep_props,
+ &swnodes->nodes[ACPI_DEVICE_SWNODE_PORT(port_index)]);
+
+ if (port->crs_csi2_local)
+ init_csi2_port_local(adev, port, port_fwnode, ep_prop_index);
+}
+
+#define MIPI_IMG_PORT_PREFIX "mipi-img-port-"
+
+static struct fwnode_handle *get_mipi_port_handle(struct fwnode_handle *adev_fwnode,
+ unsigned int port_nr)
+{
+ char port_name[sizeof(MIPI_IMG_PORT_PREFIX) + 2];
+
+ if (snprintf(port_name, sizeof(port_name), "%s%u",
+ MIPI_IMG_PORT_PREFIX, port_nr) >= sizeof(port_name))
+ return NULL;
+
+ return fwnode_get_named_child_node(adev_fwnode, port_name);
+}
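Usage sketch for a struct acpi_device *adev, assuming its MIPI DisCo for Imaging _DSD hierarchy contains a data node named "mipi-img-port-0":

	struct fwnode_handle *port_fwnode;

	port_fwnode = get_mipi_port_handle(acpi_fwnode_handle(adev), 0);
	if (port_fwnode) {
		/* read "mipi-img-clock-lane", "mipi-img-data-lanes", ... */
		fwnode_handle_put(port_fwnode);
	}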
+
+static void init_crs_csi2_swnodes(struct crs_csi2 *csi2)
+{
+ struct acpi_buffer buffer = { .length = ACPI_ALLOCATE_BUFFER };
+ struct acpi_device_software_nodes *swnodes = csi2->swnodes;
+ acpi_handle handle = csi2->handle;
+ unsigned int prop_index = 0;
+ struct fwnode_handle *adev_fwnode;
+ struct acpi_device *adev;
+ acpi_status status;
+ unsigned int i;
+ u32 val;
+ int ret;
+
+ /*
+ * Bail out if the swnodes are not available (either they have not been
+ * allocated or they have been assigned to the device already).
+ */
+ if (!swnodes)
+ return;
+
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
+ return;
+
+ adev_fwnode = acpi_fwnode_handle(adev);
+
+ /*
+ * If the "rotation" property is not present, but _PLD is there,
+ * evaluate it to get the "rotation" value.
+ */
+ if (!fwnode_property_present(adev_fwnode, "rotation")) {
+ struct acpi_pld_info *pld;
+
+ status = acpi_get_physical_device_location(handle, &pld);
+ if (ACPI_SUCCESS(status)) {
+ swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_ROTATION)] =
+ PROPERTY_ENTRY_U32("rotation",
+ pld->rotation * 45U);
+ kfree(pld);
+ }
+ }
+
+ if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-clock-frequency", &val))
+ swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_CLOCK_FREQUENCY)] =
+ PROPERTY_ENTRY_U32("clock-frequency", val);
+
+ if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-led-max-current", &val))
+ swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_LED_MAX_MICROAMP)] =
+ PROPERTY_ENTRY_U32("led-max-microamp", val);
+
+ if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-flash-max-current", &val))
+ swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_FLASH_MAX_MICROAMP)] =
+ PROPERTY_ENTRY_U32("flash-max-microamp", val);
+
+ if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-flash-max-timeout-us", &val))
+ swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_FLASH_MAX_TIMEOUT_US)] =
+ PROPERTY_ENTRY_U32("flash-max-timeout-us", val);
+
+ status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_info(handle, "Unable to get the path name\n");
+ return;
+ }
+
+ swnodes->nodes[ACPI_DEVICE_SWNODE_ROOT] =
+ SOFTWARE_NODE(buffer.pointer, swnodes->dev_props, NULL);
+
+ for (i = 0; i < swnodes->num_ports; i++) {
+ struct acpi_device_software_node_port *port = &swnodes->ports[i];
+ struct fwnode_handle *port_fwnode;
+
+ /*
+ * The MIPI DisCo for Imaging specification defines _DSD device
+ * properties for providing CSI-2 port parameters that can be
+ * accessed through the generic device properties framework. To
+ * access them, it is first necessary to find the data node
+ * representing the port under the given ACPI device object.
+ */
+ port_fwnode = get_mipi_port_handle(adev_fwnode, port->port_nr);
+ if (!port_fwnode) {
+ acpi_handle_info(handle,
+ "MIPI port name too long for port %u\n",
+ port->port_nr);
+ continue;
+ }
+
+ init_csi2_port(adev, swnodes, port, port_fwnode, i);
+
+ fwnode_handle_put(port_fwnode);
+ }
+
+ ret = software_node_register_node_group(swnodes->nodeptrs);
+ if (ret < 0) {
+ acpi_handle_info(handle,
+ "Unable to register software nodes (%d)\n", ret);
+ return;
+ }
+
+ adev->swnodes = swnodes;
+ adev_fwnode->secondary = software_node_fwnode(swnodes->nodes);
+
+ /*
+ * Prevent the swnodes of this csi2 entry from being assigned again or
+ * freed prematurely.
+ */
+ csi2->swnodes = NULL;
+}
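Once registered as the secondary firmware node, the nodes built above become visible through the generic fwnode graph helpers; a minimal consumer-side sketch for some struct device *dev bound to this ACPI device:

	struct fwnode_handle *ep;

	fwnode_graph_for_each_endpoint(dev_fwnode(dev), ep) {
		u32 bus_type;

		/* "reg", "bus-type" and "remote-endpoint" come from the swnodes. */
		fwnode_property_read_u32(ep, "bus-type", &bus_type);
	}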
+
+/**
+ * acpi_mipi_init_crs_csi2_swnodes - Initialize _CRS CSI-2 software nodes
+ *
+ * Use MIPI DisCo for Imaging device properties to finalize the initialization
+ * of CSI-2 software nodes for all ACPI device objects that have already been
+ * enumerated.
+ */
+void acpi_mipi_init_crs_csi2_swnodes(void)
+{
+ struct crs_csi2 *csi2, *csi2_tmp;
+
+ list_for_each_entry_safe(csi2, csi2_tmp, &acpi_mipi_crs_csi2_list, entry)
+ init_crs_csi2_swnodes(csi2);
+}
+
+/**
+ * acpi_mipi_crs_csi2_cleanup - Free _CRS CSI-2 temporary data
+ */
+void acpi_mipi_crs_csi2_cleanup(void)
+{
+ struct crs_csi2 *csi2, *csi2_tmp;
+
+ list_for_each_entry_safe(csi2, csi2_tmp, &acpi_mipi_crs_csi2_list, entry)
+ acpi_mipi_del_crs_csi2(csi2);
+}
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 12f330b0eac0..0214518fc582 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -67,9 +67,9 @@ int acpi_map_pxm_to_node(int pxm)
node = pxm_to_node_map[pxm];
if (node == NUMA_NO_NODE) {
- if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
- return NUMA_NO_NODE;
node = first_unset_node(nodes_found_map);
+ if (node >= MAX_NUMNODES)
+ return NUMA_NO_NODE;
__acpi_map_pxm_to_node(pxm, node);
node_set(node, nodes_found_map);
}
@@ -183,7 +183,7 @@ static int __init slit_valid(struct acpi_table_slit *slit)
int i, j;
int d = slit->locality_count;
for (i = 0; i < d; i++) {
- for (j = 0; j < d; j++) {
+ for (j = 0; j < d; j++) {
u8 val = slit->entry[d*i + j];
if (i == j) {
if (val != LOCAL_DISTANCE)
@@ -430,7 +430,7 @@ acpi_parse_gi_affinity(union acpi_subtable_headers *header,
return -EINVAL;
node = acpi_map_pxm_to_node(gi_affinity->proximity_domain);
- if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
+ if (node == NUMA_NO_NODE) {
pr_err("SRAT: Too many proximity domains.\n");
return -EINVAL;
}
@@ -532,7 +532,7 @@ int __init acpi_numa_init(void)
*/
/* fake_pxm is the next unused PXM value after SRAT parsing */
- for (i = 0, fake_pxm = -1; i < MAX_NUMNODES - 1; i++) {
+ for (i = 0, fake_pxm = -1; i < MAX_NUMNODES; i++) {
if (node_to_pxm_map[i] > fake_pxm)
fake_pxm = node_to_pxm_map[i];
}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c09cc3c68633..70af3fbbebe5 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -544,11 +544,7 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
- u32 handled;
-
- handled = (*acpi_irq_handler) (acpi_irq_context);
-
- if (handled) {
+ if ((*acpi_irq_handler)(acpi_irq_context)) {
acpi_irq_handled++;
return IRQ_HANDLED;
} else {
@@ -582,7 +578,8 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
acpi_irq_handler = handler;
acpi_irq_context = context;
- if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
+ if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED | IRQF_ONESHOT,
+ "acpi", acpi_irq)) {
pr_err("SCI (IRQ%d) allocation failed\n", irq);
acpi_irq_handler = NULL;
return AE_NOT_ACQUIRED;
@@ -1063,9 +1060,7 @@ int __init acpi_debugger_init(void)
acpi_status acpi_os_execute(acpi_execute_type type,
acpi_osd_exec_callback function, void *context)
{
- acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
- struct workqueue_struct *queue;
int ret;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@@ -1076,9 +1071,9 @@ acpi_status acpi_os_execute(acpi_execute_type type,
ret = acpi_debugger_create_thread(function, context);
if (ret) {
pr_err("Kernel thread creation failed\n");
- status = AE_ERROR;
+ return AE_ERROR;
}
- goto out_thread;
+ return AE_OK;
}
/*
@@ -1096,43 +1091,41 @@ acpi_status acpi_os_execute(acpi_execute_type type,
dpc->function = function;
dpc->context = context;
+ INIT_WORK(&dpc->work, acpi_os_execute_deferred);
/*
* To prevent lockdep from complaining unnecessarily, make sure that
* there is a different static lockdep key for each workqueue by using
* INIT_WORK() for each of them separately.
*/
- if (type == OSL_NOTIFY_HANDLER) {
- queue = kacpi_notify_wq;
- INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- } else if (type == OSL_GPE_HANDLER) {
- queue = kacpid_wq;
- INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- } else {
+ switch (type) {
+ case OSL_NOTIFY_HANDLER:
+ ret = queue_work(kacpi_notify_wq, &dpc->work);
+ break;
+ case OSL_GPE_HANDLER:
+ /*
+ * On some machines, a software-initiated SMI causes corruption
+ * unless the SMI runs on CPU 0. An SMI can be initiated by
+ * any AML, but typically it's done in GPE-related methods that
+ * are run via workqueues, so we can avoid the known corruption
+ * cases by always queueing on CPU 0.
+ */
+ ret = queue_work_on(0, kacpid_wq, &dpc->work);
+ break;
+ default:
pr_err("Unsupported os_execute type %d.\n", type);
- status = AE_ERROR;
+ goto err;
}
-
- if (ACPI_FAILURE(status))
- goto err_workqueue;
-
- /*
- * On some machines, a software-initiated SMI causes corruption unless
- * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
- * typically it's done in GPE-related methods that are run via
- * workqueues, so we can avoid the known corruption cases by always
- * queueing on CPU 0.
- */
- ret = queue_work_on(0, queue, &dpc->work);
if (!ret) {
pr_err("Unable to queue work\n");
- status = AE_ERROR;
+ goto err;
}
-err_workqueue:
- if (ACPI_FAILURE(status))
- kfree(dpc);
-out_thread:
- return status;
+
+ return AE_OK;
+
+err:
+ kfree(dpc);
+ return AE_ERROR;
}
EXPORT_SYMBOL(acpi_os_execute);
@@ -1522,20 +1515,18 @@ void acpi_os_delete_lock(acpi_spinlock handle)
acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
__acquires(lockp)
{
- acpi_cpu_flags flags;
-
- spin_lock_irqsave(lockp, flags);
- return flags;
+ spin_lock(lockp);
+ return 0;
}
/*
* Release a spinlock. See above.
*/
-void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
+void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags not_used)
__releases(lockp)
{
- spin_unlock_irqrestore(lockp, flags);
+ spin_unlock(lockp);
}
#ifndef ACPI_USE_LOCAL_CACHE
@@ -1672,7 +1663,7 @@ acpi_status __init acpi_os_initialize(void)
acpi_status __init acpi_os_initialize1(void)
{
kacpid_wq = alloc_workqueue("kacpid", 0, 1);
- kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
+ kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 0);
kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index b7c6287eccca..1219adb11ab9 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -17,6 +17,8 @@
#include <acpi/processor.h>
#include <linux/uaccess.h>
+#include "internal.h"
+
#ifdef CONFIG_CPU_FREQ
/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
@@ -26,12 +28,21 @@
*/
#define CPUFREQ_THERMAL_MIN_STEP 0
-#define CPUFREQ_THERMAL_MAX_STEP 3
-static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
+static int cpufreq_thermal_max_step __read_mostly = 3;
+
+/*
+ * Minimum throttle percentage for processor_thermal cooling device.
+ * The processor_thermal driver uses it to calculate the percentage amount by
+ * which cpu frequency must be reduced for each cooling state. This is also used
+ * to calculate the maximum number of throttling steps or cooling states.
+ */
+static int cpufreq_thermal_reduction_pctg __read_mostly = 20;
-#define reduction_pctg(cpu) \
- per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
+static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_step);
+
+#define reduction_step(cpu) \
+ per_cpu(cpufreq_thermal_reduction_step, phys_package_first_cpu(cpu))
/*
* Emulate "per package data" using per cpu data (which should really be
@@ -71,7 +82,7 @@ static int cpufreq_get_max_state(unsigned int cpu)
if (!cpu_has_cpufreq(cpu))
return 0;
- return CPUFREQ_THERMAL_MAX_STEP;
+ return cpufreq_thermal_max_step;
}
static int cpufreq_get_cur_state(unsigned int cpu)
@@ -79,7 +90,7 @@ static int cpufreq_get_cur_state(unsigned int cpu)
if (!cpu_has_cpufreq(cpu))
return 0;
- return reduction_pctg(cpu);
+ return reduction_step(cpu);
}
static int cpufreq_set_cur_state(unsigned int cpu, int state)
@@ -92,7 +103,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
if (!cpu_has_cpufreq(cpu))
return 0;
- reduction_pctg(cpu) = state;
+ reduction_step(cpu) = state;
/*
* Update all the CPUs in the same package because they all
@@ -113,7 +124,8 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
if (!policy)
return -EINVAL;
- max_freq = (policy->cpuinfo.max_freq * (100 - reduction_pctg(i) * 20)) / 100;
+ max_freq = (policy->cpuinfo.max_freq *
+ (100 - reduction_step(i) * cpufreq_thermal_reduction_pctg)) / 100;
cpufreq_cpu_put(policy);
@@ -126,10 +138,29 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
return 0;
}
+static void acpi_thermal_cpufreq_config(void)
+{
+ int cpufreq_pctg = acpi_arch_thermal_cpufreq_pctg();
+
+ if (!cpufreq_pctg)
+ return;
+
+ cpufreq_thermal_reduction_pctg = cpufreq_pctg;
+
+ /*
+ * Derive the MAX_STEP from minimum throttle percentage so that the reduction
+ * percentage doesn't end up becoming negative. Also, cap the MAX_STEP so that
+ * the CPU performance doesn't become 0.
+ */
+ cpufreq_thermal_max_step = (100 / cpufreq_pctg) - 2;
+}
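A worked example of the formula: with acpi_arch_thermal_cpufreq_pctg() returning 20, the limit becomes (100 / 20) - 2 = 3.

	/*
	 * Example: cpufreq_pctg == 20 gives cpufreq_thermal_max_step == 3, and
	 * cpufreq_set_cur_state(cpu, 3) then caps max_freq at
	 * (100 - 3 * 20)% == 40% of cpuinfo.max_freq.
	 */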
+
void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{
unsigned int cpu;
+ acpi_thermal_cpufreq_config();
+
for_each_cpu(cpu, policy->related_cpus) {
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
@@ -190,7 +221,7 @@ static int acpi_processor_max_state(struct acpi_processor *pr)
/*
 * There exist four states according to
- * cpufreq_thermal_reduction_pctg. 0, 1, 2, 3
+ * cpufreq_thermal_reduction_step. 0, 1, 2, 3
*/
max_state += cpufreq_get_max_state(pr->id);
if (pr->flags.throttling)
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 6979a3f9f90a..07d76fb740b6 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -2,14 +2,17 @@
/*
* ACPI device specific properties support.
*
- * Copyright (C) 2014, Intel Corporation
+ * Copyright (C) 2014 - 2023, Intel Corporation
* All rights reserved.
*
* Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
* Darren Hart <dvhart@linux.intel.com>
* Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ * Sakari Ailus <sakari.ailus@linux.intel.com>
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/export.h>
@@ -801,27 +804,15 @@ static int acpi_get_ref_args(struct fwnode_reference_args *args,
u32 nargs = 0, i;
/*
- * Find the referred data extension node under the
- * referred device node.
- */
- for (; *element < end && (*element)->type == ACPI_TYPE_STRING;
- (*element)++) {
- const char *child_name = (*element)->string.pointer;
-
- ref_fwnode = acpi_fwnode_get_named_child_node(ref_fwnode, child_name);
- if (!ref_fwnode)
- return -EINVAL;
- }
-
- /*
* Assume the following integer elements are all args. Stop counting on
- * the first reference or end of the package arguments. In case of
- * neither reference, nor integer, return an error, we can't parse it.
+ * the first reference (possibly represented as a string) or the end of the
+ * package arguments. If an element is neither a reference nor an integer,
+ * return an error, since the package can't be parsed.
*/
for (i = 0; (*element) + i < end && i < num_args; i++) {
acpi_object_type type = (*element)[i].type;
- if (type == ACPI_TYPE_LOCAL_REFERENCE)
+ if (type == ACPI_TYPE_LOCAL_REFERENCE || type == ACPI_TYPE_STRING)
break;
if (type == ACPI_TYPE_INTEGER)
@@ -845,6 +836,44 @@ static int acpi_get_ref_args(struct fwnode_reference_args *args,
return 0;
}
+static struct fwnode_handle *acpi_parse_string_ref(const struct fwnode_handle *fwnode,
+ const char *refstring)
+{
+ acpi_handle scope, handle;
+ struct acpi_data_node *dn;
+ struct acpi_device *device;
+ acpi_status status;
+
+ if (is_acpi_device_node(fwnode)) {
+ scope = to_acpi_device_node(fwnode)->handle;
+ } else if (is_acpi_data_node(fwnode)) {
+ scope = to_acpi_data_node(fwnode)->handle;
+ } else {
+ pr_debug("Bad node type for node %pfw\n", fwnode);
+ return NULL;
+ }
+
+ status = acpi_get_handle(scope, refstring, &handle);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(scope, "Unable to get an ACPI handle for %s\n",
+ refstring);
+ return NULL;
+ }
+
+ device = acpi_fetch_acpi_dev(handle);
+ if (device)
+ return acpi_fwnode_handle(device);
+
+ status = acpi_get_data_full(handle, acpi_nondev_subnode_tag,
+ (void **)&dn, NULL);
+ if (ACPI_FAILURE(status) || !dn) {
+ acpi_handle_debug(handle, "Subnode not found\n");
+ return NULL;
+ }
+
+ return &dn->fwnode;
+}
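An illustration of what this resolves, with a hypothetical _DSD fragment using a string reference:

	/*
	 * Package () { "sensor-handle", Package () { "\\_SB.PCI0.ISP0" } }
	 *
	 * The string "\\_SB.PCI0.ISP0" is looked up relative to the scope of the
	 * node holding the property, first as a struct acpi_device and, failing
	 * that, as a hierarchical data (sub)node.
	 */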
+
/**
* __acpi_node_get_property_reference - returns handle to the referenced object
* @fwnode: Firmware node to get the property from
@@ -887,6 +916,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const union acpi_object *element, *end;
const union acpi_object *obj;
const struct acpi_device_data *data;
+ struct fwnode_handle *ref_fwnode;
struct acpi_device *device;
int ret, idx = 0;
@@ -910,16 +940,30 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0;
+
+ return 0;
+ case ACPI_TYPE_STRING:
+ if (index)
+ return -ENOENT;
+
+ ref_fwnode = acpi_parse_string_ref(fwnode, obj->string.pointer);
+ if (!ref_fwnode)
+ return -EINVAL;
+
+ args->fwnode = ref_fwnode;
+ args->nargs = 0;
+
return 0;
case ACPI_TYPE_PACKAGE:
/*
* If it is not a single reference, then it is a package of
- * references followed by number of ints as follows:
+ * references, followed by number of ints as follows:
*
* Package () { REF, INT, REF, INT, INT }
*
- * The index argument is then used to determine which reference
- * the caller wants (along with the arguments).
+ * Here, REF may be either a local reference or a string. The
+ * index argument is then used to determine which reference the
+ * caller wants (along with the arguments).
*/
break;
default:
@@ -951,6 +995,24 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return 0;
break;
+ case ACPI_TYPE_STRING:
+ ref_fwnode = acpi_parse_string_ref(fwnode,
+ element->string.pointer);
+ if (!ref_fwnode)
+ return -EINVAL;
+
+ element++;
+
+ ret = acpi_get_ref_args(idx == index ? args : NULL,
+ ref_fwnode, &element, end,
+ num_args);
+ if (ret < 0)
+ return ret;
+
+ if (idx == index)
+ return 0;
+
+ break;
case ACPI_TYPE_INTEGER:
if (idx == index)
return -ENOENT;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 9bd9f79cd409..dacad1d846c0 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -462,6 +462,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus ExpertBook B1502CGA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B1502CGA"),
+ },
+ },
+ {
/* Asus ExpertBook B2402CBA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -483,6 +490,20 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook E1504GA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "E1504GA"),
+ },
+ },
+ {
+ /* Asus Vivobook E1504GAB */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "E1504GAB"),
+ },
+ },
+ {
/* LG Electronics 17U70P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
@@ -511,6 +532,13 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
},
},
{
+ /* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "RP-15"),
+ },
+ },
+ {
/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
@@ -548,6 +576,18 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "GM6BG0Q"),
},
},
+ {
+ /* Infinity E15-5A165-BM */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GM5RG1E0009COM"),
+ },
+ },
+ {
+ /* Infinity E15-5A305-1M */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GM5RGEE0016COM"),
+ },
+ },
{ }
};
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 02bb2cce423f..950d3b02a2a9 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1981,10 +1981,9 @@ static void acpi_scan_init_hotplug(struct acpi_device *adev)
}
}
-static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
+static u32 acpi_scan_check_dep(acpi_handle handle)
{
struct acpi_handle_list dep_devices;
- acpi_status status;
u32 count;
int i;
@@ -1994,12 +1993,10 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
* 2. ACPI nodes describing USB ports.
* Still, checking for _HID catches more then just these cases ...
*/
- if (!check_dep || !acpi_has_method(handle, "_DEP") ||
- !acpi_has_method(handle, "_HID"))
+ if (!acpi_has_method(handle, "_DEP") || !acpi_has_method(handle, "_HID"))
return 0;
- status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices)) {
acpi_handle_debug(handle, "Failed to evaluate _DEP.\n");
return 0;
}
@@ -2008,6 +2005,7 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
struct acpi_device_info *info;
struct acpi_dep_data *dep;
bool skip, honor_dep;
+ acpi_status status;
status = acpi_get_object_info(dep_devices.handles[i], &info);
if (ACPI_FAILURE(status)) {
@@ -2041,7 +2039,13 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
return count;
}
-static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
+static acpi_status acpi_scan_check_crs_csi2_cb(acpi_handle handle, u32 a, void *b, void **c)
+{
+ acpi_mipi_check_crs_csi2(handle);
+ return AE_OK;
+}
+
+static acpi_status acpi_bus_check_add(acpi_handle handle, bool first_pass,
struct acpi_device **adev_p)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
@@ -2059,9 +2063,25 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
if (acpi_device_should_be_hidden(handle))
return AE_OK;
- /* Bail out if there are dependencies. */
- if (acpi_scan_check_dep(handle, check_dep) > 0)
- return AE_CTRL_DEPTH;
+ if (first_pass) {
+ acpi_mipi_check_crs_csi2(handle);
+
+ /* Bail out if there are dependencies. */
+ if (acpi_scan_check_dep(handle) > 0) {
+ /*
+ * The entire CSI-2 connection graph needs to be
+ * extracted before any drivers or scan handlers
+ * are bound to struct device objects, so scan
+ * _CRS CSI-2 resource descriptors for all
+ * devices below the current handle.
+ */
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
+ ACPI_UINT32_MAX,
+ acpi_scan_check_crs_csi2_cb,
+ NULL, NULL, NULL);
+ return AE_CTRL_DEPTH;
+ }
+ }
fallthrough;
case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */
@@ -2084,10 +2104,10 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
}
/*
- * If check_dep is true at this point, the device has no dependencies,
+ * If first_pass is true at this point, the device has no dependencies,
* or the creation of the device object would have been postponed above.
*/
- acpi_add_single_object(&device, handle, type, !check_dep);
+ acpi_add_single_object(&device, handle, type, !first_pass);
if (!device)
return AE_CTRL_DEPTH;
@@ -2431,6 +2451,13 @@ static void acpi_scan_postponed_branch(acpi_handle handle)
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
acpi_bus_check_add_2, NULL, NULL, (void **)&adev);
+
+ /*
+ * Populate the ACPI _CRS CSI-2 software nodes for the ACPI devices that
+ * have been added above.
+ */
+ acpi_mipi_init_crs_csi2_swnodes();
+
acpi_bus_attach(adev, NULL);
}
@@ -2499,12 +2526,22 @@ int acpi_bus_scan(acpi_handle handle)
if (!device)
return -ENODEV;
+ /*
+ * Set up ACPI _CRS CSI-2 software nodes using information extracted
+ * from the _CRS CSI-2 resource descriptors during the ACPI namespace
+ * walk above and MIPI DisCo for Imaging device properties.
+ */
+ acpi_mipi_scan_crs_csi2();
+ acpi_mipi_init_crs_csi2_swnodes();
+
acpi_bus_attach(device, (void *)true);
/* Pass 2: Enumerate all of the remaining devices. */
acpi_scan_postponed();
+ acpi_mipi_crs_csi2_cleanup();
+
return 0;
}
EXPORT_SYMBOL(acpi_bus_scan);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index f74d81abdbfc..4748e8061253 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -31,6 +31,8 @@
#include <linux/uaccess.h>
#include <linux/units.h>
+#include "internal.h"
+
#define ACPI_THERMAL_CLASS "thermal_zone"
#define ACPI_THERMAL_DEVICE_NAME "Thermal Zone"
#define ACPI_THERMAL_NOTIFY_TEMPERATURE 0x80
@@ -90,7 +92,7 @@ struct acpi_thermal_passive {
struct acpi_thermal_trip trip;
unsigned long tc1;
unsigned long tc2;
- unsigned long tsp;
+ unsigned long delay;
};
struct acpi_thermal_active {
@@ -188,24 +190,19 @@ static int active_trip_index(struct acpi_thermal *tz,
static long get_passive_temp(struct acpi_thermal *tz)
{
- unsigned long long tmp;
- acpi_status status;
+ int temp;
- status = acpi_evaluate_integer(tz->device->handle, "_PSV", NULL, &tmp);
- if (ACPI_FAILURE(status))
+ if (acpi_passive_trip_temp(tz->device, &temp))
return THERMAL_TEMP_INVALID;
- return tmp;
+ return temp;
}
static long get_active_temp(struct acpi_thermal *tz, int index)
{
- char method[] = { '_', 'A', 'C', '0' + index, '\0' };
- unsigned long long tmp;
- acpi_status status;
+ int temp;
- status = acpi_evaluate_integer(tz->device->handle, method, NULL, &tmp);
- if (ACPI_FAILURE(status))
+ if (acpi_active_trip_temp(tz->device, index, &temp))
return THERMAL_TEMP_INVALID;
/*
@@ -215,10 +212,10 @@ static long get_active_temp(struct acpi_thermal *tz, int index)
if (act > 0) {
unsigned long long override = celsius_to_deci_kelvin(act);
- if (tmp > override)
- tmp = override;
+ if (temp > override)
+ return override;
}
- return tmp;
+ return temp;
}
static void acpi_thermal_update_trip(struct acpi_thermal *tz,
@@ -247,7 +244,6 @@ static bool update_trip_devices(struct acpi_thermal *tz,
{
struct acpi_handle_list devices = { 0 };
char method[] = "_PSL";
- acpi_status status;
if (index != ACPI_THERMAL_TRIP_PASSIVE) {
method[1] = 'A';
@@ -255,8 +251,7 @@ static bool update_trip_devices(struct acpi_thermal *tz,
method[3] = '0' + index;
}
- status = acpi_evaluate_reference(tz->device->handle, method, NULL, &devices);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_evaluate_reference(tz->device->handle, method, NULL, &devices)) {
acpi_handle_info(tz->device->handle, "%s evaluation failure\n", method);
return false;
}
@@ -297,6 +292,7 @@ static int acpi_thermal_adjust_trip(struct thermal_trip *trip, void *data)
struct acpi_thermal_trip *acpi_trip = trip->priv;
struct adjust_trip_data *atd = data;
struct acpi_thermal *tz = atd->tz;
+ int temp;
if (!acpi_trip || !acpi_thermal_trip_valid(acpi_trip))
return 0;
@@ -307,9 +303,11 @@ static int acpi_thermal_adjust_trip(struct thermal_trip *trip, void *data)
acpi_thermal_update_trip_devices(tz, trip);
if (acpi_thermal_trip_valid(acpi_trip))
- trip->temperature = acpi_thermal_temp(tz, acpi_trip->temp_dk);
+ temp = acpi_thermal_temp(tz, acpi_trip->temp_dk);
else
- trip->temperature = THERMAL_TEMP_INVALID;
+ temp = THERMAL_TEMP_INVALID;
+
+ thermal_zone_set_trip_temp(tz->thermal_zone, trip, temp);
return 0;
}
@@ -339,13 +337,12 @@ static void acpi_thermal_trips_update(struct acpi_thermal *tz, u32 event)
dev_name(&adev->dev), event, 0);
}
-static long acpi_thermal_get_critical_trip(struct acpi_thermal *tz)
+static int acpi_thermal_get_critical_trip(struct acpi_thermal *tz)
{
- unsigned long long tmp;
- acpi_status status;
+ int temp;
if (crt > 0) {
- tmp = celsius_to_deci_kelvin(crt);
+ temp = celsius_to_deci_kelvin(crt);
goto set;
}
if (crt == -1) {
@@ -353,38 +350,34 @@ static long acpi_thermal_get_critical_trip(struct acpi_thermal *tz)
return THERMAL_TEMP_INVALID;
}
- status = acpi_evaluate_integer(tz->device->handle, "_CRT", NULL, &tmp);
- if (ACPI_FAILURE(status)) {
- acpi_handle_debug(tz->device->handle, "No critical threshold\n");
+ if (acpi_critical_trip_temp(tz->device, &temp))
return THERMAL_TEMP_INVALID;
- }
- if (tmp <= 2732) {
+
+ if (temp <= 2732) {
/*
* Below zero (Celsius) values clearly aren't right for sure,
* so discard them as invalid.
*/
- pr_info(FW_BUG "Invalid critical threshold (%llu)\n", tmp);
+ pr_info(FW_BUG "Invalid critical threshold (%d)\n", temp);
return THERMAL_TEMP_INVALID;
}
set:
- acpi_handle_debug(tz->device->handle, "Critical threshold [%llu]\n", tmp);
- return tmp;
+ acpi_handle_debug(tz->device->handle, "Critical threshold [%d]\n", temp);
+ return temp;
}
-static long acpi_thermal_get_hot_trip(struct acpi_thermal *tz)
+static int acpi_thermal_get_hot_trip(struct acpi_thermal *tz)
{
- unsigned long long tmp;
- acpi_status status;
+ int temp;
- status = acpi_evaluate_integer(tz->device->handle, "_HOT", NULL, &tmp);
- if (ACPI_FAILURE(status)) {
+ if (acpi_hot_trip_temp(tz->device, &temp) || temp == THERMAL_TEMP_INVALID) {
acpi_handle_debug(tz->device->handle, "No hot threshold\n");
return THERMAL_TEMP_INVALID;
}
- acpi_handle_debug(tz->device->handle, "Hot threshold [%llu]\n", tmp);
- return tmp;
+ acpi_handle_debug(tz->device->handle, "Hot threshold [%d]\n", temp);
+ return temp;
}
static bool passive_trip_params_init(struct acpi_thermal *tz)
@@ -404,11 +397,17 @@ static bool passive_trip_params_init(struct acpi_thermal *tz)
tz->trips.passive.tc2 = tmp;
+ status = acpi_evaluate_integer(tz->device->handle, "_TFP", NULL, &tmp);
+ if (ACPI_SUCCESS(status)) {
+ tz->trips.passive.delay = tmp;
+ return true;
+ }
+
status = acpi_evaluate_integer(tz->device->handle, "_TSP", NULL, &tmp);
if (ACPI_FAILURE(status))
return false;
- tz->trips.passive.tsp = tmp;
+ tz->trips.passive.delay = tmp * 100;
return true;
}
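Note the unit difference between the two objects (per the ACPI specification, _TFP is given in milliseconds while _TSP is given in tenths of a second), so both paths produce the delay in milliseconds:

	/* Example: _TFP = 3000 -> delay = 3000 ms; _TSP = 30 -> delay = 30 * 100 = 3000 ms. */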
@@ -904,7 +903,7 @@ static int acpi_thermal_add(struct acpi_device *device)
acpi_trip = &tz->trips.passive.trip;
if (acpi_thermal_trip_valid(acpi_trip)) {
- passive_delay = tz->trips.passive.tsp * 100;
+ passive_delay = tz->trips.passive.delay;
trip->type = THERMAL_TRIP_PASSIVE;
trip->temperature = acpi_thermal_temp(tz, acpi_trip->temp_dk);
@@ -1142,6 +1141,7 @@ static void __exit acpi_thermal_exit(void)
module_init(acpi_thermal_init);
module_exit(acpi_thermal_exit);
+MODULE_IMPORT_NS(ACPI_THERMAL);
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Thermal Zone Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/thermal_acpi.c b/drivers/acpi/thermal_lib.c
index 43eaf0f2ff49..4e0519ca9739 100644
--- a/drivers/thermal/thermal_acpi.c
+++ b/drivers/acpi/thermal_lib.c
@@ -3,12 +3,13 @@
* Copyright 2023 Linaro Limited
* Copyright 2023 Intel Corporation
*
- * Library routines for populating a generic thermal trip point structure
- * with data obtained by evaluating a specific object in the ACPI Namespace.
+ * Library routines for retrieving trip point temperature values from the
+ * platform firmware via ACPI.
*/
#include <linux/acpi.h>
#include <linux/units.h>
#include <linux/thermal.h>
+#include "internal.h"
/*
 * Minimum temperature for full military grade is 218 K (-55°C) and
 * max temperature is 448 K (175°C). We can consider those values as
 * the boundaries for the trip point temperatures returned by the
 * firmware. Any values out of these boundaries may be considered
* firmware. Any values out of these boundaries may be considered
* bogus and we can assume the firmware has no data to provide.
*/
-#define TEMP_MIN_DECIK 2180
-#define TEMP_MAX_DECIK 4480
+#define TEMP_MIN_DECIK 2180ULL
+#define TEMP_MAX_DECIK 4480ULL
-static int thermal_acpi_trip_temp(struct acpi_device *adev, char *obj_name,
- int *ret_temp)
+static int acpi_trip_temp(struct acpi_device *adev, char *obj_name,
+ int *ret_temp)
{
unsigned long long temp;
acpi_status status;
@@ -33,7 +34,7 @@ static int thermal_acpi_trip_temp(struct acpi_device *adev, char *obj_name,
}
if (temp >= TEMP_MIN_DECIK && temp <= TEMP_MAX_DECIK) {
- *ret_temp = deci_kelvin_to_millicelsius(temp);
+ *ret_temp = temp;
} else {
acpi_handle_debug(adev->handle, "%s result %llu out of range\n",
obj_name, temp);
@@ -43,6 +44,48 @@ static int thermal_acpi_trip_temp(struct acpi_device *adev, char *obj_name,
return 0;
}
+int acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp)
+{
+ char obj_name[] = {'_', 'A', 'C', '0' + id, '\0'};
+
+ if (id < 0 || id > 9)
+ return -EINVAL;
+
+ return acpi_trip_temp(adev, obj_name, ret_temp);
+}
+EXPORT_SYMBOL_NS_GPL(acpi_active_trip_temp, ACPI_THERMAL);
+
+int acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp)
+{
+ return acpi_trip_temp(adev, "_PSV", ret_temp);
+}
+EXPORT_SYMBOL_NS_GPL(acpi_passive_trip_temp, ACPI_THERMAL);
+
+int acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp)
+{
+ return acpi_trip_temp(adev, "_HOT", ret_temp);
+}
+EXPORT_SYMBOL_NS_GPL(acpi_hot_trip_temp, ACPI_THERMAL);
+
+int acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp)
+{
+ return acpi_trip_temp(adev, "_CRT", ret_temp);
+}
+EXPORT_SYMBOL_NS_GPL(acpi_critical_trip_temp, ACPI_THERMAL);
+
+static int thermal_temp(int error, int temp_decik, int *ret_temp)
+{
+ if (error)
+ return error;
+
+ if (temp_decik == THERMAL_TEMP_INVALID)
+ *ret_temp = THERMAL_TEMP_INVALID;
+ else
+ *ret_temp = deci_kelvin_to_millicelsius(temp_decik);
+
+ return 0;
+}
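A worked conversion through this helper, assuming the firmware reported a trip temperature of 3282 deci-kelvin:

	int temp_mc;

	/* 3282 deci-kelvin == 328.2 K == 55.05 °C */
	thermal_temp(0, 3282, &temp_mc);	/* temp_mc == 55050 millidegrees Celsius */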
+
/**
* thermal_acpi_active_trip_temp - Retrieve active trip point temperature
* @adev: Target thermal zone ACPI device object.
@@ -57,12 +100,10 @@ static int thermal_acpi_trip_temp(struct acpi_device *adev, char *obj_name,
*/
int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp)
{
- char obj_name[] = {'_', 'A', 'C', '0' + id, '\0'};
-
- if (id < 0 || id > 9)
- return -EINVAL;
+ int temp_decik;
+ int ret = acpi_active_trip_temp(adev, id, &temp_decik);
- return thermal_acpi_trip_temp(adev, obj_name, ret_temp);
+ return thermal_temp(ret, temp_decik, ret_temp);
}
EXPORT_SYMBOL_GPL(thermal_acpi_active_trip_temp);
@@ -78,7 +119,10 @@ EXPORT_SYMBOL_GPL(thermal_acpi_active_trip_temp);
*/
int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp)
{
- return thermal_acpi_trip_temp(adev, "_PSV", ret_temp);
+ int temp_decik;
+ int ret = acpi_passive_trip_temp(adev, &temp_decik);
+
+ return thermal_temp(ret, temp_decik, ret_temp);
}
EXPORT_SYMBOL_GPL(thermal_acpi_passive_trip_temp);
@@ -95,7 +139,10 @@ EXPORT_SYMBOL_GPL(thermal_acpi_passive_trip_temp);
*/
int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp)
{
- return thermal_acpi_trip_temp(adev, "_HOT", ret_temp);
+ int temp_decik;
+ int ret = acpi_hot_trip_temp(adev, &temp_decik);
+
+ return thermal_temp(ret, temp_decik, ret_temp);
}
EXPORT_SYMBOL_GPL(thermal_acpi_hot_trip_temp);
@@ -111,6 +158,9 @@ EXPORT_SYMBOL_GPL(thermal_acpi_hot_trip_temp);
*/
int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp)
{
- return thermal_acpi_trip_temp(adev, "_CRT", ret_temp);
+ int temp_decik;
+ int ret = acpi_critical_trip_temp(adev, &temp_decik);
+
+ return thermal_temp(ret, temp_decik, ret_temp);
}
EXPORT_SYMBOL_GPL(thermal_acpi_critical_trip_temp);
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 62944e35fcee..abac5cc25477 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -329,21 +329,18 @@ const char *acpi_get_subsystem_id(acpi_handle handle)
}
EXPORT_SYMBOL_GPL(acpi_get_subsystem_id);
-acpi_status
-acpi_evaluate_reference(acpi_handle handle,
- acpi_string pathname,
- struct acpi_object_list *arguments,
- struct acpi_handle_list *list)
+bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname,
+ struct acpi_object_list *arguments,
+ struct acpi_handle_list *list)
{
- acpi_status status = AE_OK;
- union acpi_object *package = NULL;
- union acpi_object *element = NULL;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- u32 i = 0;
-
+ union acpi_object *package;
+ acpi_status status;
+ bool ret = false;
+ u32 i;
if (!list)
- return AE_BAD_PARAMETER;
+ return false;
/* Evaluate object. */
@@ -353,62 +350,47 @@ acpi_evaluate_reference(acpi_handle handle,
package = buffer.pointer;
- if ((buffer.length == 0) || !package) {
- status = AE_BAD_DATA;
- acpi_util_eval_error(handle, pathname, status);
- goto end;
- }
- if (package->type != ACPI_TYPE_PACKAGE) {
- status = AE_BAD_DATA;
- acpi_util_eval_error(handle, pathname, status);
- goto end;
- }
- if (!package->package.count) {
- status = AE_BAD_DATA;
- acpi_util_eval_error(handle, pathname, status);
- goto end;
- }
+ if (buffer.length == 0 || !package ||
+ package->type != ACPI_TYPE_PACKAGE || !package->package.count)
+ goto err;
- list->handles = kcalloc(package->package.count, sizeof(*list->handles), GFP_KERNEL);
- if (!list->handles) {
- kfree(package);
- return AE_NO_MEMORY;
- }
list->count = package->package.count;
+ list->handles = kcalloc(list->count, sizeof(*list->handles), GFP_KERNEL);
+ if (!list->handles)
+ goto err_clear;
/* Extract package data. */
for (i = 0; i < list->count; i++) {
+ union acpi_object *element = &(package->package.elements[i]);
- element = &(package->package.elements[i]);
+ if (element->type != ACPI_TYPE_LOCAL_REFERENCE ||
+ !element->reference.handle)
+ goto err_free;
- if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
- status = AE_BAD_DATA;
- acpi_util_eval_error(handle, pathname, status);
- break;
- }
-
- if (!element->reference.handle) {
- status = AE_NULL_ENTRY;
- acpi_util_eval_error(handle, pathname, status);
- break;
- }
/* Get the acpi_handle. */
list->handles[i] = element->reference.handle;
acpi_handle_debug(list->handles[i], "Found in reference list\n");
}
- if (ACPI_FAILURE(status)) {
- list->count = 0;
- kfree(list->handles);
- list->handles = NULL;
- }
+ ret = true;
end:
kfree(buffer.pointer);
- return status;
+ return ret;
+
+err_free:
+ kfree(list->handles);
+ list->handles = NULL;
+
+err_clear:
+ list->count = 0;
+
+err:
+ acpi_util_eval_error(handle, pathname, status);
+ goto end;
}
EXPORT_SYMBOL(acpi_evaluate_reference);
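With the bool return value, callers collapse to a single check; a minimal usage sketch mirroring the thermal driver update above:

	struct acpi_handle_list devices = { 0 };
	int i;

	if (!acpi_evaluate_reference(handle, "_PSL", NULL, &devices))
		return false;

	for (i = 0; i < devices.count; i++)
		acpi_handle_debug(devices.handles[i], "passive cooling device\n");

	acpi_handle_list_free(&devices);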
@@ -426,7 +408,7 @@ bool acpi_handle_list_equal(struct acpi_handle_list *list1,
{
return list1->count == list2->count &&
!memcmp(list1->handles, list2->handles,
- list1->count * sizeof(acpi_handle));
+ list1->count * sizeof(*list1->handles));
}
EXPORT_SYMBOL_GPL(acpi_handle_list_equal);
@@ -468,6 +450,40 @@ void acpi_handle_list_free(struct acpi_handle_list *list)
}
EXPORT_SYMBOL_GPL(acpi_handle_list_free);
+/**
+ * acpi_device_dep - Check ACPI device dependency
+ * @target: ACPI handle of the target ACPI device.
+ * @match: ACPI handle to look up in the target's _DEP list.
+ *
+ * Return true if @match is present in the list returned by _DEP for
+ * @target or false otherwise.
+ */
+bool acpi_device_dep(acpi_handle target, acpi_handle match)
+{
+ struct acpi_handle_list dep_devices;
+ bool ret = false;
+ int i;
+
+ if (!acpi_has_method(target, "_DEP"))
+ return false;
+
+ if (!acpi_evaluate_reference(target, "_DEP", NULL, &dep_devices)) {
+ acpi_handle_debug(target, "Failed to evaluate _DEP.\n");
+ return false;
+ }
+
+ for (i = 0; i < dep_devices.count; i++) {
+ if (dep_devices.handles[i] == match) {
+ ret = true;
+ break;
+ }
+ }
+
+ acpi_handle_list_free(&dep_devices);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(acpi_device_dep);
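Usage sketch, with supplier standing in for an ACPI handle the caller obtained elsewhere:

	if (acpi_device_dep(acpi_device_handle(adev), supplier))
		dev_dbg(&adev->dev, "waiting for a supplier device\n");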
+
acpi_status
acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld)
{
@@ -825,54 +841,6 @@ bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs)
EXPORT_SYMBOL(acpi_check_dsm);
/**
- * acpi_dev_uid_match - Match device by supplied UID
- * @adev: ACPI device to match.
- * @uid2: Unique ID of the device.
- *
- * Matches UID in @adev with given @uid2.
- *
- * Returns:
- * - %true if matches.
- * - %false otherwise.
- */
-bool acpi_dev_uid_match(struct acpi_device *adev, const char *uid2)
-{
- const char *uid1 = acpi_device_uid(adev);
-
- return uid1 && uid2 && !strcmp(uid1, uid2);
-}
-EXPORT_SYMBOL_GPL(acpi_dev_uid_match);
-
-/**
- * acpi_dev_hid_uid_match - Match device by supplied HID and UID
- * @adev: ACPI device to match.
- * @hid2: Hardware ID of the device.
- * @uid2: Unique ID of the device, pass NULL to not check _UID.
- *
- * Matches HID and UID in @adev with given @hid2 and @uid2. Absence of @uid2
- * will be treated as a match. If user wants to validate @uid2, it should be
- * done before calling this function.
- *
- * Returns:
- * - %true if matches or @uid2 is NULL.
- * - %false otherwise.
- */
-bool acpi_dev_hid_uid_match(struct acpi_device *adev,
- const char *hid2, const char *uid2)
-{
- const char *hid1 = acpi_device_hid(adev);
-
- if (strcmp(hid1, hid2))
- return false;
-
- if (!uid2)
- return true;
-
- return acpi_dev_uid_match(adev, uid2);
-}
-EXPORT_SYMBOL(acpi_dev_hid_uid_match);
-
-/**
* acpi_dev_uid_to_integer - treat ACPI device _UID as integer
* @adev: ACPI device to get _UID from
* @integer: output buffer for integer
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 92128aae2d06..7658103ba760 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1921,7 +1921,7 @@ static void binder_deferred_fd_close(int fd)
if (!twcb)
return;
init_task_work(&twcb->twork, binder_do_fd_close);
- twcb->file = close_fd_get_file(fd);
+ twcb->file = file_close_fd(fd);
if (twcb->file) {
// pin it until binder_do_fd_close(); see comments there
get_file(twcb->file);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 138f6d43d13b..f69d30c9f50f 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -234,7 +234,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
if (page->page_ptr) {
trace_binder_alloc_lru_start(alloc, index);
- on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+ on_lru = list_lru_del_obj(&binder_alloc_lru, &page->lru);
WARN_ON(!on_lru);
trace_binder_alloc_lru_end(alloc, index);
@@ -285,7 +285,7 @@ free_range:
trace_binder_free_lru_start(alloc, index);
- ret = list_lru_add(&binder_alloc_lru, &page->lru);
+ ret = list_lru_add_obj(&binder_alloc_lru, &page->lru);
WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index);
@@ -848,7 +848,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
if (!alloc->pages[i].page_ptr)
continue;
- on_lru = list_lru_del(&binder_alloc_lru,
+ on_lru = list_lru_del_obj(&binder_alloc_lru,
&alloc->pages[i].lru);
page_addr = alloc->buffer + i * PAGE_SIZE;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
@@ -1287,4 +1287,3 @@ int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
dest, bytes);
}
-
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index eaa31e567d1e..5b59d133b6af 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -144,7 +144,7 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
-static int __init early_cpu_to_node(int cpu)
+int __init early_cpu_to_node(int cpu)
{
return cpu_to_node_map[cpu];
}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index b741b5ba82bd..5aaa0865625d 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
+#include <linux/units.h>
#define CREATE_TRACE_POINTS
#include <trace/events/thermal_pressure.h>
@@ -26,7 +27,8 @@
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
-static DEFINE_PER_CPU(u32, freq_factor) = 1;
+DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
+EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);
static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
@@ -170,9 +172,9 @@ DEFINE_PER_CPU(unsigned long, thermal_pressure);
* operating on stale data when hot-plug is used for some CPUs. The
* @capped_freq reflects the currently allowed max CPUs frequency due to
* thermal capping. It might be also a boost frequency value, which is bigger
- * than the internal 'freq_factor' max frequency. In such case the pressure
- * value should simply be removed, since this is an indication that there is
- * no thermal throttling. The @capped_freq must be provided in kHz.
+ * than the internal 'capacity_freq_ref' max frequency. In such a case the
+ * pressure value should simply be removed, since this is an indication that
+ * there is no thermal throttling. The @capped_freq must be provided in kHz.
*/
void topology_update_thermal_pressure(const struct cpumask *cpus,
unsigned long capped_freq)
@@ -183,10 +185,7 @@ void topology_update_thermal_pressure(const struct cpumask *cpus,
cpu = cpumask_first(cpus);
max_capacity = arch_scale_cpu_capacity(cpu);
- max_freq = per_cpu(freq_factor, cpu);
-
- /* Convert to MHz scale which is used in 'freq_factor' */
- capped_freq /= 1000;
+ max_freq = arch_scale_freq_ref(cpu);
/*
* Handle properly the boost frequencies, which should simply clean
@@ -279,13 +278,13 @@ void topology_normalize_cpu_scale(void)
capacity_scale = 1;
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+ capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
capacity_scale = max(capacity, capacity_scale);
}
pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+ capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
capacity_scale);
topology_set_cpu_scale(cpu, capacity);
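A worked example of the normalization, with hypothetical raw capacities {512, 1024}, capacity_freq_ref values {1800000, 2400000} kHz and SCHED_CAPACITY_SHIFT == 10:

	/*
	 * products:       512 * 1800000 = 921600000, 1024 * 2400000 = 2457600000
	 * capacity_scale: 2457600000
	 * CPU0: (921600000  << SCHED_CAPACITY_SHIFT) / 2457600000 = 384
	 * CPU1: (2457600000 << SCHED_CAPACITY_SHIFT) / 2457600000 = 1024
	 */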
@@ -321,15 +320,15 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
cpu_node, raw_capacity[cpu]);
/*
- * Update freq_factor for calculating early boot cpu capacities.
+ * Update capacity_freq_ref for calculating early boot CPU capacities.
* For non-clk CPU DVFS mechanism, there's no way to get the
* frequency value now, assuming they are running at the same
- * frequency (by keeping the initial freq_factor value).
+ * frequency (by keeping the initial capacity_freq_ref value).
*/
cpu_clk = of_clk_get(cpu_node, 0);
if (!PTR_ERR_OR_ZERO(cpu_clk)) {
- per_cpu(freq_factor, cpu) =
- clk_get_rate(cpu_clk) / 1000;
+ per_cpu(capacity_freq_ref, cpu) =
+ clk_get_rate(cpu_clk) / HZ_PER_KHZ;
clk_put(cpu_clk);
}
} else {
@@ -345,11 +344,16 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
return !ret;
}
+void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
+{
+}
+
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>
void topology_init_cpu_capacity_cppc(void)
{
+ u64 capacity, capacity_scale = 0;
struct cppc_perf_caps perf_caps;
int cpu;
@@ -366,6 +370,10 @@ void topology_init_cpu_capacity_cppc(void)
(perf_caps.highest_perf >= perf_caps.nominal_perf) &&
(perf_caps.highest_perf >= perf_caps.lowest_perf)) {
raw_capacity[cpu] = perf_caps.highest_perf;
+ capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]);
+
+ per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);
+
pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
cpu, raw_capacity[cpu]);
continue;
@@ -376,7 +384,18 @@ void topology_init_cpu_capacity_cppc(void)
goto exit;
}
- topology_normalize_cpu_scale();
+ for_each_possible_cpu(cpu) {
+ freq_inv_set_max_ratio(cpu,
+ per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
+
+ capacity = raw_capacity[cpu];
+ capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
+ capacity_scale);
+ topology_set_cpu_scale(cpu, capacity);
+ pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+ cpu, topology_get_cpu_scale(cpu));
+ }
+
schedule_work(&update_topology_flags_work);
pr_debug("cpu_capacity: cpu_capacity initialization done\n");
@@ -410,8 +429,11 @@ init_cpu_capacity_callback(struct notifier_block *nb,
cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
- for_each_cpu(cpu, policy->related_cpus)
- per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;
+ for_each_cpu(cpu, policy->related_cpus) {
+ per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
+ freq_inv_set_max_ratio(cpu,
+ per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
+ }
if (cpumask_empty(cpus_to_visit)) {
topology_normalize_cpu_scale();
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index f85f3515c258..9c5a5f4dba5a 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -579,7 +579,7 @@ bool dev_pm_skip_resume(struct device *dev)
}
/**
- * device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * __device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
@@ -587,7 +587,7 @@ bool dev_pm_skip_resume(struct device *dev)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -655,7 +655,13 @@ Skip:
Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
- return error;
+
+ if (error) {
+ suspend_stats.failed_resume_noirq++;
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
+ }
}
static bool is_async(struct device *dev)
@@ -668,11 +674,15 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
{
reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(func, dev);
+ if (!is_async(dev))
+ return false;
+
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
return true;
- }
+
+ put_device(dev);
return false;
}
@@ -680,15 +690,19 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
-
- error = device_resume_noirq(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ __device_resume_noirq(dev, pm_transition, true);
put_device(dev);
}
+static void device_resume_noirq(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_resume_noirq))
+ return;
+
+ __device_resume_noirq(dev, pm_transition, false);
+}
+
static void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
@@ -698,14 +712,6 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
pm_transition = state;
- /*
- * Advanced the async threads upfront,
- * in case the starting of async threads is
- * delayed by non-async resuming devices.
- */
- list_for_each_entry(dev, &dpm_noirq_list, power.entry)
- dpm_async_fn(dev, async_resume_noirq);
-
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
get_device(dev);
@@ -713,17 +719,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- if (!is_async(dev)) {
- int error;
-
- error = device_resume_noirq(dev, state, false);
- if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " noirq", error);
- }
- }
+ device_resume_noirq(dev);
put_device(dev);
@@ -751,14 +747,14 @@ void dpm_resume_noirq(pm_message_t state)
}
/**
- * device_resume_early - Execute an "early resume" callback for given device.
+ * __device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_resume_early(struct device *dev, pm_message_t state, bool async)
+static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -811,21 +807,31 @@ Out:
pm_runtime_enable(dev);
complete_all(&dev->power.completion);
- return error;
+
+ if (error) {
+ suspend_stats.failed_resume_early++;
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async early" : " early", error);
+ }
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
-
- error = device_resume_early(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ __device_resume_early(dev, pm_transition, true);
put_device(dev);
}
+static void device_resume_early(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_resume_early))
+ return;
+
+ __device_resume_early(dev, pm_transition, false);
+}
+
/**
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -839,14 +845,6 @@ void dpm_resume_early(pm_message_t state)
mutex_lock(&dpm_list_mtx);
pm_transition = state;
- /*
- * Advanced the async threads upfront,
- * in case the starting of async threads is
- * delayed by non-async resuming devices.
- */
- list_for_each_entry(dev, &dpm_late_early_list, power.entry)
- dpm_async_fn(dev, async_resume_early);
-
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
get_device(dev);
@@ -854,17 +852,7 @@ void dpm_resume_early(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- if (!is_async(dev)) {
- int error;
-
- error = device_resume_early(dev, state, false);
- if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " early", error);
- }
- }
+ device_resume_early(dev);
put_device(dev);
@@ -888,12 +876,12 @@ void dpm_resume_start(pm_message_t state)
EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
- * device_resume - Execute "resume" callbacks for given device.
+ * __device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*/
-static int device_resume(struct device *dev, pm_message_t state, bool async)
+static void __device_resume(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -975,20 +963,30 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
- return error;
+ if (error) {
+ suspend_stats.failed_resume++;
+ dpm_save_failed_step(SUSPEND_RESUME);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async" : "", error);
+ }
}
static void async_resume(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
- error = device_resume(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ __device_resume(dev, pm_transition, true);
put_device(dev);
}
+static void device_resume(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_resume))
+ return;
+
+ __device_resume(dev, pm_transition, false);
+}
+
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -1008,27 +1006,17 @@ void dpm_resume(pm_message_t state)
pm_transition = state;
async_error = 0;
- list_for_each_entry(dev, &dpm_suspended_list, power.entry)
- dpm_async_fn(dev, async_resume);
-
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
+
get_device(dev);
- if (!is_async(dev)) {
- int error;
- mutex_unlock(&dpm_list_mtx);
+ mutex_unlock(&dpm_list_mtx);
+
+ device_resume(dev);
- error = device_resume(dev, state, false);
- if (error) {
- suspend_stats.failed_resume++;
- dpm_save_failed_step(SUSPEND_RESUME);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, "", error);
- }
+ mutex_lock(&dpm_list_mtx);
- mutex_lock(&dpm_list_mtx);
- }
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 9a9ea514c2d8..583dd5d7d46b 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -318,6 +318,7 @@ struct regmap_ram_data {
bool *read;
bool *written;
enum regmap_endian reg_endian;
+ bool (*noinc_reg)(struct regmap_ram_data *data, unsigned int reg);
};
/*
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index bdd80b73c3e6..fb84cda92a75 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -226,8 +226,8 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
if (*ppos < 0 || !count)
return -EINVAL;
- if (count > (PAGE_SIZE << MAX_ORDER))
- count = PAGE_SIZE << MAX_ORDER;
+ if (count > (PAGE_SIZE << MAX_PAGE_ORDER))
+ count = PAGE_SIZE << MAX_PAGE_ORDER;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
@@ -373,8 +373,8 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
if (*ppos < 0 || !count)
return -EINVAL;
- if (count > (PAGE_SIZE << MAX_ORDER))
- count = PAGE_SIZE << MAX_ORDER;
+ if (count > (PAGE_SIZE << MAX_PAGE_ORDER))
+ count = PAGE_SIZE << MAX_PAGE_ORDER;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index e14cc03a17f6..026bdcb45127 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -1186,6 +1186,65 @@ static void raw_write(struct kunit *test)
regmap_exit(map);
}
+static bool reg_zero(struct device *dev, unsigned int reg)
+{
+ return reg == 0;
+}
+
+static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
+{
+ return reg == 0;
+}
+
+static void raw_noinc_write(struct kunit *test)
+{
+ struct raw_test_types *t = (struct raw_test_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val, val_test, val_last;
+ u16 val_array[BLOCK_TEST_SIZE];
+
+ config = raw_regmap_config;
+ config.volatile_reg = reg_zero;
+ config.writeable_noinc_reg = reg_zero;
+ config.readable_noinc_reg = reg_zero;
+
+ map = gen_raw_regmap(&config, t, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ data->noinc_reg = ram_reg_zero;
+
+ get_random_bytes(&val_array, sizeof(val_array));
+
+ if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
+ val_test = be16_to_cpu(val_array[1]) + 100;
+ val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
+ } else {
+ val_test = le16_to_cpu(val_array[1]) + 100;
+ val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
+ }
+
+ /* Put some data into the register following the noinc register */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));
+
+ /* Write some data to the noinc register */
+ KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
+ sizeof(val_array)));
+
+ /* We should read back the last value written */
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
+ KUNIT_ASSERT_EQ(test, val_last, val);
+
+ /* Make sure we didn't touch the register after the noinc register */
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
+ KUNIT_ASSERT_EQ(test, val_test, val);
+
+ regmap_exit(map);
+}
+
static void raw_sync(struct kunit *test)
{
struct raw_test_types *t = (struct raw_test_types *)test->param_value;
@@ -1284,6 +1343,7 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
+ KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
{}
};
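
The new raw_noinc_write KUnit case exercises regmap_noinc_write() against the RAM backend through the noinc_reg hook added above. For context, a hypothetical driver-side use of the same API, assuming a FIFO-style register at 0x10 already covered by the map's writeable_noinc_reg callback (illustrative sketch, not part of this patch):

#include <linux/regmap.h>

static int example_push_fifo(struct regmap *map, const u16 *samples, size_t n)
{
	/*
	 * Every 16-bit word is written to the same register address; a later
	 * regmap_read() of that register returns only the last value, which
	 * is exactly what the test above asserts against the RAM backend.
	 */
	return regmap_noinc_write(map, 0x10, samples, n * sizeof(*samples));
}
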
diff --git a/drivers/base/regmap/regmap-ram.c b/drivers/base/regmap/regmap-ram.c
index 85f34a5dee04..192d6b131dff 100644
--- a/drivers/base/regmap/regmap-ram.c
+++ b/drivers/base/regmap/regmap-ram.c
@@ -65,12 +65,12 @@ struct regmap *__regmap_init_ram(const struct regmap_config *config,
return ERR_PTR(-EINVAL);
}
- data->read = kcalloc(sizeof(bool), config->max_register + 1,
+ data->read = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->read)
return ERR_PTR(-ENOMEM);
- data->written = kcalloc(sizeof(bool), config->max_register + 1,
+ data->written = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->written)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/base/regmap/regmap-raw-ram.c b/drivers/base/regmap/regmap-raw-ram.c
index c9b800885f3b..93ae07b503fd 100644
--- a/drivers/base/regmap/regmap-raw-ram.c
+++ b/drivers/base/regmap/regmap-raw-ram.c
@@ -41,10 +41,15 @@ static int regmap_raw_ram_gather_write(void *context,
return -EINVAL;
r = decode_reg(data->reg_endian, reg);
- memcpy(&our_buf[r], val, val_len);
-
- for (i = 0; i < val_len / 2; i++)
- data->written[r + i] = true;
+ if (data->noinc_reg && data->noinc_reg(data, r)) {
+ memcpy(&our_buf[r], val + val_len - 2, 2);
+ data->written[r] = true;
+ } else {
+ memcpy(&our_buf[r], val, val_len);
+
+ for (i = 0; i < val_len / 2; i++)
+ data->written[r + i] = true;
+ }
return 0;
}
@@ -70,10 +75,16 @@ static int regmap_raw_ram_read(void *context,
return -EINVAL;
r = decode_reg(data->reg_endian, reg);
- memcpy(val, &our_buf[r], val_len);
-
- for (i = 0; i < val_len / 2; i++)
- data->read[r + i] = true;
+ if (data->noinc_reg && data->noinc_reg(data, r)) {
+ for (i = 0; i < val_len; i += 2)
+ memcpy(val + i, &our_buf[r], 2);
+ data->read[r] = true;
+ } else {
+ memcpy(val, &our_buf[r], val_len);
+
+ for (i = 0; i < val_len / 2; i++)
+ data->read[r + i] = true;
+ }
return 0;
}
@@ -111,12 +122,12 @@ struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
return ERR_PTR(-EINVAL);
}
- data->read = kcalloc(sizeof(bool), config->max_register + 1,
+ data->read = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->read)
return ERR_PTR(-ENOMEM);
- data->written = kcalloc(sizeof(bool), config->max_register + 1,
+ data->written = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->written)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index ea6157747199..6db77d8e45f9 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -2136,7 +2136,7 @@ static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
}
/**
- * regmap_noinc_write(): Write data from a register without incrementing the
+ * regmap_noinc_write(): Write data to a register without incrementing the
* register number
*
* @map: Register map to write to
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 11114a5d9e5c..d0e41d52d6a9 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3079,7 +3079,7 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr)
}
}
-#define MAX_LEN (1UL << MAX_ORDER << PAGE_SHIFT)
+#define MAX_LEN (1UL << MAX_PAGE_ORDER << PAGE_SHIFT)
static int raw_cmd_copyin(int cmd, void __user *param,
struct floppy_raw_cmd **rcmd)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 9f2d412fc560..8a8cd4fc9238 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -245,9 +245,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);
- file_start_write(file);
bw = vfs_iter_write(file, &i, ppos, 0);
- file_end_write(file);
if (likely(bw == bvec->bv_len))
return 0;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 83600b45e12a..3eaf02ebeebe 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -893,12 +893,9 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
*/
if (ublk_need_map_req(req)) {
struct iov_iter iter;
- struct iovec iov;
const int dir = ITER_DEST;
- import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes,
- &iov, &iter);
-
+ import_ubuf(dir, u64_to_user_ptr(io->addr), rq_bytes, &iter);
return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
@@ -915,13 +912,11 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
if (ublk_need_unmap_req(req)) {
struct iov_iter iter;
- struct iovec iov;
const int dir = ITER_SOURCE;
WARN_ON_ONCE(io->res > rq_bytes);
- import_single_range(dir, u64_to_user_ptr(io->addr), io->res,
- &iov, &iter);
+ import_ubuf(dir, u64_to_user_ptr(io->addr), io->res, &iter);
return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 0386b7da02aa..7b29cce60ab2 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -59,8 +59,8 @@ config ZRAM_WRITEBACK
bool "Write back incompressible or idle page to backing device"
depends on ZRAM
help
- With incompressible page, there is no memory saving to keep it
- in memory. Instead, write it out to backing device.
+ This lets zram entries (incompressible or idle pages) be written
+ back to a backing device, helping save memory.
For this feature, admin should set up backing device via
/sys/block/zramX/backing_dev.
@@ -69,9 +69,18 @@ config ZRAM_WRITEBACK
See Documentation/admin-guide/blockdev/zram.rst for more information.
+config ZRAM_TRACK_ENTRY_ACTIME
+ bool "Track access time of zram entries"
+ depends on ZRAM
+ help
+ With this feature zram tracks access time of every stored
+ entry (page), which can be used for a more fine grained IDLE
+ pages writeback.
+
config ZRAM_MEMORY_TRACKING
bool "Track zRam block status"
depends on ZRAM && DEBUG_FS
+ select ZRAM_TRACK_ENTRY_ACTIME
help
With this feature, admin can track the state of allocated blocks
of zRAM. Admin could see the information via
@@ -86,4 +95,4 @@ config ZRAM_MULTI_COMP
This will enable multi-compression streams, so that ZRAM can
re-compress pages using a potentially slower but more effective
compression algorithm. Note, that IDLE page recompression
- requires ZRAM_MEMORY_TRACKING.
+ requires ZRAM_TRACK_ENTRY_ACTIME.
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d77d3664ca08..2b1d82473be8 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -174,6 +174,14 @@ static inline u32 zram_get_priority(struct zram *zram, u32 index)
return prio & ZRAM_COMP_PRIORITY_MASK;
}
+static void zram_accessed(struct zram *zram, u32 index)
+{
+ zram_clear_flag(zram, index, ZRAM_IDLE);
+#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
+ zram->table[index].ac_time = ktime_get_boottime();
+#endif
+}
+
static inline void update_used_max(struct zram *zram,
const unsigned long pages)
{
@@ -293,8 +301,9 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
zram_slot_lock(zram, index);
if (zram_allocated(zram, index) &&
!zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
-#ifdef CONFIG_ZRAM_MEMORY_TRACKING
- is_idle = !cutoff || ktime_after(cutoff, zram->table[index].ac_time);
+#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
+ is_idle = !cutoff || ktime_after(cutoff,
+ zram->table[index].ac_time);
#endif
if (is_idle)
zram_set_flag(zram, index, ZRAM_IDLE);
@@ -317,7 +326,7 @@ static ssize_t idle_store(struct device *dev,
*/
u64 age_sec;
- if (IS_ENABLED(CONFIG_ZRAM_MEMORY_TRACKING) && !kstrtoull(buf, 0, &age_sec))
+ if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) && !kstrtoull(buf, 0, &age_sec))
cutoff_time = ktime_sub(ktime_get_boottime(),
ns_to_ktime(age_sec * NSEC_PER_SEC));
else
@@ -841,12 +850,6 @@ static void zram_debugfs_destroy(void)
debugfs_remove_recursive(zram_debugfs_root);
}
-static void zram_accessed(struct zram *zram, u32 index)
-{
- zram_clear_flag(zram, index, ZRAM_IDLE);
- zram->table[index].ac_time = ktime_get_boottime();
-}
-
static ssize_t read_block_state(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -930,10 +933,6 @@ static void zram_debugfs_unregister(struct zram *zram)
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
-static void zram_accessed(struct zram *zram, u32 index)
-{
- zram_clear_flag(zram, index, ZRAM_IDLE);
-};
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif
@@ -1254,7 +1253,7 @@ static void zram_free_page(struct zram *zram, size_t index)
{
unsigned long handle;
-#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
zram->table[index].ac_time = 0;
#endif
if (zram_test_flag(zram, index, ZRAM_IDLE))
@@ -1322,9 +1321,9 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page,
void *mem;
value = handle ? zram_get_element(zram, index) : 0;
- mem = kmap_atomic(page);
+ mem = kmap_local_page(page);
zram_fill_page(mem, PAGE_SIZE, value);
- kunmap_atomic(mem);
+ kunmap_local(mem);
return 0;
}
@@ -1337,14 +1336,14 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page,
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
- dst = kmap_atomic(page);
+ dst = kmap_local_page(page);
memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(dst);
+ kunmap_local(dst);
ret = 0;
} else {
- dst = kmap_atomic(page);
+ dst = kmap_local_page(page);
ret = zcomp_decompress(zstrm, src, size, dst);
- kunmap_atomic(dst);
+ kunmap_local(dst);
zcomp_stream_put(zram->comps[prio]);
}
zs_unmap_object(zram->mem_pool, handle);
@@ -1417,21 +1416,21 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
unsigned long element = 0;
enum zram_pageflags flags = 0;
- mem = kmap_atomic(page);
+ mem = kmap_local_page(page);
if (page_same_filled(mem, &element)) {
- kunmap_atomic(mem);
+ kunmap_local(mem);
/* Free memory associated with this sector now. */
flags = ZRAM_SAME;
atomic64_inc(&zram->stats.same_pages);
goto out;
}
- kunmap_atomic(mem);
+ kunmap_local(mem);
compress_again:
zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
- src = kmap_atomic(page);
+ src = kmap_local_page(page);
ret = zcomp_compress(zstrm, src, &comp_len);
- kunmap_atomic(src);
+ kunmap_local(src);
if (unlikely(ret)) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
@@ -1495,10 +1494,10 @@ compress_again:
src = zstrm->buffer;
if (comp_len == PAGE_SIZE)
- src = kmap_atomic(page);
+ src = kmap_local_page(page);
memcpy(dst, src, comp_len);
if (comp_len == PAGE_SIZE)
- kunmap_atomic(src);
+ kunmap_local(src);
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
zs_unmap_object(zram->mem_pool, handle);
@@ -1615,9 +1614,9 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
num_recomps++;
zstrm = zcomp_stream_get(zram->comps[prio]);
- src = kmap_atomic(page);
+ src = kmap_local_page(page);
ret = zcomp_compress(zstrm, src, &comp_len_new);
- kunmap_atomic(src);
+ kunmap_local(src);
if (ret) {
zcomp_stream_put(zram->comps[prio]);
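
The kmap_atomic()/kunmap_atomic() pairs in zram_drv.c are replaced with kmap_local_page()/kunmap_local(), which keep the mapping CPU-local without disabling preemption or pagefaults. A minimal sketch of the converted pattern (hypothetical helper, illustrative only):

#include <linux/highmem.h>
#include <linux/string.h>

static void example_fill_page(struct page *page, int byte)
{
	void *addr = kmap_local_page(page);

	memset(addr, byte, PAGE_SIZE);
	kunmap_local(addr);	/* local mappings are released in reverse order */
}
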
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index d090753f97be..3b94d12f41b4 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -69,7 +69,7 @@ struct zram_table_entry {
unsigned long element;
};
unsigned long flags;
-#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
ktime_t ac_time;
#endif
};
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 4a9c79391dee..456be28ba67c 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1364,7 +1364,6 @@ static void __cold try_to_generate_entropy(void)
SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
{
struct iov_iter iter;
- struct iovec iov;
int ret;
if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
@@ -1385,7 +1384,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags
return ret;
}
- ret = import_single_range(ITER_DEST, ubuf, len, &iov, &iter);
+ ret = import_ubuf(ITER_DEST, ubuf, len, &iter);
if (unlikely(ret))
return ret;
return get_random_bytes_user(&iter);
@@ -1491,7 +1490,6 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return 0;
case RNDADDENTROPY: {
struct iov_iter iter;
- struct iovec iov;
ssize_t ret;
int len;
@@ -1503,7 +1501,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EINVAL;
if (get_user(len, p++))
return -EFAULT;
- ret = import_single_range(ITER_SOURCE, p, len, &iov, &iter);
+ ret = import_ubuf(ITER_SOURCE, p, len, &iter);
if (unlikely(ret))
return ret;
ret = write_pool_user(&iter);
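
Both getrandom() and the RNDADDENTROPY ioctl now build their iov_iter with import_ubuf(), which wraps a single user buffer as an ITER_UBUF iterator and removes the on-stack struct iovec. A hedged sketch of the resulting pattern (hypothetical helper, not part of the patch):

#include <linux/uio.h>

static ssize_t example_consume_user_buf(void __user *ubuf, size_t len,
					ssize_t (*consume)(struct iov_iter *))
{
	struct iov_iter iter;
	int ret;

	ret = import_ubuf(ITER_SOURCE, ubuf, len, &iter);
	if (ret)
		return ret;

	return consume(&iter);
}
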
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 44b19e696176..3d5e6d705fc6 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -108,8 +108,9 @@ static inline void send_msg(struct cn_msg *msg)
filter_data[1] = 0;
}
- cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
- cn_filter, (void *)filter_data);
+ if (cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
+ cn_filter, (void *)filter_data) == -ESRCH)
+ atomic_set(&proc_event_num_listeners, 0);
local_unlock(&local_event.lock);
}
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index 8afefdea4d80..ce5a5641b6dd 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -57,7 +57,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
continue;
}
- clk = clk_get(cpu_dev, 0);
+ clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
pr_warn("Cannot get clock for CPU %d\n", cpu);
} else {
@@ -165,7 +165,7 @@ static int __init armada_8k_cpufreq_init(void)
continue;
}
- clk = clk_get(cpu_dev, 0);
+ clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
pr_err("Cannot get clock for CPU %d\n", cpu);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index fe08ca419b3d..64420d9cfd1e 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -16,7 +16,6 @@
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
-#include <linux/dmi.h>
#include <linux/irq_work.h>
#include <linux/kthread.h>
#include <linux/time.h>
@@ -27,12 +26,6 @@
#include <acpi/cppc_acpi.h>
-/* Minimum struct length needed for the DMI processor entry we want */
-#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
-
-/* Offset in the DMI processor structure for the max frequency */
-#define DMI_PROCESSOR_MAX_SPEED 0x14
-
/*
* This list contains information parsed from per CPU ACPI _CPC and _PSD
* structures: e.g. the highest and lowest supported performance, capabilities,
@@ -291,97 +284,9 @@ static inline void cppc_freq_invariance_exit(void)
}
#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
-/* Callback function used to retrieve the max frequency from DMI */
-static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
-{
- const u8 *dmi_data = (const u8 *)dm;
- u16 *mhz = (u16 *)private;
-
- if (dm->type == DMI_ENTRY_PROCESSOR &&
- dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
- u16 val = (u16)get_unaligned((const u16 *)
- (dmi_data + DMI_PROCESSOR_MAX_SPEED));
- *mhz = val > *mhz ? val : *mhz;
- }
-}
-
-/* Look up the max frequency in DMI */
-static u64 cppc_get_dmi_max_khz(void)
-{
- u16 mhz = 0;
-
- dmi_walk(cppc_find_dmi_mhz, &mhz);
-
- /*
- * Real stupid fallback value, just in case there is no
- * actual value set.
- */
- mhz = mhz ? mhz : 1;
-
- return (1000 * mhz);
-}
-
-/*
- * If CPPC lowest_freq and nominal_freq registers are exposed then we can
- * use them to convert perf to freq and vice versa. The conversion is
- * extrapolated as an affine function passing by the 2 points:
- * - (Low perf, Low freq)
- * - (Nominal perf, Nominal perf)
- */
-static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
- unsigned int perf)
-{
- struct cppc_perf_caps *caps = &cpu_data->perf_caps;
- s64 retval, offset = 0;
- static u64 max_khz;
- u64 mul, div;
-
- if (caps->lowest_freq && caps->nominal_freq) {
- mul = caps->nominal_freq - caps->lowest_freq;
- div = caps->nominal_perf - caps->lowest_perf;
- offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
- } else {
- if (!max_khz)
- max_khz = cppc_get_dmi_max_khz();
- mul = max_khz;
- div = caps->highest_perf;
- }
-
- retval = offset + div64_u64(perf * mul, div);
- if (retval >= 0)
- return retval;
- return 0;
-}
-
-static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
- unsigned int freq)
-{
- struct cppc_perf_caps *caps = &cpu_data->perf_caps;
- s64 retval, offset = 0;
- static u64 max_khz;
- u64 mul, div;
-
- if (caps->lowest_freq && caps->nominal_freq) {
- mul = caps->nominal_perf - caps->lowest_perf;
- div = caps->nominal_freq - caps->lowest_freq;
- offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
- } else {
- if (!max_khz)
- max_khz = cppc_get_dmi_max_khz();
- mul = caps->highest_perf;
- div = max_khz;
- }
-
- retval = offset + div64_u64(freq * mul, div);
- if (retval >= 0)
- return retval;
- return 0;
-}
-
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
-
{
struct cppc_cpudata *cpu_data = policy->driver_data;
unsigned int cpu = policy->cpu;
@@ -389,7 +294,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
u32 desired_perf;
int ret = 0;
- desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
+ desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
/* Return if it is exactly the same perf */
if (desired_perf == cpu_data->perf_ctrls.desired_perf)
return ret;
@@ -417,7 +322,7 @@ static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
u32 desired_perf;
int ret;
- desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
+ desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
cpu_data->perf_ctrls.desired_perf = desired_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
@@ -530,7 +435,7 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
min_step = min_cap / CPPC_EM_CAP_STEP;
max_step = max_cap / CPPC_EM_CAP_STEP;
- perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ perf_prev = cppc_khz_to_perf(perf_caps, *KHz);
step = perf_prev / perf_step;
if (step > max_step)
@@ -550,8 +455,8 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
perf = step * perf_step;
}
- *KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
- perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ *KHz = cppc_perf_to_khz(perf_caps, perf);
+ perf_check = cppc_khz_to_perf(perf_caps, *KHz);
step_check = perf_check / perf_step;
/*
@@ -561,8 +466,8 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
*/
while ((*KHz == prev_freq) || (step_check != step)) {
perf++;
- *KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
- perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ *KHz = cppc_perf_to_khz(perf_caps, perf);
+ perf_check = cppc_khz_to_perf(perf_caps, *KHz);
step_check = perf_check / perf_step;
}
@@ -591,7 +496,7 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
- perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
+ perf_prev = cppc_khz_to_perf(perf_caps, KHz);
perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
step = perf_prev / perf_step;
@@ -679,10 +584,6 @@ static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
goto free_mask;
}
- /* Convert the lowest and nominal freq from MHz to KHz */
- cpu_data->perf_caps.lowest_freq *= 1000;
- cpu_data->perf_caps.nominal_freq *= 1000;
-
list_add(&cpu_data->node, &cpu_data_list);
return cpu_data;
@@ -724,20 +625,16 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* Set min to lowest nonlinear perf to avoid any efficiency penalty (see
* Section 8.4.7.1.1.5 of ACPI 6.1 spec)
*/
- policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->lowest_nonlinear_perf);
- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->nominal_perf);
+ policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf);
+ policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
/*
* Set cpuinfo.min_freq to Lowest to make the full range of performance
* available if userspace wants to use any perf between lowest & lowest
* nonlinear perf
*/
- policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->lowest_perf);
- policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->nominal_perf);
+ policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf);
+ policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->nominal_perf);
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
policy->shared_type = cpu_data->shared_type;
@@ -773,7 +670,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
boost_supported = true;
/* Set policy->cur to max now. The governors will adjust later. */
- policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
+ policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
@@ -863,7 +760,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
&fb_ctrs_t1);
- return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
+ return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
}
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
@@ -878,11 +775,9 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
}
if (state)
- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->highest_perf);
+ policy->max = cppc_perf_to_khz(caps, caps->highest_perf);
else
- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->nominal_perf);
+ policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
policy->cpuinfo.max_freq = policy->max;
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
@@ -937,7 +832,7 @@ static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
if (ret < 0)
return -EIO;
- return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
+ return cppc_perf_to_khz(&cpu_data->perf_caps, desired_perf);
}
static void cppc_check_hisi_workaround(void)
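
With the DMI-based frequency fallback removed, perf-to-frequency conversion is delegated to the ACPI CPPC core via cppc_perf_to_khz() and cppc_khz_to_perf() (declared in <acpi/cppc_acpi.h>). A hedged sketch of the round-trip the driver now relies on (illustrative only):

#include <acpi/cppc_acpi.h>

static unsigned int example_representable_khz(struct cppc_perf_caps *caps,
					      unsigned int target_khz)
{
	unsigned int perf = cppc_khz_to_perf(caps, target_khz);

	/*
	 * Converting back yields the frequency corresponding to the discrete
	 * performance level the platform will actually be asked for.
	 */
	return cppc_perf_to_khz(caps, perf);
}
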
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 934d35f570b7..44db4f59c4cc 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -454,7 +454,7 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
arch_set_freq_scale(policy->related_cpus,
policy->cur,
- policy->cpuinfo.max_freq);
+ arch_scale_freq_ref(policy->cpu));
spin_lock(&policy->transition_lock);
policy->transition_ongoing = false;
@@ -2174,7 +2174,7 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
policy->cur = freq;
arch_set_freq_scale(policy->related_cpus, freq,
- policy->cpuinfo.max_freq);
+ arch_scale_freq_ref(policy->cpu));
cpufreq_stats_record_transition(policy, freq);
if (trace_cpu_frequency_enabled()) {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index a534a1f7f1ee..3c69040920b8 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1692,13 +1692,6 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
/*
- * If this CPU gen doesn't call for change in balance_perf
- * EPP return.
- */
- if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
- return;
-
- /*
* If the EPP is set by firmware, which means that firmware enabled HWP
* - Is equal or less than 0x80 (default balance_perf EPP)
* - But less performance oriented than performance EPP
@@ -1711,6 +1704,13 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
}
/*
+ * If this CPU gen doesn't call for change in balance_perf
+ * EPP return.
+ */
+ if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
+ return;
+
+ /*
* Use hard coded value per gen to update the balance_perf
* and default EPP.
*/
@@ -2406,6 +2406,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
X86_MATCH(ICELAKE_X, core_funcs),
X86_MATCH(TIGERLAKE, core_funcs),
X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
+ X86_MATCH(EMERALDRAPIDS_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index c8a7ccc42c16..4ee23f4ebf4a 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -334,8 +334,11 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
#ifdef CONFIG_COMMON_CLK
/* dummy clock provider as needed by OPP if clocks property is used */
- if (of_property_present(dev->of_node, "#clock-cells"))
- devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+ if (of_property_present(dev->of_node, "#clock-cells")) {
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__);
+ }
#endif
ret = cpufreq_register_driver(&scmi_cpufreq_driver);
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index e66df22f9695..d8515d5c0853 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -25,13 +25,12 @@ MODULE_PARM_DESC(force, "Load unconditionally");
static struct cpuidle_device __percpu *haltpoll_cpuidle_devices;
static enum cpuhp_state haltpoll_hp_state;
-static int default_enter_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static __cpuidle int default_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
- if (current_clr_polling_and_test()) {
- local_irq_enable();
+ if (current_clr_polling_and_test())
return index;
- }
+
arch_cpu_idle();
return index;
}
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index fcaccd0b5a65..e4d3f45242f6 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -906,7 +906,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
/*
* The length of the ID shouldn't be assumed by software since
* it may change in the future. The allocation size is limited
- * to 1 << (PAGE_SHIFT + MAX_ORDER) by the page allocator.
+ * to 1 << (PAGE_SHIFT + MAX_PAGE_ORDER) by the page allocator.
* If the allocation fails, simply return ENOMEM rather than
* warning in the kernel log.
*/
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 3df7a256e919..5c1012d7ffa9 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -70,11 +70,11 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
HISI_ACC_SGL_ALIGN_SIZE);
/*
- * the pool may allocate a block of memory of size PAGE_SIZE * 2^MAX_ORDER,
+ * the pool may allocate a block of memory of size PAGE_SIZE * 2^MAX_PAGE_ORDER,
* block size may exceed 2^31 on ia64, so the max of block size is 2^31
*/
- block_size = 1 << (PAGE_SHIFT + MAX_ORDER < 32 ?
- PAGE_SHIFT + MAX_ORDER : 31);
+ block_size = 1 << (PAGE_SHIFT + MAX_PAGE_ORDER < 32 ?
+ PAGE_SHIFT + MAX_PAGE_ORDER : 31);
sgl_num_per_block = block_size / sgl_size;
block_num = count / sgl_num_per_block;
remain_sgl = count % sgl_num_per_block;
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 1659b787b65f..1ff1ab5fa105 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -367,6 +367,7 @@ static ssize_t create_store(struct device *dev, struct device_attribute *attr,
.dax_region = dax_region,
.size = 0,
.id = -1,
+ .memmap_on_memory = false,
};
struct dev_dax *dev_dax = devm_create_dev_dax(&data);
@@ -1400,6 +1401,8 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
dev_dax->align = dax_region->align;
ida_init(&dev_dax->ida);
+ dev_dax->memmap_on_memory = data->memmap_on_memory;
+
inode = dax_inode(dax_dev);
dev->devt = inode->i_rdev;
dev->bus = &dax_bus_type;
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
index 1ccd23360124..cbbf64443098 100644
--- a/drivers/dax/bus.h
+++ b/drivers/dax/bus.h
@@ -23,6 +23,7 @@ struct dev_dax_data {
struct dev_pagemap *pgmap;
resource_size_t size;
int id;
+ bool memmap_on_memory;
};
struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data);
diff --git a/drivers/dax/cxl.c b/drivers/dax/cxl.c
index 8bc9d04034d6..c696837ab23c 100644
--- a/drivers/dax/cxl.c
+++ b/drivers/dax/cxl.c
@@ -26,6 +26,7 @@ static int cxl_dax_region_probe(struct device *dev)
.dax_region = dax_region,
.id = -1,
.size = range_len(&cxlr_dax->hpa_range),
+ .memmap_on_memory = true,
};
return PTR_ERR_OR_ZERO(devm_create_dev_dax(&data));
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 27cf2daaaa79..446617b73aea 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -70,6 +70,7 @@ struct dev_dax {
struct ida ida;
struct device dev;
struct dev_pagemap *pgmap;
+ bool memmap_on_memory;
int nr_range;
struct dev_dax_range {
unsigned long pgoff;
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index 5d2ddef0f8f5..b9da69f92697 100644
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -36,6 +36,7 @@ static int dax_hmem_probe(struct platform_device *pdev)
.dax_region = dax_region,
.id = -1,
.size = region_idle ? 0 : range_len(&mri->range),
+ .memmap_on_memory = false,
};
return PTR_ERR_OR_ZERO(devm_create_dev_dax(&data));
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 369c698b7706..42ee360cf4e3 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/memory-tiers.h>
+#include <linux/memory_hotplug.h>
#include "dax-private.h"
#include "bus.h"
@@ -93,6 +94,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
struct dax_kmem_data *data;
struct memory_dev_type *mtype;
int i, rc, mapped = 0;
+ mhp_t mhp_flags;
int numa_node;
int adist = MEMTIER_DEFAULT_DAX_ADISTANCE;
@@ -179,12 +181,16 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
*/
res->flags = IORESOURCE_SYSTEM_RAM;
+ mhp_flags = MHP_NID_IS_MGID;
+ if (dev_dax->memmap_on_memory)
+ mhp_flags |= MHP_MEMMAP_ON_MEMORY;
+
/*
* Ensure that future kexec'd kernels will not treat
* this as RAM automatically.
*/
rc = add_memory_driver_managed(data->mgid, range.start,
- range_len(&range), kmem_name, MHP_NID_IS_MGID);
+ range_len(&range), kmem_name, mhp_flags);
if (rc) {
dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index ae0cb113a5d3..f3c6c67b8412 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -63,6 +63,7 @@ static struct dev_dax *__dax_pmem_probe(struct device *dev)
.id = id,
.pgmap = &pgmap,
.size = range_len(&range),
+ .memmap_on_memory = false,
};
return devm_create_dev_dax(&data);
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index b3a68d5833bd..98657d3b9435 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -461,10 +461,14 @@ static void devfreq_monitor(struct work_struct *work)
if (err)
dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
+ if (devfreq->stop_polling)
+ goto out;
+
queue_delayed_work(devfreq_wq, &devfreq->work,
msecs_to_jiffies(devfreq->profile->polling_ms));
- mutex_unlock(&devfreq->lock);
+out:
+ mutex_unlock(&devfreq->lock);
trace_devfreq_monitor(devfreq);
}
@@ -483,6 +487,10 @@ void devfreq_monitor_start(struct devfreq *devfreq)
if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
return;
+ mutex_lock(&devfreq->lock);
+ if (delayed_work_pending(&devfreq->work))
+ goto out;
+
switch (devfreq->profile->timer) {
case DEVFREQ_TIMER_DEFERRABLE:
INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
@@ -491,12 +499,16 @@ void devfreq_monitor_start(struct devfreq *devfreq)
INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor);
break;
default:
- return;
+ goto out;
}
if (devfreq->profile->polling_ms)
queue_delayed_work(devfreq_wq, &devfreq->work,
msecs_to_jiffies(devfreq->profile->polling_ms));
+
+out:
+ devfreq->stop_polling = false;
+ mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_start);
@@ -513,6 +525,14 @@ void devfreq_monitor_stop(struct devfreq *devfreq)
if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
return;
+ mutex_lock(&devfreq->lock);
+ if (devfreq->stop_polling) {
+ mutex_unlock(&devfreq->lock);
+ return;
+ }
+
+ devfreq->stop_polling = true;
+ mutex_unlock(&devfreq->lock);
cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);
@@ -1688,7 +1708,7 @@ static ssize_t trans_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct devfreq *df = to_devfreq(dev);
- ssize_t len;
+ ssize_t len = 0;
int i, j;
unsigned int max_state;
@@ -1697,7 +1717,7 @@ static ssize_t trans_stat_show(struct device *dev,
max_state = df->max_state;
if (max_state == 0)
- return sprintf(buf, "Not Supported.\n");
+ return sysfs_emit(buf, "Not Supported.\n");
mutex_lock(&df->lock);
if (!df->stop_polling &&
@@ -1707,31 +1727,49 @@ static ssize_t trans_stat_show(struct device *dev,
}
mutex_unlock(&df->lock);
- len = sprintf(buf, " From : To\n");
- len += sprintf(buf + len, " :");
- for (i = 0; i < max_state; i++)
- len += sprintf(buf + len, "%10lu",
- df->freq_table[i]);
+ len += sysfs_emit_at(buf, len, " From : To\n");
+ len += sysfs_emit_at(buf, len, " :");
+ for (i = 0; i < max_state; i++) {
+ if (len >= PAGE_SIZE - 1)
+ break;
+ len += sysfs_emit_at(buf, len, "%10lu",
+ df->freq_table[i]);
+ }
- len += sprintf(buf + len, " time(ms)\n");
+ if (len >= PAGE_SIZE - 1)
+ return PAGE_SIZE - 1;
+ len += sysfs_emit_at(buf, len, " time(ms)\n");
for (i = 0; i < max_state; i++) {
- if (df->freq_table[i] == df->previous_freq)
- len += sprintf(buf + len, "*");
+ if (len >= PAGE_SIZE - 1)
+ break;
+ if (df->freq_table[i] == df->previous_freq)
+ len += sysfs_emit_at(buf, len, "*");
else
- len += sprintf(buf + len, " ");
-
- len += sprintf(buf + len, "%10lu:", df->freq_table[i]);
- for (j = 0; j < max_state; j++)
- len += sprintf(buf + len, "%10u",
+ len += sysfs_emit_at(buf, len, " ");
+ if (len >= PAGE_SIZE - 1)
+ break;
+ len += sysfs_emit_at(buf, len, "%10lu:", df->freq_table[i]);
+ for (j = 0; j < max_state; j++) {
+ if (len >= PAGE_SIZE - 1)
+ break;
+ len += sysfs_emit_at(buf, len, "%10u",
df->stats.trans_table[(i * max_state) + j]);
+ }
+ if (len >= PAGE_SIZE - 1)
+ break;
+ len += sysfs_emit_at(buf, len, "%10llu\n", (u64)
+ jiffies64_to_msecs(df->stats.time_in_state[i]));
+ }
- len += sprintf(buf + len, "%10llu\n", (u64)
- jiffies64_to_msecs(df->stats.time_in_state[i]));
+ if (len < PAGE_SIZE - 1)
+ len += sysfs_emit_at(buf, len, "Total transition : %u\n",
+ df->stats.total_trans);
+ if (len >= PAGE_SIZE - 1) {
+ pr_warn_once("devfreq transition table exceeds PAGE_SIZE. Disabling\n");
+ return -EFBIG;
}
- len += sprintf(buf + len, "Total transition : %u\n",
- df->stats.total_trans);
return len;
}
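
The trans_stat_show() rework bounds every append against PAGE_SIZE and switches from sprintf() to sysfs_emit_at(), returning -EFBIG once the table no longer fits in a single sysfs page. A minimal illustration of that bounded-append pattern (hypothetical attribute, not part of the patch):

#include <linux/mm.h>
#include <linux/sysfs.h>

static ssize_t example_table_show(char *buf, const unsigned long *vals, int n)
{
	ssize_t len = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (len >= PAGE_SIZE - 1)
			return -EFBIG;	/* output would overflow one page */
		len += sysfs_emit_at(buf, len, "%10lu\n", vals[i]);
	}

	return len;
}
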
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 8b31cd54bdb6..ae17ce4d9722 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -22,6 +22,7 @@
#include <linux/of_platform.h>
#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/types.h>
#include <linux/uaccess.h>
@@ -279,7 +280,6 @@ release:
static int altr_sdram_probe(struct platform_device *pdev)
{
- const struct of_device_id *id;
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
struct altr_sdram_mc_data *drvdata;
@@ -290,10 +290,6 @@ static int altr_sdram_probe(struct platform_device *pdev)
int irq, irq2, res = 0;
unsigned long mem_size, irqflags = 0;
- id = of_match_device(altr_sdram_ctrl_of_match, &pdev->dev);
- if (!id)
- return -ENODEV;
-
/* Grab the register range from the sdr controller in device tree */
mc_vbase = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"altr,sdr-syscon");
@@ -304,8 +300,7 @@ static int altr_sdram_probe(struct platform_device *pdev)
}
/* Check specific dependencies for the module */
- priv = of_match_node(altr_sdram_ctrl_of_match,
- pdev->dev.of_node)->data;
+ priv = device_get_match_data(&pdev->dev);
/* Validate the SDRAM controller has ECC enabled */
if (regmap_read(mc_vbase, priv->ecc_ctrl_offset, &read_reg) ||
@@ -459,15 +454,13 @@ free:
return res;
}
-static int altr_sdram_remove(struct platform_device *pdev)
+static void altr_sdram_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
/*
@@ -489,7 +482,7 @@ static const struct dev_pm_ops altr_sdram_pm_ops = {
static struct platform_driver altr_sdram_edac_driver = {
.probe = altr_sdram_probe,
- .remove = altr_sdram_remove,
+ .remove_new = altr_sdram_remove,
.driver = {
.name = "altr_sdram_edac",
#ifdef CONFIG_PM
@@ -812,7 +805,7 @@ fail:
return res;
}
-static int altr_edac_device_remove(struct platform_device *pdev)
+static void altr_edac_device_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
struct altr_edac_device_dev *drvdata = dci->pvt_info;
@@ -820,13 +813,11 @@ static int altr_edac_device_remove(struct platform_device *pdev)
debugfs_remove_recursive(drvdata->debugfs_dir);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(dci);
-
- return 0;
}
static struct platform_driver altr_edac_device_driver = {
.probe = altr_edac_device_probe,
- .remove = altr_edac_device_remove,
+ .remove_new = altr_edac_device_remove,
.driver = {
.name = "altr_edac_device",
.of_match_table = altr_edac_device_of_match,
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 9b6642d00871..537b9987a431 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -996,15 +996,23 @@ static struct local_node_map {
#define LNTM_NODE_COUNT GENMASK(27, 16)
#define LNTM_BASE_NODE_ID GENMASK(11, 0)
-static int gpu_get_node_map(void)
+static int gpu_get_node_map(struct amd64_pvt *pvt)
{
struct pci_dev *pdev;
int ret;
u32 tmp;
/*
- * Node ID 0 is reserved for CPUs.
- * Therefore, a non-zero Node ID means we've already cached the values.
+ * Mapping of nodes from hardware-provided AMD Node ID to a
+ * Linux logical one is applicable for MI200 models. Therefore,
+ * return early for other heterogeneous systems.
+ */
+ if (pvt->F3->device != PCI_DEVICE_ID_AMD_MI200_DF_F3)
+ return 0;
+
+ /*
+ * Node ID 0 is reserved for CPUs. Therefore, a non-zero Node ID
+ * means the values have been already cached.
*/
if (gpu_node_map.base_node_id)
return 0;
@@ -3851,7 +3859,7 @@ static void gpu_init_csrows(struct mem_ctl_info *mci)
dimm->nr_pages = gpu_get_csrow_nr_pages(pvt, umc, cs);
dimm->edac_mode = EDAC_SECDED;
- dimm->mtype = MEM_HBM2;
+ dimm->mtype = pvt->dram_type;
dimm->dtype = DEV_X16;
dimm->grain = 64;
}
@@ -3880,7 +3888,7 @@ static bool gpu_ecc_enabled(struct amd64_pvt *pvt)
return true;
}
-static inline u32 gpu_get_umc_base(u8 umc, u8 channel)
+static inline u32 gpu_get_umc_base(struct amd64_pvt *pvt, u8 umc, u8 channel)
{
/*
* On CPUs, there is one channel per UMC, so UMC numbering equals
@@ -3893,13 +3901,16 @@ static inline u32 gpu_get_umc_base(u8 umc, u8 channel)
* On GPU nodes channels are selected in 3rd nibble
* HBM chX[3:0]= [Y ]5X[3:0]000;
* HBM chX[7:4]= [Y+1]5X[3:0]000
+ *
+ * On MI300 APU nodes, same as GPU nodes but channels are selected
+ * in the base address of 0x90000
*/
umc *= 2;
if (channel >= 4)
umc++;
- return 0x50000 + (umc << 20) + ((channel % 4) << 12);
+ return pvt->gpu_umc_base + (umc << 20) + ((channel % 4) << 12);
}
static void gpu_read_mc_regs(struct amd64_pvt *pvt)
@@ -3910,7 +3921,7 @@ static void gpu_read_mc_regs(struct amd64_pvt *pvt)
/* Read registers from each UMC */
for_each_umc(i) {
- umc_base = gpu_get_umc_base(i, 0);
+ umc_base = gpu_get_umc_base(pvt, i, 0);
umc = &pvt->umc[i];
amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
@@ -3927,7 +3938,7 @@ static void gpu_read_base_mask(struct amd64_pvt *pvt)
for_each_umc(umc) {
for_each_chip_select(cs, umc, pvt) {
- base_reg = gpu_get_umc_base(umc, cs) + UMCCH_BASE_ADDR;
+ base_reg = gpu_get_umc_base(pvt, umc, cs) + UMCCH_BASE_ADDR;
base = &pvt->csels[umc].csbases[cs];
if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) {
@@ -3935,7 +3946,7 @@ static void gpu_read_base_mask(struct amd64_pvt *pvt)
umc, cs, *base, base_reg);
}
- mask_reg = gpu_get_umc_base(umc, cs) + UMCCH_ADDR_MASK;
+ mask_reg = gpu_get_umc_base(pvt, umc, cs) + UMCCH_ADDR_MASK;
mask = &pvt->csels[umc].csmasks[cs];
if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) {
@@ -3960,7 +3971,7 @@ static int gpu_hw_info_get(struct amd64_pvt *pvt)
{
int ret;
- ret = gpu_get_node_map();
+ ret = gpu_get_node_map(pvt);
if (ret)
return ret;
@@ -4125,6 +4136,8 @@ static int per_family_init(struct amd64_pvt *pvt)
if (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) {
pvt->ctl_name = "MI200";
pvt->max_mcs = 4;
+ pvt->dram_type = MEM_HBM2;
+ pvt->gpu_umc_base = 0x50000;
pvt->ops = &gpu_ops;
} else {
pvt->ctl_name = "F19h_M30h";
@@ -4142,6 +4155,13 @@ static int per_family_init(struct amd64_pvt *pvt)
pvt->ctl_name = "F19h_M70h";
pvt->flags.zn_regs_v2 = 1;
break;
+ case 0x90 ... 0x9f:
+ pvt->ctl_name = "F19h_M90h";
+ pvt->max_mcs = 4;
+ pvt->dram_type = MEM_HBM3;
+ pvt->gpu_umc_base = 0x90000;
+ pvt->ops = &gpu_ops;
+ break;
case 0xa0 ... 0xaf:
pvt->ctl_name = "F19h_MA0h";
pvt->max_mcs = 12;
@@ -4180,23 +4200,33 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
NULL
};
+/*
+ * For heterogeneous and APU models EDAC CHIP_SELECT and CHANNEL layers
+ * should be swapped to fit into the layers.
+ */
+static unsigned int get_layer_size(struct amd64_pvt *pvt, u8 layer)
+{
+ bool is_gpu = (pvt->ops == &gpu_ops);
+
+ if (!layer)
+ return is_gpu ? pvt->max_mcs
+ : pvt->csels[0].b_cnt;
+ else
+ return is_gpu ? pvt->csels[0].b_cnt
+ : pvt->max_mcs;
+}
+
static int init_one_instance(struct amd64_pvt *pvt)
{
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
int ret = -ENOMEM;
- /*
- * For Heterogeneous family EDAC CHIP_SELECT and CHANNEL layers should
- * be swapped to fit into the layers.
- */
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ?
- pvt->max_mcs : pvt->csels[0].b_cnt;
+ layers[0].size = get_layer_size(pvt, 0);
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ?
- pvt->csels[0].b_cnt : pvt->max_mcs;
+ layers[1].size = get_layer_size(pvt, 1);
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 5a4e4a59682b..1665f7932bac 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -362,6 +362,7 @@ struct amd64_pvt {
u32 dct_sel_lo; /* DRAM Controller Select Low */
u32 dct_sel_hi; /* DRAM Controller Select High */
u32 online_spare; /* On-Line spare Reg */
+ u32 gpu_umc_base; /* Base address used for channel selection on GPUs */
/* x4, x8, or x16 syndromes in use */
u8 ecc_sym_sz;
diff --git a/drivers/edac/armada_xp_edac.c b/drivers/edac/armada_xp_edac.c
index c4bd2fb9c46b..25517c99b3ea 100644
--- a/drivers/edac/armada_xp_edac.c
+++ b/drivers/edac/armada_xp_edac.c
@@ -5,7 +5,9 @@
#include <linux/kernel.h>
#include <linux/edac.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-aurora-l2.h>
@@ -351,20 +353,18 @@ static int axp_mc_probe(struct platform_device *pdev)
return 0;
}
-static int axp_mc_remove(struct platform_device *pdev)
+static void axp_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static struct platform_driver axp_mc_driver = {
.probe = axp_mc_probe,
- .remove = axp_mc_remove,
+ .remove_new = axp_mc_remove,
.driver = {
.name = "armada_xp_mc_edac",
.of_match_table = of_match_ptr(axp_mc_of_match),
@@ -564,7 +564,7 @@ static int aurora_l2_probe(struct platform_device *pdev)
return 0;
}
-static int aurora_l2_remove(struct platform_device *pdev)
+static void aurora_l2_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
#ifdef CONFIG_EDAC_DEBUG
@@ -575,13 +575,11 @@ static int aurora_l2_remove(struct platform_device *pdev)
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(dci);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static struct platform_driver aurora_l2_driver = {
.probe = aurora_l2_probe,
- .remove = aurora_l2_remove,
+ .remove_new = aurora_l2_remove,
.driver = {
.name = "aurora_l2_edac",
.of_match_table = of_match_ptr(aurora_l2_of_match),
diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
index 6bd5f8815919..157a480eb761 100644
--- a/drivers/edac/aspeed_edac.c
+++ b/drivers/edac/aspeed_edac.c
@@ -357,7 +357,7 @@ probe_exit02:
}
-static int aspeed_remove(struct platform_device *pdev)
+static void aspeed_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci;
@@ -369,8 +369,6 @@ static int aspeed_remove(struct platform_device *pdev)
mci = edac_mc_del_mc(&pdev->dev);
if (mci)
edac_mc_free(mci);
-
- return 0;
}
@@ -389,7 +387,7 @@ static struct platform_driver aspeed_driver = {
.of_match_table = aspeed_of_match
},
.probe = aspeed_probe,
- .remove = aspeed_remove
+ .remove_new = aspeed_remove
};
module_platform_driver(aspeed_driver);
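
Each EDAC platform driver in this series moves from .remove (returning int) to .remove_new (returning void), since a non-zero return from remove() is ignored by the driver core. A hedged skeleton of the converted shape (hypothetical driver, illustrative only):

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* resource setup elided in this sketch */
}

static void example_remove(struct platform_device *pdev)
{
	/* tear-down only; no meaningful status can be reported back */
}

static struct platform_driver example_driver = {
	.probe		= example_probe,
	.remove_new	= example_remove,
	.driver		= {
		.name	= "example-edac",
	},
};
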
diff --git a/drivers/edac/bluefield_edac.c b/drivers/edac/bluefield_edac.c
index e4736eb37bfb..5b3164560648 100644
--- a/drivers/edac/bluefield_edac.c
+++ b/drivers/edac/bluefield_edac.c
@@ -323,14 +323,12 @@ err:
}
-static int bluefield_edac_mc_remove(struct platform_device *pdev)
+static void bluefield_edac_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
-
- return 0;
}
static const struct acpi_device_id bluefield_mc_acpi_ids[] = {
@@ -346,7 +344,7 @@ static struct platform_driver bluefield_edac_mc_driver = {
.acpi_match_table = bluefield_mc_acpi_ids,
},
.probe = bluefield_edac_mc_probe,
- .remove = bluefield_edac_mc_remove,
+ .remove_new = bluefield_edac_mc_remove,
};
module_platform_driver(bluefield_edac_mc_driver);
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index bc1f3416400e..2000f66fbf5c 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -234,12 +234,11 @@ static int cell_edac_probe(struct platform_device *pdev)
return 0;
}
-static int cell_edac_remove(struct platform_device *pdev)
+static void cell_edac_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
if (mci)
edac_mc_free(mci);
- return 0;
}
static struct platform_driver cell_edac_driver = {
@@ -247,7 +246,7 @@ static struct platform_driver cell_edac_driver = {
.name = "cbe-mic",
},
.probe = cell_edac_probe,
- .remove = cell_edac_remove,
+ .remove_new = cell_edac_remove,
};
static int __init cell_edac_init(void)
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 9797e6d60dde..5075dc7526e3 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -1010,7 +1010,7 @@ out:
return res;
}
-static int cpc925_remove(struct platform_device *pdev)
+static void cpc925_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
@@ -1023,13 +1023,11 @@ static int cpc925_remove(struct platform_device *pdev)
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
-
- return 0;
}
static struct platform_driver cpc925_edac_driver = {
.probe = cpc925_probe,
- .remove = cpc925_remove,
+ .remove_new = cpc925_remove,
.driver = {
.name = "cpc925_edac",
}
diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c
index 1fa5ca57e9ec..4e30b989a1a4 100644
--- a/drivers/edac/dmc520_edac.c
+++ b/drivers/edac/dmc520_edac.c
@@ -602,7 +602,7 @@ err:
return ret;
}
-static int dmc520_edac_remove(struct platform_device *pdev)
+static void dmc520_edac_remove(struct platform_device *pdev)
{
u32 reg_val, idx, irq_mask_all = 0;
struct mem_ctl_info *mci;
@@ -626,8 +626,6 @@ static int dmc520_edac_remove(struct platform_device *pdev)
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
-
- return 0;
}
static const struct of_device_id dmc520_edac_driver_id[] = {
@@ -644,7 +642,7 @@ static struct platform_driver dmc520_edac_driver = {
},
.probe = dmc520_edac_probe,
- .remove = dmc520_edac_remove
+ .remove_new = dmc520_edac_remove
};
module_platform_driver(dmc520_edac_driver);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 6faeb2ab3960..d6eed727b0cd 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -166,6 +166,7 @@ const char * const edac_mem_types[] = {
[MEM_NVDIMM] = "Non-volatile-RAM",
[MEM_WIO2] = "Wide-IO-2",
[MEM_HBM2] = "High-bandwidth-memory-Gen2",
+ [MEM_HBM3] = "High-bandwidth-memory-Gen3",
};
EXPORT_SYMBOL_GPL(edac_mem_types);
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 287cc51dbc86..901d4cd3ca38 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -521,7 +521,7 @@ static void edac_pci_dev_parity_clear(struct pci_dev *dev)
/* read the device TYPE, looking for bridges */
pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
+ if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE)
get_pci_parity_status(dev, 1);
}
@@ -583,7 +583,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
edac_dbg(4, "PCI HEADER TYPE= 0x%02x %s\n",
header_type, dev_name(&dev->dev));
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE) {
/* On bridges, need to examine secondary status register */
status = get_pci_parity_status(dev, 1);
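
Both hunks above replace the open-coded 0x7F with PCI_HEADER_TYPE_MASK: bit 7 of the header type byte is the multi-function flag, so it has to be masked off before comparing against PCI_HEADER_TYPE_BRIDGE. A hedged sketch of the check, with a made-up helper name (is_pci_bridge):

#include <linux/pci.h>

/* Illustrative only: bit 7 of the header type is the multi-function flag,
 * mask it off before comparing against the bridge header type. */
static bool is_pci_bridge(struct pci_dev *dev)
{
	u8 header_type;

	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
	return (header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE;
}
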
diff --git a/drivers/edac/fsl_ddr_edac.c b/drivers/edac/fsl_ddr_edac.c
index b81757555a8a..d148d262d0d4 100644
--- a/drivers/edac/fsl_ddr_edac.c
+++ b/drivers/edac/fsl_ddr_edac.c
@@ -612,7 +612,7 @@ err:
return res;
}
-int fsl_mc_err_remove(struct platform_device *op)
+void fsl_mc_err_remove(struct platform_device *op)
{
struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
@@ -629,5 +629,4 @@ int fsl_mc_err_remove(struct platform_device *op)
edac_mc_del_mc(&op->dev);
edac_mc_free(mci);
- return 0;
}
diff --git a/drivers/edac/fsl_ddr_edac.h b/drivers/edac/fsl_ddr_edac.h
index 332439d7b2d9..c0994a2a003c 100644
--- a/drivers/edac/fsl_ddr_edac.h
+++ b/drivers/edac/fsl_ddr_edac.h
@@ -72,5 +72,5 @@ struct fsl_mc_pdata {
int irq;
};
int fsl_mc_err_probe(struct platform_device *op);
-int fsl_mc_err_remove(struct platform_device *op);
+void fsl_mc_err_remove(struct platform_device *op);
#endif
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
index 140d4431bd0d..5646c049a934 100644
--- a/drivers/edac/highbank_l2_edac.c
+++ b/drivers/edac/highbank_l2_edac.c
@@ -118,18 +118,17 @@ err:
return res;
}
-static int highbank_l2_err_remove(struct platform_device *pdev)
+static void highbank_l2_err_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(dci);
- return 0;
}
static struct platform_driver highbank_l2_edac_driver = {
.probe = highbank_l2_err_probe,
- .remove = highbank_l2_err_remove,
+ .remove_new = highbank_l2_err_remove,
.driver = {
.name = "hb_l2_edac",
.of_match_table = hb_l2_err_of_match,
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index a0c04a7f95e9..1c5b888ab11d 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -251,18 +251,17 @@ free:
return res;
}
-static int highbank_mc_remove(struct platform_device *pdev)
+static void highbank_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
- return 0;
}
static struct platform_driver highbank_mc_edac_driver = {
.probe = highbank_mc_probe,
- .remove = highbank_mc_remove,
+ .remove_new = highbank_mc_remove,
.driver = {
.name = "hb_mc_edac",
.of_match_table = hb_ddr_ctrl_of_match,
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 23d25724bae4..91e0a88ef904 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -376,7 +376,7 @@ static const struct pci_id_table pci_dev_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
- {0,} /* 0 terminated list. */
+ { NULL, }
};
/*
@@ -385,7 +385,7 @@ static const struct pci_id_table pci_dev_table[] = {
static const struct pci_device_id i7core_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
- {0,} /* 0 terminated list. */
+ { 0, }
};
/****************************************************************************
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 1a18693294db..2b0ecdeba5cd 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -58,6 +58,7 @@
/* Capability register E */
#define CAPID_E_OFFSET 0xf0
#define CAPID_E_IBECC BIT(12)
+#define CAPID_E_IBECC_BIT18 BIT(18)
/* Error Status */
#define ERRSTS_OFFSET 0xc8
@@ -80,6 +81,7 @@
#define ECC_ERROR_LOG_UE BIT_ULL(63)
#define ECC_ERROR_LOG_ADDR_SHIFT 5
#define ECC_ERROR_LOG_ADDR(v) GET_BITFIELD(v, 5, 38)
+#define ECC_ERROR_LOG_ADDR45(v) GET_BITFIELD(v, 5, 45)
#define ECC_ERROR_LOG_SYND(v) GET_BITFIELD(v, 46, 61)
/* Host MMIO base address */
@@ -133,6 +135,8 @@ static struct res_config {
u32 ibecc_base;
u32 ibecc_error_log_offset;
bool (*ibecc_available)(struct pci_dev *pdev);
+ /* Extract error address logged in IBECC */
+ u64 (*err_addr)(u64 ecclog);
/* Convert error address logged in IBECC to system physical address */
u64 (*err_addr_to_sys_addr)(u64 eaddr, int mc);
/* Convert error address logged in IBECC to integrated memory controller address */
@@ -222,6 +226,67 @@ static struct work_struct ecclog_work;
#define DID_ADL_SKU3 0x4621
#define DID_ADL_SKU4 0x4641
+/* Compute die IDs for Alder Lake-N with IBECC */
+#define DID_ADL_N_SKU1 0x4614
+#define DID_ADL_N_SKU2 0x4617
+#define DID_ADL_N_SKU3 0x461b
+#define DID_ADL_N_SKU4 0x461c
+#define DID_ADL_N_SKU5 0x4673
+#define DID_ADL_N_SKU6 0x4674
+#define DID_ADL_N_SKU7 0x4675
+#define DID_ADL_N_SKU8 0x4677
+#define DID_ADL_N_SKU9 0x4678
+#define DID_ADL_N_SKU10 0x4679
+#define DID_ADL_N_SKU11 0x467c
+
+/* Compute die IDs for Raptor Lake-P with IBECC */
+#define DID_RPL_P_SKU1 0xa706
+#define DID_RPL_P_SKU2 0xa707
+#define DID_RPL_P_SKU3 0xa708
+#define DID_RPL_P_SKU4 0xa716
+#define DID_RPL_P_SKU5 0xa718
+
+/* Compute die IDs for Meteor Lake-PS with IBECC */
+#define DID_MTL_PS_SKU1 0x7d21
+#define DID_MTL_PS_SKU2 0x7d22
+#define DID_MTL_PS_SKU3 0x7d23
+#define DID_MTL_PS_SKU4 0x7d24
+
+/* Compute die IDs for Meteor Lake-P with IBECC */
+#define DID_MTL_P_SKU1 0x7d01
+#define DID_MTL_P_SKU2 0x7d02
+#define DID_MTL_P_SKU3 0x7d14
+
+static int get_mchbar(struct pci_dev *pdev, u64 *mchbar)
+{
+ union {
+ u64 v;
+ struct {
+ u32 v_lo;
+ u32 v_hi;
+ };
+ } u;
+
+ if (pci_read_config_dword(pdev, MCHBAR_OFFSET, &u.v_lo)) {
+ igen6_printk(KERN_ERR, "Failed to read lower MCHBAR\n");
+ return -ENODEV;
+ }
+
+ if (pci_read_config_dword(pdev, MCHBAR_OFFSET + 4, &u.v_hi)) {
+ igen6_printk(KERN_ERR, "Failed to read upper MCHBAR\n");
+ return -ENODEV;
+ }
+
+ if (!(u.v & MCHBAR_EN)) {
+ igen6_printk(KERN_ERR, "MCHBAR is disabled\n");
+ return -ENODEV;
+ }
+
+ *mchbar = MCHBAR_BASE(u.v);
+
+ return 0;
+}
+
static bool ehl_ibecc_available(struct pci_dev *pdev)
{
u32 v;
@@ -272,6 +337,39 @@ static bool tgl_ibecc_available(struct pci_dev *pdev)
return !(CAPID_E_IBECC & v);
}
+static bool mtl_p_ibecc_available(struct pci_dev *pdev)
+{
+ u32 v;
+
+ if (pci_read_config_dword(pdev, CAPID_E_OFFSET, &v))
+ return false;
+
+ return !(CAPID_E_IBECC_BIT18 & v);
+}
+
+static bool mtl_ps_ibecc_available(struct pci_dev *pdev)
+{
+#define MCHBAR_MEMSS_IBECCDIS 0x13c00
+ void __iomem *window;
+ u64 mchbar;
+ u32 val;
+
+ if (get_mchbar(pdev, &mchbar))
+ return false;
+
+ window = ioremap(mchbar, MCHBAR_SIZE * 2);
+ if (!window) {
+ igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", mchbar);
+ return false;
+ }
+
+ val = readl(window + MCHBAR_MEMSS_IBECCDIS);
+ iounmap(window);
+
+	/* Bit 6: 1 - IBECC is disabled, 0 - IBECC is enabled */
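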
+ return !GET_BITFIELD(val, 6, 6);
+}
+
static u64 mem_addr_to_sys_addr(u64 maddr)
{
if (maddr < igen6_tolud)
@@ -358,6 +456,11 @@ static u64 adl_err_addr_to_imc_addr(u64 eaddr, int mc)
return imc_addr;
}
+static u64 rpl_p_err_addr(u64 ecclog)
+{
+ return ECC_ERROR_LOG_ADDR45(ecclog);
+}
+
static struct res_config ehl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
@@ -403,6 +506,51 @@ static struct res_config adl_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
+static struct res_config adl_n_cfg = {
+ .machine_check = true,
+ .num_imc = 1,
+ .imc_base = 0xd800,
+ .ibecc_base = 0xd400,
+ .ibecc_error_log_offset = 0x68,
+ .ibecc_available = tgl_ibecc_available,
+ .err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
+ .err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
+};
+
+static struct res_config rpl_p_cfg = {
+ .machine_check = true,
+ .num_imc = 2,
+ .imc_base = 0xd800,
+ .ibecc_base = 0xd400,
+ .ibecc_error_log_offset = 0x68,
+ .ibecc_available = tgl_ibecc_available,
+ .err_addr = rpl_p_err_addr,
+ .err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
+ .err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
+};
+
+static struct res_config mtl_ps_cfg = {
+ .machine_check = true,
+ .num_imc = 2,
+ .imc_base = 0xd800,
+ .ibecc_base = 0xd400,
+ .ibecc_error_log_offset = 0x170,
+ .ibecc_available = mtl_ps_ibecc_available,
+ .err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
+ .err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
+};
+
+static struct res_config mtl_p_cfg = {
+ .machine_check = true,
+ .num_imc = 2,
+ .imc_base = 0xd800,
+ .ibecc_base = 0xd400,
+ .ibecc_error_log_offset = 0x170,
+ .ibecc_available = mtl_p_ibecc_available,
+ .err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
+ .err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
+};
+
static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_EHL_SKU5), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU6), (kernel_ulong_t)&ehl_cfg },
@@ -424,6 +572,29 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_ADL_SKU2), (kernel_ulong_t)&adl_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_SKU3), (kernel_ulong_t)&adl_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_SKU4), (kernel_ulong_t)&adl_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU1), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU2), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU3), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU4), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU5), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU6), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU7), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU8), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU9), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU10), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU11), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_RPL_P_SKU1), (kernel_ulong_t)&rpl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_RPL_P_SKU2), (kernel_ulong_t)&rpl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_RPL_P_SKU3), (kernel_ulong_t)&rpl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_RPL_P_SKU4), (kernel_ulong_t)&rpl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_RPL_P_SKU5), (kernel_ulong_t)&rpl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_MTL_PS_SKU1), (kernel_ulong_t)&mtl_ps_cfg },
+ { PCI_VDEVICE(INTEL, DID_MTL_PS_SKU2), (kernel_ulong_t)&mtl_ps_cfg },
+ { PCI_VDEVICE(INTEL, DID_MTL_PS_SKU3), (kernel_ulong_t)&mtl_ps_cfg },
+ { PCI_VDEVICE(INTEL, DID_MTL_PS_SKU4), (kernel_ulong_t)&mtl_ps_cfg },
+ { PCI_VDEVICE(INTEL, DID_MTL_P_SKU1), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_MTL_P_SKU2), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_MTL_P_SKU3), (kernel_ulong_t)&mtl_p_cfg },
{ },
};
MODULE_DEVICE_TABLE(pci, igen6_pci_tbl);
@@ -679,8 +850,11 @@ static void ecclog_work_cb(struct work_struct *work)
llist_for_each_entry_safe(node, tmp, head, llnode) {
memset(&res, 0, sizeof(res));
- eaddr = ECC_ERROR_LOG_ADDR(node->ecclog) <<
- ECC_ERROR_LOG_ADDR_SHIFT;
+ if (res_cfg->err_addr)
+ eaddr = res_cfg->err_addr(node->ecclog);
+ else
+ eaddr = ECC_ERROR_LOG_ADDR(node->ecclog) <<
+ ECC_ERROR_LOG_ADDR_SHIFT;
res.mc = node->mc;
res.sys_addr = res_cfg->err_addr_to_sys_addr(eaddr, res.mc);
res.imc_addr = res_cfg->err_addr_to_imc_addr(eaddr, res.mc);
@@ -969,22 +1143,8 @@ static int igen6_pci_setup(struct pci_dev *pdev, u64 *mchbar)
igen6_tom = u.v & GENMASK_ULL(38, 20);
- if (pci_read_config_dword(pdev, MCHBAR_OFFSET, &u.v_lo)) {
- igen6_printk(KERN_ERR, "Failed to read lower MCHBAR\n");
+ if (get_mchbar(pdev, mchbar))
goto fail;
- }
-
- if (pci_read_config_dword(pdev, MCHBAR_OFFSET + 4, &u.v_hi)) {
- igen6_printk(KERN_ERR, "Failed to read upper MCHBAR\n");
- goto fail;
- }
-
- if (!(u.v & MCHBAR_EN)) {
- igen6_printk(KERN_ERR, "MCHBAR is disabled\n");
- goto fail;
- }
-
- *mchbar = MCHBAR_BASE(u.v);
#ifdef CONFIG_EDAC_DEBUG
if (pci_read_config_dword(pdev, TOUUD_OFFSET, &u.v_lo))
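
Two igen6 changes are visible above: MCHBAR discovery is factored into get_mchbar() so the Meteor Lake-PS probe helper can reuse it, and an optional err_addr() hook lets Raptor Lake-P pull a wider error address (bits 5..45 instead of 5..38) out of the IBECC error log. A standalone sketch of the address extraction, assuming GET_BITFIELD(v, lo, hi) expands to (((v) & GENMASK_ULL(hi, lo)) >> (lo)) as in the driver:

#include <linux/bits.h>
#include <linux/types.h>

/* Assumed to match the driver's definition of GET_BITFIELD(). */
#define GET_BITFIELD(v, lo, hi)		(((v) & GENMASK_ULL(hi, lo)) >> (lo))

#define ECC_ERROR_LOG_ADDR(v)		GET_BITFIELD(v, 5, 38)	/* legacy 34-bit field */
#define ECC_ERROR_LOG_ADDR45(v)		GET_BITFIELD(v, 5, 45)	/* RPL-P 41-bit field */
#define ECC_ERROR_LOG_ADDR_SHIFT	5

/* Mirrors the dispatch in ecclog_work_cb(): platforms with an err_addr()
 * hook return the wide field directly, everyone else shifts the legacy
 * field back into a byte address. */
static u64 example_error_address(u64 ecclog, bool has_err_addr_hook)
{
	if (has_err_addr_hook)
		return ECC_ERROR_LOG_ADDR45(ecclog);

	return ECC_ERROR_LOG_ADDR(ecclog) << ECC_ERROR_LOG_ADDR_SHIFT;
}
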
diff --git a/drivers/edac/layerscape_edac.c b/drivers/edac/layerscape_edac.c
index 7c5e2b3c0daa..d2f895033280 100644
--- a/drivers/edac/layerscape_edac.c
+++ b/drivers/edac/layerscape_edac.c
@@ -27,7 +27,7 @@ MODULE_DEVICE_TABLE(of, fsl_ddr_mc_err_of_match);
static struct platform_driver fsl_ddr_mc_err_driver = {
.probe = fsl_mc_err_probe,
- .remove = fsl_mc_err_remove,
+ .remove_new = fsl_mc_err_remove,
.driver = {
.name = "fsl_ddr_mc_err",
.of_match_table = fsl_ddr_mc_err_of_match,
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 9215c06783df..ec8b6c9fedfd 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -143,482 +143,6 @@ static const char * const mc6_mce_desc[] = {
"Status Register File",
};
-/* Scalable MCA error strings */
-static const char * const smca_ls_mce_desc[] = {
- "Load queue parity error",
- "Store queue parity error",
- "Miss address buffer payload parity error",
- "Level 1 TLB parity error",
- "DC Tag error type 5",
- "DC Tag error type 6",
- "DC Tag error type 1",
- "Internal error type 1",
- "Internal error type 2",
- "System Read Data Error Thread 0",
- "System Read Data Error Thread 1",
- "DC Tag error type 2",
- "DC Data error type 1 and poison consumption",
- "DC Data error type 2",
- "DC Data error type 3",
- "DC Tag error type 4",
- "Level 2 TLB parity error",
- "PDC parity error",
- "DC Tag error type 3",
- "DC Tag error type 5",
- "L2 Fill Data error",
-};
-
-static const char * const smca_ls2_mce_desc[] = {
- "An ECC error was detected on a data cache read by a probe or victimization",
- "An ECC error or L2 poison was detected on a data cache read by a load",
- "An ECC error was detected on a data cache read-modify-write by a store",
- "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization",
- "An ECC error or poison bit mismatch was detected on a tag read by a load",
- "An ECC error or poison bit mismatch was detected on a tag read by a store",
- "An ECC error was detected on an EMEM read by a load",
- "An ECC error was detected on an EMEM read-modify-write by a store",
- "A parity error was detected in an L1 TLB entry by any access",
- "A parity error was detected in an L2 TLB entry by any access",
- "A parity error was detected in a PWC entry by any access",
- "A parity error was detected in an STQ entry by any access",
- "A parity error was detected in an LDQ entry by any access",
- "A parity error was detected in a MAB entry by any access",
- "A parity error was detected in an SCB entry state field by any access",
- "A parity error was detected in an SCB entry address field by any access",
- "A parity error was detected in an SCB entry data field by any access",
- "A parity error was detected in a WCB entry by any access",
- "A poisoned line was detected in an SCB entry by any access",
- "A SystemReadDataError error was reported on read data returned from L2 for a load",
- "A SystemReadDataError error was reported on read data returned from L2 for an SCB store",
- "A SystemReadDataError error was reported on read data returned from L2 for a WCB store",
- "A hardware assertion error was reported",
- "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access",
-};
-
-static const char * const smca_if_mce_desc[] = {
- "Op Cache Microtag Probe Port Parity Error",
- "IC Microtag or Full Tag Multi-hit Error",
- "IC Full Tag Parity Error",
- "IC Data Array Parity Error",
- "Decoupling Queue PhysAddr Parity Error",
- "L0 ITLB Parity Error",
- "L1 ITLB Parity Error",
- "L2 ITLB Parity Error",
- "BPQ Thread 0 Snoop Parity Error",
- "BPQ Thread 1 Snoop Parity Error",
- "L1 BTB Multi-Match Error",
- "L2 BTB Multi-Match Error",
- "L2 Cache Response Poison Error",
- "System Read Data Error",
- "Hardware Assertion Error",
- "L1-TLB Multi-Hit",
- "L2-TLB Multi-Hit",
- "BSR Parity Error",
- "CT MCE",
-};
-
-static const char * const smca_l2_mce_desc[] = {
- "L2M Tag Multiple-Way-Hit error",
- "L2M Tag or State Array ECC Error",
- "L2M Data Array ECC Error",
- "Hardware Assert Error",
-};
-
-static const char * const smca_de_mce_desc[] = {
- "Micro-op cache tag parity error",
- "Micro-op cache data parity error",
- "Instruction buffer parity error",
- "Micro-op queue parity error",
- "Instruction dispatch queue parity error",
- "Fetch address FIFO parity error",
- "Patch RAM data parity error",
- "Patch RAM sequencer parity error",
- "Micro-op buffer parity error",
- "Hardware Assertion MCA Error",
-};
-
-static const char * const smca_ex_mce_desc[] = {
- "Watchdog Timeout error",
- "Physical register file parity error",
- "Flag register file parity error",
- "Immediate displacement register file parity error",
- "Address generator payload parity error",
- "EX payload parity error",
- "Checkpoint queue parity error",
- "Retire dispatch queue parity error",
- "Retire status queue parity error",
- "Scheduling queue parity error",
- "Branch buffer queue parity error",
- "Hardware Assertion error",
- "Spec Map parity error",
- "Retire Map parity error",
-};
-
-static const char * const smca_fp_mce_desc[] = {
- "Physical register file (PRF) parity error",
- "Freelist (FL) parity error",
- "Schedule queue parity error",
- "NSQ parity error",
- "Retire queue (RQ) parity error",
- "Status register file (SRF) parity error",
- "Hardware assertion",
-};
-
-static const char * const smca_l3_mce_desc[] = {
- "Shadow Tag Macro ECC Error",
- "Shadow Tag Macro Multi-way-hit Error",
- "L3M Tag ECC Error",
- "L3M Tag Multi-way-hit Error",
- "L3M Data ECC Error",
- "SDP Parity Error or SystemReadDataError from XI",
- "L3 Victim Queue Parity Error",
- "L3 Hardware Assertion",
-};
-
-static const char * const smca_cs_mce_desc[] = {
- "Illegal Request",
- "Address Violation",
- "Security Violation",
- "Illegal Response",
- "Unexpected Response",
- "Request or Probe Parity Error",
- "Read Response Parity Error",
- "Atomic Request Parity Error",
- "Probe Filter ECC Error",
-};
-
-static const char * const smca_cs2_mce_desc[] = {
- "Illegal Request",
- "Address Violation",
- "Security Violation",
- "Illegal Response",
- "Unexpected Response",
- "Request or Probe Parity Error",
- "Read Response Parity Error",
- "Atomic Request Parity Error",
- "SDP read response had no match in the CS queue",
- "Probe Filter Protocol Error",
- "Probe Filter ECC Error",
- "SDP read response had an unexpected RETRY error",
- "Counter overflow error",
- "Counter underflow error",
-};
-
-static const char * const smca_pie_mce_desc[] = {
- "Hardware Assert",
- "Register security violation",
- "Link Error",
- "Poison data consumption",
- "A deferred error was detected in the DF"
-};
-
-static const char * const smca_umc_mce_desc[] = {
- "DRAM ECC error",
- "Data poison error",
- "SDP parity error",
- "Advanced peripheral bus error",
- "Address/Command parity error",
- "Write data CRC error",
- "DCQ SRAM ECC error",
- "AES SRAM ECC error",
-};
-
-static const char * const smca_umc2_mce_desc[] = {
- "DRAM ECC error",
- "Data poison error",
- "SDP parity error",
- "Reserved",
- "Address/Command parity error",
- "Write data parity error",
- "DCQ SRAM ECC error",
- "Reserved",
- "Read data parity error",
- "Rdb SRAM ECC error",
- "RdRsp SRAM ECC error",
- "LM32 MP errors",
-};
-
-static const char * const smca_pb_mce_desc[] = {
- "An ECC error in the Parameter Block RAM array",
-};
-
-static const char * const smca_psp_mce_desc[] = {
- "An ECC or parity error in a PSP RAM instance",
-};
-
-static const char * const smca_psp2_mce_desc[] = {
- "High SRAM ECC or parity error",
- "Low SRAM ECC or parity error",
- "Instruction Cache Bank 0 ECC or parity error",
- "Instruction Cache Bank 1 ECC or parity error",
- "Instruction Tag Ram 0 parity error",
- "Instruction Tag Ram 1 parity error",
- "Data Cache Bank 0 ECC or parity error",
- "Data Cache Bank 1 ECC or parity error",
- "Data Cache Bank 2 ECC or parity error",
- "Data Cache Bank 3 ECC or parity error",
- "Data Tag Bank 0 parity error",
- "Data Tag Bank 1 parity error",
- "Data Tag Bank 2 parity error",
- "Data Tag Bank 3 parity error",
- "Dirty Data Ram parity error",
- "TLB Bank 0 parity error",
- "TLB Bank 1 parity error",
- "System Hub Read Buffer ECC or parity error",
-};
-
-static const char * const smca_smu_mce_desc[] = {
- "An ECC or parity error in an SMU RAM instance",
-};
-
-static const char * const smca_smu2_mce_desc[] = {
- "High SRAM ECC or parity error",
- "Low SRAM ECC or parity error",
- "Data Cache Bank A ECC or parity error",
- "Data Cache Bank B ECC or parity error",
- "Data Tag Cache Bank A ECC or parity error",
- "Data Tag Cache Bank B ECC or parity error",
- "Instruction Cache Bank A ECC or parity error",
- "Instruction Cache Bank B ECC or parity error",
- "Instruction Tag Cache Bank A ECC or parity error",
- "Instruction Tag Cache Bank B ECC or parity error",
- "System Hub Read Buffer ECC or parity error",
- "PHY RAM ECC error",
-};
-
-static const char * const smca_mp5_mce_desc[] = {
- "High SRAM ECC or parity error",
- "Low SRAM ECC or parity error",
- "Data Cache Bank A ECC or parity error",
- "Data Cache Bank B ECC or parity error",
- "Data Tag Cache Bank A ECC or parity error",
- "Data Tag Cache Bank B ECC or parity error",
- "Instruction Cache Bank A ECC or parity error",
- "Instruction Cache Bank B ECC or parity error",
- "Instruction Tag Cache Bank A ECC or parity error",
- "Instruction Tag Cache Bank B ECC or parity error",
-};
-
-static const char * const smca_mpdma_mce_desc[] = {
- "Main SRAM [31:0] bank ECC or parity error",
- "Main SRAM [63:32] bank ECC or parity error",
- "Main SRAM [95:64] bank ECC or parity error",
- "Main SRAM [127:96] bank ECC or parity error",
- "Data Cache Bank A ECC or parity error",
- "Data Cache Bank B ECC or parity error",
- "Data Tag Cache Bank A ECC or parity error",
- "Data Tag Cache Bank B ECC or parity error",
- "Instruction Cache Bank A ECC or parity error",
- "Instruction Cache Bank B ECC or parity error",
- "Instruction Tag Cache Bank A ECC or parity error",
- "Instruction Tag Cache Bank B ECC or parity error",
- "Data Cache Bank A ECC or parity error",
- "Data Cache Bank B ECC or parity error",
- "Data Tag Cache Bank A ECC or parity error",
- "Data Tag Cache Bank B ECC or parity error",
- "Instruction Cache Bank A ECC or parity error",
- "Instruction Cache Bank B ECC or parity error",
- "Instruction Tag Cache Bank A ECC or parity error",
- "Instruction Tag Cache Bank B ECC or parity error",
- "Data Cache Bank A ECC or parity error",
- "Data Cache Bank B ECC or parity error",
- "Data Tag Cache Bank A ECC or parity error",
- "Data Tag Cache Bank B ECC or parity error",
- "Instruction Cache Bank A ECC or parity error",
- "Instruction Cache Bank B ECC or parity error",
- "Instruction Tag Cache Bank A ECC or parity error",
- "Instruction Tag Cache Bank B ECC or parity error",
- "System Hub Read Buffer ECC or parity error",
- "MPDMA TVF DVSEC Memory ECC or parity error",
- "MPDMA TVF MMIO Mailbox0 ECC or parity error",
- "MPDMA TVF MMIO Mailbox1 ECC or parity error",
- "MPDMA TVF Doorbell Memory ECC or parity error",
- "MPDMA TVF SDP Slave Memory 0 ECC or parity error",
- "MPDMA TVF SDP Slave Memory 1 ECC or parity error",
- "MPDMA TVF SDP Slave Memory 2 ECC or parity error",
- "MPDMA TVF SDP Master Memory 0 ECC or parity error",
- "MPDMA TVF SDP Master Memory 1 ECC or parity error",
- "MPDMA TVF SDP Master Memory 2 ECC or parity error",
- "MPDMA TVF SDP Master Memory 3 ECC or parity error",
- "MPDMA TVF SDP Master Memory 4 ECC or parity error",
- "MPDMA TVF SDP Master Memory 5 ECC or parity error",
- "MPDMA TVF SDP Master Memory 6 ECC or parity error",
- "MPDMA PTE Command FIFO ECC or parity error",
- "MPDMA PTE Hub Data FIFO ECC or parity error",
- "MPDMA PTE Internal Data FIFO ECC or parity error",
- "MPDMA PTE Command Memory DMA ECC or parity error",
- "MPDMA PTE Command Memory Internal ECC or parity error",
- "MPDMA PTE DMA Completion FIFO ECC or parity error",
- "MPDMA PTE Tablewalk Completion FIFO ECC or parity error",
- "MPDMA PTE Descriptor Completion FIFO ECC or parity error",
- "MPDMA PTE ReadOnly Completion FIFO ECC or parity error",
- "MPDMA PTE DirectWrite Completion FIFO ECC or parity error",
- "SDP Watchdog Timer expired",
-};
-
-static const char * const smca_nbio_mce_desc[] = {
- "ECC or Parity error",
- "PCIE error",
- "SDP ErrEvent error",
- "SDP Egress Poison Error",
- "IOHC Internal Poison Error",
-};
-
-static const char * const smca_pcie_mce_desc[] = {
- "CCIX PER Message logging",
- "CCIX Read Response with Status: Non-Data Error",
- "CCIX Write Response with Status: Non-Data Error",
- "CCIX Read Response with Status: Data Error",
- "CCIX Non-okay write response with data error",
-};
-
-static const char * const smca_pcie2_mce_desc[] = {
- "SDP Parity Error logging",
-};
-
-static const char * const smca_xgmipcs_mce_desc[] = {
- "Data Loss Error",
- "Training Error",
- "Flow Control Acknowledge Error",
- "Rx Fifo Underflow Error",
- "Rx Fifo Overflow Error",
- "CRC Error",
- "BER Exceeded Error",
- "Tx Vcid Data Error",
- "Replay Buffer Parity Error",
- "Data Parity Error",
- "Replay Fifo Overflow Error",
- "Replay Fifo Underflow Error",
- "Elastic Fifo Overflow Error",
- "Deskew Error",
- "Flow Control CRC Error",
- "Data Startup Limit Error",
- "FC Init Timeout Error",
- "Recovery Timeout Error",
- "Ready Serial Timeout Error",
- "Ready Serial Attempt Error",
- "Recovery Attempt Error",
- "Recovery Relock Attempt Error",
- "Replay Attempt Error",
- "Sync Header Error",
- "Tx Replay Timeout Error",
- "Rx Replay Timeout Error",
- "LinkSub Tx Timeout Error",
- "LinkSub Rx Timeout Error",
- "Rx CMD Packet Error",
-};
-
-static const char * const smca_xgmiphy_mce_desc[] = {
- "RAM ECC Error",
- "ARC instruction buffer parity error",
- "ARC data buffer parity error",
- "PHY APB error",
-};
-
-static const char * const smca_nbif_mce_desc[] = {
- "Timeout error from GMI",
- "SRAM ECC error",
- "NTB Error Event",
- "SDP Parity error",
-};
-
-static const char * const smca_sata_mce_desc[] = {
- "Parity error for port 0",
- "Parity error for port 1",
- "Parity error for port 2",
- "Parity error for port 3",
- "Parity error for port 4",
- "Parity error for port 5",
- "Parity error for port 6",
- "Parity error for port 7",
-};
-
-static const char * const smca_usb_mce_desc[] = {
- "Parity error or ECC error for S0 RAM0",
- "Parity error or ECC error for S0 RAM1",
- "Parity error or ECC error for S0 RAM2",
- "Parity error for PHY RAM0",
- "Parity error for PHY RAM1",
- "AXI Slave Response error",
-};
-
-static const char * const smca_gmipcs_mce_desc[] = {
- "Data Loss Error",
- "Training Error",
- "Replay Parity Error",
- "Rx Fifo Underflow Error",
- "Rx Fifo Overflow Error",
- "CRC Error",
- "BER Exceeded Error",
- "Tx Fifo Underflow Error",
- "Replay Buffer Parity Error",
- "Tx Overflow Error",
- "Replay Fifo Overflow Error",
- "Replay Fifo Underflow Error",
- "Elastic Fifo Overflow Error",
- "Deskew Error",
- "Offline Error",
- "Data Startup Limit Error",
- "FC Init Timeout Error",
- "Recovery Timeout Error",
- "Ready Serial Timeout Error",
- "Ready Serial Attempt Error",
- "Recovery Attempt Error",
- "Recovery Relock Attempt Error",
- "Deskew Abort Error",
- "Rx Buffer Error",
- "Rx LFDS Fifo Overflow Error",
- "Rx LFDS Fifo Underflow Error",
- "LinkSub Tx Timeout Error",
- "LinkSub Rx Timeout Error",
- "Rx CMD Packet Error",
- "LFDS Training Timeout Error",
- "LFDS FC Init Timeout Error",
- "Data Loss Error",
-};
-
-struct smca_mce_desc {
- const char * const *descs;
- unsigned int num_descs;
-};
-
-static struct smca_mce_desc smca_mce_descs[] = {
- [SMCA_LS] = { smca_ls_mce_desc, ARRAY_SIZE(smca_ls_mce_desc) },
- [SMCA_LS_V2] = { smca_ls2_mce_desc, ARRAY_SIZE(smca_ls2_mce_desc) },
- [SMCA_IF] = { smca_if_mce_desc, ARRAY_SIZE(smca_if_mce_desc) },
- [SMCA_L2_CACHE] = { smca_l2_mce_desc, ARRAY_SIZE(smca_l2_mce_desc) },
- [SMCA_DE] = { smca_de_mce_desc, ARRAY_SIZE(smca_de_mce_desc) },
- [SMCA_EX] = { smca_ex_mce_desc, ARRAY_SIZE(smca_ex_mce_desc) },
- [SMCA_FP] = { smca_fp_mce_desc, ARRAY_SIZE(smca_fp_mce_desc) },
- [SMCA_L3_CACHE] = { smca_l3_mce_desc, ARRAY_SIZE(smca_l3_mce_desc) },
- [SMCA_CS] = { smca_cs_mce_desc, ARRAY_SIZE(smca_cs_mce_desc) },
- [SMCA_CS_V2] = { smca_cs2_mce_desc, ARRAY_SIZE(smca_cs2_mce_desc) },
- [SMCA_PIE] = { smca_pie_mce_desc, ARRAY_SIZE(smca_pie_mce_desc) },
- [SMCA_UMC] = { smca_umc_mce_desc, ARRAY_SIZE(smca_umc_mce_desc) },
- [SMCA_UMC_V2] = { smca_umc2_mce_desc, ARRAY_SIZE(smca_umc2_mce_desc) },
- [SMCA_PB] = { smca_pb_mce_desc, ARRAY_SIZE(smca_pb_mce_desc) },
- [SMCA_PSP] = { smca_psp_mce_desc, ARRAY_SIZE(smca_psp_mce_desc) },
- [SMCA_PSP_V2] = { smca_psp2_mce_desc, ARRAY_SIZE(smca_psp2_mce_desc) },
- [SMCA_SMU] = { smca_smu_mce_desc, ARRAY_SIZE(smca_smu_mce_desc) },
- [SMCA_SMU_V2] = { smca_smu2_mce_desc, ARRAY_SIZE(smca_smu2_mce_desc) },
- [SMCA_MP5] = { smca_mp5_mce_desc, ARRAY_SIZE(smca_mp5_mce_desc) },
- [SMCA_MPDMA] = { smca_mpdma_mce_desc, ARRAY_SIZE(smca_mpdma_mce_desc) },
- [SMCA_NBIO] = { smca_nbio_mce_desc, ARRAY_SIZE(smca_nbio_mce_desc) },
- [SMCA_PCIE] = { smca_pcie_mce_desc, ARRAY_SIZE(smca_pcie_mce_desc) },
- [SMCA_PCIE_V2] = { smca_pcie2_mce_desc, ARRAY_SIZE(smca_pcie2_mce_desc) },
- [SMCA_XGMI_PCS] = { smca_xgmipcs_mce_desc, ARRAY_SIZE(smca_xgmipcs_mce_desc) },
- /* NBIF and SHUB have the same error descriptions, for now. */
- [SMCA_NBIF] = { smca_nbif_mce_desc, ARRAY_SIZE(smca_nbif_mce_desc) },
- [SMCA_SHUB] = { smca_nbif_mce_desc, ARRAY_SIZE(smca_nbif_mce_desc) },
- [SMCA_SATA] = { smca_sata_mce_desc, ARRAY_SIZE(smca_sata_mce_desc) },
- [SMCA_USB] = { smca_usb_mce_desc, ARRAY_SIZE(smca_usb_mce_desc) },
- [SMCA_GMI_PCS] = { smca_gmipcs_mce_desc, ARRAY_SIZE(smca_gmipcs_mce_desc) },
- /* All the PHY bank types have the same error descriptions, for now. */
- [SMCA_XGMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
- [SMCA_WAFL_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
- [SMCA_GMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
-};
-
static bool f12h_mc0_mce(u16 ec, u8 xec)
{
bool ret = false;
@@ -1163,11 +687,51 @@ static void decode_mc6_mce(struct mce *m)
pr_emerg(HW_ERR "Corrupted MC6 MCE info?\n");
}
+static const char * const smca_long_names[] = {
+ [SMCA_LS ... SMCA_LS_V2] = "Load Store Unit",
+ [SMCA_IF] = "Instruction Fetch Unit",
+ [SMCA_L2_CACHE] = "L2 Cache",
+ [SMCA_DE] = "Decode Unit",
+ [SMCA_RESERVED] = "Reserved",
+ [SMCA_EX] = "Execution Unit",
+ [SMCA_FP] = "Floating Point Unit",
+ [SMCA_L3_CACHE] = "L3 Cache",
+ [SMCA_CS ... SMCA_CS_V2] = "Coherent Slave",
+ [SMCA_PIE] = "Power, Interrupts, etc.",
+
+ /* UMC v2 is separate because both of them can exist in a single system. */
+ [SMCA_UMC] = "Unified Memory Controller",
+ [SMCA_UMC_V2] = "Unified Memory Controller v2",
+ [SMCA_PB] = "Parameter Block",
+ [SMCA_PSP ... SMCA_PSP_V2] = "Platform Security Processor",
+ [SMCA_SMU ... SMCA_SMU_V2] = "System Management Unit",
+ [SMCA_MP5] = "Microprocessor 5 Unit",
+ [SMCA_MPDMA] = "MPDMA Unit",
+ [SMCA_NBIO] = "Northbridge IO Unit",
+ [SMCA_PCIE ... SMCA_PCIE_V2] = "PCI Express Unit",
+ [SMCA_XGMI_PCS] = "Ext Global Memory Interconnect PCS Unit",
+ [SMCA_NBIF] = "NBIF Unit",
+ [SMCA_SHUB] = "System Hub Unit",
+ [SMCA_SATA] = "SATA Unit",
+ [SMCA_USB] = "USB Unit",
+ [SMCA_GMI_PCS] = "Global Memory Interconnect PCS Unit",
+ [SMCA_XGMI_PHY] = "Ext Global Memory Interconnect PHY Unit",
+ [SMCA_WAFL_PHY] = "WAFL PHY Unit",
+ [SMCA_GMI_PHY] = "Global Memory Interconnect PHY Unit",
+};
+
+static const char *smca_get_long_name(enum smca_bank_types t)
+{
+ if (t >= N_SMCA_BANK_TYPES)
+ return NULL;
+
+ return smca_long_names[t];
+}
+
/* Decode errors according to Scalable MCA specification */
static void decode_smca_error(struct mce *m)
{
enum smca_bank_types bank_type = smca_get_bank_type(m->extcpu, m->bank);
- const char *ip_name;
u8 xec = XEC(m->status, xec_mask);
if (bank_type >= N_SMCA_BANK_TYPES)
@@ -1178,13 +742,7 @@ static void decode_smca_error(struct mce *m)
return;
}
- ip_name = smca_get_long_name(bank_type);
-
- pr_emerg(HW_ERR "%s Ext. Error Code: %d", ip_name, xec);
-
- /* Only print the decode of valid error codes */
- if (xec < smca_mce_descs[bank_type].num_descs)
- pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]);
+ pr_emerg(HW_ERR "%s Ext. Error Code: %d", smca_get_long_name(bank_type), xec);
if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) &&
xec == 0 && decode_dram_ecc)
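
mce_amd.c above drops the per-bank SMCA error-description tables; the decoder now prints only the bank's long name plus the raw extended error code. The replacement smca_long_names[] table relies on GCC's designated range initializers, where [A ... B] = value fills every index from A to B with the same entry. A small hedged illustration of that idiom with made-up names; it compiles as ordinary userspace C under GCC or Clang (range designators are a GNU extension):

#include <stdio.h>

enum bank_type { BANK_LS, BANK_LS_V2, BANK_IF, N_BANK_TYPES };

/* Range designator: BANK_LS and BANK_LS_V2 share one name. */
static const char * const bank_names[] = {
	[BANK_LS ... BANK_LS_V2] = "Load Store Unit",
	[BANK_IF]                = "Instruction Fetch Unit",
};

static const char *bank_name(enum bank_type t)
{
	return (t < N_BANK_TYPES && bank_names[t]) ? bank_names[t] : "unknown";
}

int main(void)
{
	printf("%s\n", bank_name(BANK_LS_V2));	/* prints "Load Store Unit" */
	return 0;
}
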
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 2b5703e5066e..c1bc53f4e184 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -300,7 +300,7 @@ err:
return res;
}
-static int mpc85xx_pci_err_remove(struct platform_device *op)
+static void mpc85xx_pci_err_remove(struct platform_device *op)
{
struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
@@ -312,8 +312,6 @@ static int mpc85xx_pci_err_remove(struct platform_device *op)
edac_pci_del_device(&op->dev);
edac_pci_free_ctl_info(pci);
-
- return 0;
}
static const struct platform_device_id mpc85xx_pci_err_match[] = {
@@ -325,7 +323,7 @@ static const struct platform_device_id mpc85xx_pci_err_match[] = {
static struct platform_driver mpc85xx_pci_err_driver = {
.probe = mpc85xx_pci_err_probe,
- .remove = mpc85xx_pci_err_remove,
+ .remove_new = mpc85xx_pci_err_remove,
.id_table = mpc85xx_pci_err_match,
.driver = {
.name = "mpc85xx_pci_err",
@@ -591,7 +589,7 @@ err:
return res;
}
-static int mpc85xx_l2_err_remove(struct platform_device *op)
+static void mpc85xx_l2_err_remove(struct platform_device *op)
{
struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
@@ -606,7 +604,6 @@ static int mpc85xx_l2_err_remove(struct platform_device *op)
out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
edac_device_del_device(&op->dev);
edac_device_free_ctl_info(edac_dev);
- return 0;
}
static const struct of_device_id mpc85xx_l2_err_of_match[] = {
@@ -630,7 +627,7 @@ MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
static struct platform_driver mpc85xx_l2_err_driver = {
.probe = mpc85xx_l2_err_probe,
- .remove = mpc85xx_l2_err_remove,
+ .remove_new = mpc85xx_l2_err_remove,
.driver = {
.name = "mpc85xx_l2_err",
.of_match_table = mpc85xx_l2_err_of_match,
@@ -659,7 +656,7 @@ MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
static struct platform_driver mpc85xx_mc_err_driver = {
.probe = fsl_mc_err_probe,
- .remove = fsl_mc_err_remove,
+ .remove_new = fsl_mc_err_remove,
.driver = {
.name = "mpc85xx_mc_err",
.of_match_table = mpc85xx_mc_err_of_match,
diff --git a/drivers/edac/npcm_edac.c b/drivers/edac/npcm_edac.c
index 6d15c1550263..2e2133b784e9 100644
--- a/drivers/edac/npcm_edac.c
+++ b/drivers/edac/npcm_edac.c
@@ -410,7 +410,7 @@ free_edac_mc:
return rc;
}
-static int edac_remove(struct platform_device *pdev)
+static void edac_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
struct priv_data *priv = mci->pvt_info;
@@ -426,8 +426,6 @@ static int edac_remove(struct platform_device *pdev)
regmap_write(npcm_regmap, pdata->ctl_int_mask_master,
pdata->int_mask_master_global_mask);
regmap_update_bits(npcm_regmap, pdata->ctl_ecc_en, pdata->ecc_en_mask, 0);
-
- return 0;
}
static const struct npcm_platform_data npcm750_edac = {
@@ -533,7 +531,7 @@ static struct platform_driver npcm_edac_driver = {
.of_match_table = npcm_edac_of_match,
},
.probe = edac_probe,
- .remove = edac_remove,
+ .remove_new = edac_remove,
};
module_platform_driver(npcm_edac_driver);
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
index c33059e9b0be..4015eb9af6fe 100644
--- a/drivers/edac/octeon_edac-l2c.c
+++ b/drivers/edac/octeon_edac-l2c.c
@@ -184,19 +184,17 @@ err:
return -ENXIO;
}
-static int octeon_l2c_remove(struct platform_device *pdev)
+static void octeon_l2c_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *l2c = platform_get_drvdata(pdev);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(l2c);
-
- return 0;
}
static struct platform_driver octeon_l2c_driver = {
.probe = octeon_l2c_probe,
- .remove = octeon_l2c_remove,
+ .remove_new = octeon_l2c_remove,
.driver = {
.name = "octeon_l2c_edac",
}
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index aeb222ca3ed1..18615cbcd9ea 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -302,18 +302,17 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
return 0;
}
-static int octeon_lmc_edac_remove(struct platform_device *pdev)
+static void octeon_lmc_edac_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
- return 0;
}
static struct platform_driver octeon_lmc_edac_driver = {
.probe = octeon_lmc_edac_probe,
- .remove = octeon_lmc_edac_remove,
+ .remove_new = octeon_lmc_edac_remove,
.driver = {
.name = "octeon_lmc_edac",
}
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
index 754eced59c32..ea8a8e337b1e 100644
--- a/drivers/edac/octeon_edac-pc.c
+++ b/drivers/edac/octeon_edac-pc.c
@@ -119,19 +119,18 @@ err:
return -ENXIO;
}
-static int co_cache_error_remove(struct platform_device *pdev)
+static void co_cache_error_remove(struct platform_device *pdev)
{
struct co_cache_error *p = platform_get_drvdata(pdev);
unregister_co_cache_error_notifier(&p->notifier);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(p->ed);
- return 0;
}
static struct platform_driver co_cache_error_driver = {
.probe = co_cache_error_probe,
- .remove = co_cache_error_remove,
+ .remove_new = co_cache_error_remove,
.driver = {
.name = "octeon_pc_edac",
}
diff --git a/drivers/edac/octeon_edac-pci.c b/drivers/edac/octeon_edac-pci.c
index 28b238eecefc..108ad9493cfb 100644
--- a/drivers/edac/octeon_edac-pci.c
+++ b/drivers/edac/octeon_edac-pci.c
@@ -87,19 +87,17 @@ err:
return res;
}
-static int octeon_pci_remove(struct platform_device *pdev)
+static void octeon_pci_remove(struct platform_device *pdev)
{
struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
edac_pci_del_device(&pdev->dev);
edac_pci_free_ctl_info(pci);
-
- return 0;
}
static struct platform_driver octeon_pci_driver = {
.probe = octeon_pci_probe,
- .remove = octeon_pci_remove,
+ .remove_new = octeon_pci_remove,
.driver = {
.name = "octeon_pci_edac",
}
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 2b306f2cc605..2afcd148fcf8 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -16,18 +16,20 @@
* rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
*/
-#include <linux/module.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
#include <linux/init.h>
+#include <linux/math64.h>
+#include <linux/mmzone.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/edac.h>
-#include <linux/mmzone.h>
#include <linux/smp.h>
-#include <linux/bitmap.h>
-#include <linux/math64.h>
-#include <linux/mod_devicetable.h>
+
#include <linux/platform_data/x86/p2sb.h>
#include <asm/cpu_device_id.h>
@@ -109,7 +111,6 @@ static struct mem_ctl_info *pnd2_mci;
#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
#define SELECTOR_DISABLED (-1)
-#define _4GB (1ul << 32)
#define PMI_ADDRESS_WIDTH 31
#define PND_MAX_PHYS_BIT 39
@@ -183,7 +184,7 @@ static int _apl_rd_reg(int port, int off, int op, u32 *data)
}
P2SB_READ(dword, P2SB_DATA_OFF, data);
- ret = (status >> 1) & 0x3;
+ ret = (status >> 1) & GENMASK(1, 0);
out:
/* Hide the P2SB device, if it was hidden before */
if (hidden)
@@ -307,7 +308,7 @@ static bool two_channels; /* Both PMI channels in one slice enabled */
static u8 sym_chan_mask;
static u8 asym_chan_mask;
-static u8 chan_mask;
+static unsigned long chan_mask;
static int slice_selector = -1;
static int chan_selector = -1;
@@ -329,7 +330,7 @@ static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
return;
}
if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
- pr_info(FW_BUG "MOT mask not power of two\n");
+ pr_info(FW_BUG "MOT mask is invalid\n");
return;
}
if (base & ~mask) {
@@ -587,7 +588,7 @@ static int get_registers(void)
/* Get a contiguous memory address (remove the MMIO gap) */
static u64 remove_mmio_gap(u64 sys)
{
- return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
+ return (sys < SZ_4G) ? sys : sys - (SZ_4G - top_lm);
}
/* Squeeze out one address bit, shift upper part down to fill gap */
@@ -598,7 +599,7 @@ static void remove_addr_bit(u64 *addr, int bitidx)
if (bitidx == -1)
return;
- mask = (1ull << bitidx) - 1;
+ mask = BIT_ULL(bitidx) - 1;
*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}
@@ -642,8 +643,8 @@ static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
int sym_chan_shift = sym_channels >> 1;
/* Give up if address is out of range, or in MMIO gap */
- if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
- (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
+ if (addr >= BIT(PND_MAX_PHYS_BIT) ||
+ (addr >= top_lm && addr < SZ_4G) || addr >= top_hm) {
snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
return -EINVAL;
}
@@ -727,10 +728,10 @@ static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
}
/* Translate PMI address to memory (rank, row, bank, column) */
-#define C(n) (0x10 | (n)) /* column */
-#define B(n) (0x20 | (n)) /* bank */
-#define R(n) (0x40 | (n)) /* row */
-#define RS (0x80) /* rank */
+#define C(n) (BIT(4) | (n)) /* column */
+#define B(n) (BIT(5) | (n)) /* bank */
+#define R(n) (BIT(6) | (n)) /* row */
+#define RS (BIT(7)) /* rank */
/* addrdec values */
#define AMAP_1KB 0
@@ -1064,9 +1065,9 @@ static int apl_check_ecc_active(void)
int i, ret = 0;
/* Check dramtype and ECC mode for each present DIMM */
- for (i = 0; i < APL_NUM_CHANNELS; i++)
- if (chan_mask & BIT(i))
- ret += check_channel(i);
+ for_each_set_bit(i, &chan_mask, APL_NUM_CHANNELS)
+ ret += check_channel(i);
+
return ret ? -EINVAL : 0;
}
@@ -1205,10 +1206,7 @@ static void apl_get_dimm_config(struct mem_ctl_info *mci)
u64 capacity;
int i, g;
- for (i = 0; i < APL_NUM_CHANNELS; i++) {
- if (!(chan_mask & BIT(i)))
- continue;
-
+ for_each_set_bit(i, &chan_mask, APL_NUM_CHANNELS) {
dimm = edac_get_dimm(mci, i, 0, 0);
if (!dimm) {
edac_dbg(0, "No allocated DIMM for channel %d\n", i);
@@ -1228,8 +1226,7 @@ static void apl_get_dimm_config(struct mem_ctl_info *mci)
}
pvt->dimm_geom[i] = g;
- capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
- (1ul << dimms[g].colbits);
+ capacity = (d->rken0 + d->rken1) * 8 * BIT(dimms[g].rowbits + dimms[g].colbits);
edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
dimm->grain = 32;
@@ -1295,7 +1292,7 @@ static void dnv_get_dimm_config(struct mem_ctl_info *mci)
continue;
}
- capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
+ capacity = ranks_of_dimm[j] * banks * BIT(rowbits + colbits);
edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
dimm->grain = 32;
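
In pnd2_edac.c the explicit "loop over all channels and test chan_mask & BIT(i)" pattern is replaced with for_each_set_bit(), which is why chan_mask changes type from u8 to unsigned long (the iterator takes a pointer to an unsigned long bitmap). A hedged sketch of the pattern with hypothetical names:

#include <linux/bitops.h>
#include <linux/printk.h>

#define MY_NUM_CHANNELS	4

/* Visit only the channels whose bit is set in the mask. */
static void walk_channels(unsigned long chan_mask)
{
	int i;

	for_each_set_bit(i, &chan_mask, MY_NUM_CHANNELS)
		pr_info("channel %d is present\n", i);
}
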
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 046969b4e82e..1eea3341a916 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -1329,8 +1329,7 @@ static int ppc4xx_edac_probe(struct platform_device *op)
*
* Unconditionally returns 0.
*/
-static int
-ppc4xx_edac_remove(struct platform_device *op)
+static void ppc4xx_edac_remove(struct platform_device *op)
{
struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
@@ -1344,8 +1343,6 @@ ppc4xx_edac_remove(struct platform_device *op)
edac_mc_del_mc(mci->pdev);
edac_mc_free(mci);
-
- return 0;
}
/**
@@ -1379,7 +1376,7 @@ ppc4xx_edac_opstate_init(void)
static struct platform_driver ppc4xx_edac_driver = {
.probe = ppc4xx_edac_probe,
- .remove = ppc4xx_edac_remove,
+ .remove_new = ppc4xx_edac_remove,
.driver = {
.name = PPC4XX_EDAC_MODULE_NAME,
.of_match_table = ppc4xx_edac_match,
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index b2db545c6810..5539917c01dd 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -390,14 +390,12 @@ irq_done:
return rc;
}
-static int qcom_llcc_edac_remove(struct platform_device *pdev)
+static void qcom_llcc_edac_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *edev_ctl = dev_get_drvdata(&pdev->dev);
edac_device_del_device(edev_ctl->dev);
edac_device_free_ctl_info(edev_ctl);
-
- return 0;
}
static const struct platform_device_id qcom_llcc_edac_id_table[] = {
@@ -408,7 +406,7 @@ MODULE_DEVICE_TABLE(platform, qcom_llcc_edac_id_table);
static struct platform_driver qcom_llcc_edac_driver = {
.probe = qcom_llcc_edac_probe,
- .remove = qcom_llcc_edac_remove,
+ .remove_new = qcom_llcc_edac_remove,
.driver = {
.name = "qcom_llcc_edac",
},
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 0c779a0326b6..26cca5a9322d 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -439,7 +439,7 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = {
static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
- {0,} /* 0 terminated list. */
+ { NULL, }
};
/* This changes depending if 1HA or 2HA:
@@ -505,7 +505,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
- {0,} /* 0 terminated list. */
+ { NULL, }
};
/* Haswell support */
@@ -576,7 +576,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = {
static const struct pci_id_table pci_dev_descr_haswell_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
- {0,} /* 0 terminated list. */
+ { NULL, }
};
/* Knight's Landing Support */
@@ -620,7 +620,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = {
static const struct pci_id_table pci_dev_descr_knl_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
- {0,}
+ { NULL, }
};
/*
@@ -686,7 +686,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = {
static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
- {0,} /* 0 terminated list. */
+ { NULL, }
};
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index ce3e0069e028..9c5b6f8bd8bd 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -648,6 +648,10 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
memset(&res, 0, sizeof(res));
res.mce = mce;
res.addr = mce->addr & MCI_ADDR_PHYSADDR;
+ if (!pfn_to_online_page(res.addr >> PAGE_SHIFT)) {
+ pr_err("Invalid address 0x%llx in IA32_MC%d_ADDR\n", mce->addr, mce->bank);
+ return NOTIFY_DONE;
+ }
/* Try driver decoder first */
if (!(driver_decode && driver_decode(&res))) {
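
skx_common.c gains a sanity check: the MCE-reported physical address is converted to a PFN and must map to an online page before it is handed to the address decoders; otherwise the notifier bails out with NOTIFY_DONE. A hedged sketch of that guard with a made-up helper name (mce_addr_is_usable):

#include <linux/memory_hotplug.h>
#include <linux/mm.h>

/* Illustrative only: reject addresses that do not map to online memory. */
static bool mce_addr_is_usable(u64 addr)
{
	return pfn_to_online_page(addr >> PAGE_SHIFT) != NULL;
}
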
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index c4fc64cbecd0..709babce43ba 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -1410,7 +1410,7 @@ free_edac_mc:
*
* Return: Unconditionally 0
*/
-static int mc_remove(struct platform_device *pdev)
+static void mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
struct synps_edac_priv *priv = mci->pvt_info;
@@ -1425,8 +1425,6 @@ static int mc_remove(struct platform_device *pdev)
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
-
- return 0;
}
static struct platform_driver synps_edac_mc_driver = {
@@ -1435,7 +1433,7 @@ static struct platform_driver synps_edac_mc_driver = {
.of_match_table = synps_edac_match,
},
.probe = mc_probe,
- .remove = mc_remove,
+ .remove_new = mc_remove,
};
module_platform_driver(synps_edac_mc_driver);
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index b9c5772da959..90d46e5c4ff0 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -1133,7 +1133,7 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
decode_register(other, OCX_OTHER_SIZE,
ocx_com_errors, ctx->reg_com_int);
- strncat(msg, other, OCX_MESSAGE_SIZE);
+ strlcat(msg, other, OCX_MESSAGE_SIZE);
for (lane = 0; lane < OCX_RX_LANES; lane++)
if (ctx->reg_com_int & BIT(lane)) {
@@ -1142,12 +1142,12 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
lane, ctx->reg_lane_int[lane],
lane, ctx->reg_lane_stat11[lane]);
- strncat(msg, other, OCX_MESSAGE_SIZE);
+ strlcat(msg, other, OCX_MESSAGE_SIZE);
decode_register(other, OCX_OTHER_SIZE,
ocx_lane_errors,
ctx->reg_lane_int[lane]);
- strncat(msg, other, OCX_MESSAGE_SIZE);
+ strlcat(msg, other, OCX_MESSAGE_SIZE);
}
if (ctx->reg_com_int & OCX_COM_INT_CE)
@@ -1217,7 +1217,7 @@ static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
decode_register(other, OCX_OTHER_SIZE,
ocx_com_link_errors, ctx->reg_com_link_int);
- strncat(msg, other, OCX_MESSAGE_SIZE);
+ strlcat(msg, other, OCX_MESSAGE_SIZE);
if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
@@ -1896,7 +1896,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);
- strncat(msg, other, L2C_MESSAGE_SIZE);
+ strlcat(msg, other, L2C_MESSAGE_SIZE);
if (ctx->reg_int & mask_ue)
edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);
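
The thunderx hunks swap strncat() for strlcat() while assembling the message: strncat()'s size argument only bounds how many bytes are appended from the source, so repeated calls limited by OCX_MESSAGE_SIZE could still overrun msg, whereas strlcat()'s size argument is the total size of the destination and the result stays NUL-terminated. A hedged sketch of the safer pattern (buffer and function names are made up):

#include <linux/string.h>

#define MSG_SIZE	64

/* Illustrative only: the third argument of strlcat() is the total size of
 * the destination, so repeated appends are truncated instead of overflowing
 * and the buffer remains NUL-terminated. The caller's buffer is assumed to
 * be MSG_SIZE bytes. */
static void build_message(char *msg, const char *a, const char *b)
{
	msg[0] = '\0';
	strlcat(msg, a, MSG_SIZE);
	strlcat(msg, b, MSG_SIZE);
}
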
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
index 6971ded598de..29723c9592f7 100644
--- a/drivers/edac/ti_edac.c
+++ b/drivers/edac/ti_edac.c
@@ -312,19 +312,17 @@ err:
return ret;
}
-static int ti_edac_remove(struct platform_device *pdev)
+static void ti_edac_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
-
- return 0;
}
static struct platform_driver ti_edac_driver = {
.probe = ti_edac_probe,
- .remove = ti_edac_remove,
+ .remove_new = ti_edac_remove,
.driver = {
.name = EDAC_MOD_NAME,
.of_match_table = ti_edac_of_match,
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index c52b9dd9154c..1b50f8160013 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1960,7 +1960,7 @@ out_err:
return rc;
}
-static int xgene_edac_remove(struct platform_device *pdev)
+static void xgene_edac_remove(struct platform_device *pdev)
{
struct xgene_edac *edac = dev_get_drvdata(&pdev->dev);
struct xgene_edac_mc_ctx *mcu;
@@ -1981,8 +1981,6 @@ static int xgene_edac_remove(struct platform_device *pdev)
list_for_each_entry_safe(node, temp_node, &edac->socs, next)
xgene_edac_soc_remove(node);
-
- return 0;
}
static const struct of_device_id xgene_edac_of_match[] = {
@@ -1993,7 +1991,7 @@ MODULE_DEVICE_TABLE(of, xgene_edac_of_match);
static struct platform_driver xgene_edac_driver = {
.probe = xgene_edac_probe,
- .remove = xgene_edac_remove,
+ .remove_new = xgene_edac_remove,
.driver = {
.name = "xgene-edac",
.of_match_table = xgene_edac_of_match,
diff --git a/drivers/edac/zynqmp_edac.c b/drivers/edac/zynqmp_edac.c
index ac7d1e0b324c..2d9a5cfd8931 100644
--- a/drivers/edac/zynqmp_edac.c
+++ b/drivers/edac/zynqmp_edac.c
@@ -426,7 +426,7 @@ free_dev_ctl:
return ret;
}
-static int edac_remove(struct platform_device *pdev)
+static void edac_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
struct edac_priv *priv = dci->pvt_info;
@@ -440,8 +440,6 @@ static int edac_remove(struct platform_device *pdev)
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(dci);
-
- return 0;
}
static const struct of_device_id zynqmp_ocm_edac_match[] = {
@@ -457,7 +455,7 @@ static struct platform_driver zynqmp_ocm_edac_driver = {
.of_match_table = zynqmp_ocm_edac_match,
},
.probe = edac_probe,
- .remove = edac_remove,
+ .remove_new = edac_remove,
};
module_platform_driver(zynqmp_ocm_edac_driver);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 7e88fd489741..9db9290c3269 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -279,6 +279,51 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_TI_SLLZ059 0x20
#define QUIRK_IR_WAKE 0x40
+// On the PCI Express Root Complex of any type of AMD Ryzen machine, a VIA VT6306/6307/6308 behind
+// an ASMedia ASM1083/1085 bridge has an inconvenient property: read accesses to the 'Isochronous
+// Cycle Timer' register (at offset 0xf0 in PCI I/O space) often cause an unexpected system reboot.
+// The mechanism is unclear, since reads of the other registers (e.g. the 'Node ID' register) are
+// safe; it is probably due to detection of some type of PCIe error.
+#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ 0x80000000
+
+#if IS_ENABLED(CONFIG_X86)
+
+static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
+{
+ return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
+}
+
+#define PCI_DEVICE_ID_ASMEDIA_ASM108X 0x1080
+
+static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
+{
+ const struct pci_dev *pcie_to_pci_bridge;
+
+ // Detect any type of AMD Ryzen machine.
+ if (!static_cpu_has(X86_FEATURE_ZEN))
+ return false;
+
+ // Detect VIA VT6306/6307/6308.
+ if (pdev->vendor != PCI_VENDOR_ID_VIA)
+ return false;
+ if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
+ return false;
+
+ // Detect Asmedia ASM1083/1085.
+ pcie_to_pci_bridge = pdev->bus->self;
+ if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
+ return false;
+ if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
+ return false;
+
+ return true;
+}
+
+#else
+#define has_reboot_by_cycle_timer_read_quirk(ohci) false
+#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev) false
+#endif
+
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
unsigned short vendor, device, revision, flags;
@@ -1724,6 +1769,9 @@ static u32 get_cycle_time(struct fw_ohci *ohci)
s32 diff01, diff12;
int i;
+ if (has_reboot_by_cycle_timer_read_quirk(ohci))
+ return 0;
+
c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
if (ohci->quirks & QUIRK_CYCLE_TIMER) {
@@ -3630,6 +3678,9 @@ static int pci_probe(struct pci_dev *dev,
if (param_quirks)
ohci->quirks = param_quirks;
+ if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
+ ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;
+
/*
* Because dma_alloc_coherent() allocates at least one page,
* we save space by using a common buffer for the AR request/
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index f80d87c199c3..937be269fee8 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -18,8 +18,6 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
struct acpi_device *adev;
struct device *phys_dev;
char hid[ACPI_ID_LEN];
- u64 uid;
- int ret;
if (node->header.length != 12)
return -EINVAL;
@@ -31,10 +29,9 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
node->acpi.hid >> 16);
for_each_acpi_dev_match(adev, hid, NULL, -1) {
- ret = acpi_dev_uid_to_integer(adev, &uid);
- if (ret == 0 && node->acpi.uid == uid)
+ if (acpi_dev_uid_match(adev, node->acpi.uid))
break;
- if (ret == -ENODATA && node->acpi.uid == 0)
+ if (!acpi_device_uid(adev) && node->acpi.uid == 0)
break;
}
if (!adev)
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 2c489627a807..65ffd0b760b2 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -5,8 +5,8 @@
# EFI_ZBOOT_FORWARD_CFI
quiet_cmd_copy_and_pad = PAD $@
- cmd_copy_and_pad = cp $< $@ && \
- truncate -s $(shell hexdump -s16 -n4 -e '"%u"' $<) $@
+ cmd_copy_and_pad = cp $< $@; \
+ truncate -s $$(hexdump -s16 -n4 -e '"%u"' $<) $@
# Pad the file to the size of the uncompressed image in memory, including BSS
$(obj)/vmlinux.bin: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c
index 479dd445acdc..77359e802181 100644
--- a/drivers/firmware/efi/libstub/x86-5lvl.c
+++ b/drivers/firmware/efi/libstub/x86-5lvl.c
@@ -13,8 +13,8 @@ bool efi_no5lvl;
static void (*la57_toggle)(void *cr3);
static const struct desc_struct gdt[] = {
- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(DESC_CODE64, 0, 0xfffff),
};
/*
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index da9b7b8d0716..0d510c9a06a4 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -787,6 +787,8 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n");
seed[0] = 0;
}
+
+ boot_params_ptr->hdr.loadflags |= KASLR_FLAG;
}
status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index dd7a783d53b5..e73f88050f08 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -1872,7 +1872,7 @@ static irqreturn_t dfl_irq_handler(int irq, void *arg)
{
struct eventfd_ctx *trigger = arg;
- eventfd_signal(trigger, 1);
+ eventfd_signal(trigger);
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8dee52ce26d0..93cf73d6fa11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2188,15 +2188,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
- if (adev->mman.discovery_bin) {
- /*
- * FIXME: The bounding box is still needed by Navi12, so
- * temporarily read it from gpu_info firmware. Should be dropped
- * when DAL no longer needs it.
- */
- if (adev->asic_type != CHIP_NAVI12)
- return 0;
- }
+ if (adev->mman.discovery_bin)
+ return 0;
switch (adev->asic_type) {
default:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c8c00c2a5224..4e82ee4d74ac 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -6170,8 +6170,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
-
- if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
+ else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ stream->signal == SIGNAL_TYPE_EDP) {
//
// should decide stream support vsc sdp colorimetry capability
// before building vsc info packet
@@ -6187,8 +6188,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
- aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+ if (stream->link->psr_settings.psr_feature_enabled)
+ aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
}
finish:
dc_sink_release(sink);
@@ -6914,8 +6916,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- if (!mst_state->pbn_div)
- mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
+ mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index ec77b2b41ba3..d2271e308fa0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -440,7 +440,115 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
.use_urgent_burst_bw = 0
};
-struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
+struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dcfclk_mhz = 560.0,
+ .fabricclk_mhz = 560.0,
+ .dispclk_mhz = 513.0,
+ .dppclk_mhz = 513.0,
+ .phyclk_mhz = 540.0,
+ .socclk_mhz = 560.0,
+ .dscclk_mhz = 171.0,
+ .dram_speed_mts = 1069.0,
+ },
+ {
+ .state = 1,
+ .dcfclk_mhz = 694.0,
+ .fabricclk_mhz = 694.0,
+ .dispclk_mhz = 642.0,
+ .dppclk_mhz = 642.0,
+ .phyclk_mhz = 600.0,
+ .socclk_mhz = 694.0,
+ .dscclk_mhz = 214.0,
+ .dram_speed_mts = 1324.0,
+ },
+ {
+ .state = 2,
+ .dcfclk_mhz = 875.0,
+ .fabricclk_mhz = 875.0,
+ .dispclk_mhz = 734.0,
+ .dppclk_mhz = 734.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 875.0,
+ .dscclk_mhz = 245.0,
+ .dram_speed_mts = 1670.0,
+ },
+ {
+ .state = 3,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 1000.0,
+ .dispclk_mhz = 1100.0,
+ .dppclk_mhz = 1100.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1000.0,
+ .dscclk_mhz = 367.0,
+ .dram_speed_mts = 2000.0,
+ },
+ {
+ .state = 4,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 1284.0,
+ .dppclk_mhz = 1284.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1200.0,
+ .dscclk_mhz = 428.0,
+ .dram_speed_mts = 2000.0,
+ },
+ {
+ .state = 5,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 1284.0,
+ .dppclk_mhz = 1284.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1200.0,
+ .dscclk_mhz = 428.0,
+ .dram_speed_mts = 2000.0,
+ },
+ },
+
+ .num_states = 5,
+ .sr_exit_time_us = 1.9,
+ .sr_enter_plus_exit_time_us = 4.4,
+ .urgent_latency_us = 3.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 40.0,
+ .max_avg_dram_bw_use_normal_percent = 40.0,
+ .writeback_latency_us = 12.0,
+ .ideal_dram_bw_after_urgent_percent = 40.0,
+ .max_request_size_bytes = 256,
+ .dram_channel_width_bytes = 16,
+ .fabric_datapath_to_dcn_data_return_bytes = 64,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.5,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 131,
+ .urgent_out_of_order_return_per_channel_bytes = 4096,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 16,
+ .vmm_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 45.0,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3850,
+ .xfc_bus_transport_time_us = 20,
+ .xfc_xbuf_latency_tolerance_us = 50,
+ .use_urgent_burst_bw = 0,
+};
struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.odm_capable = 1,
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 84f9b412a4f1..738ee763f24a 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -147,12 +147,15 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */
- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- vsc_packet_revision = vsc_packet_rev4;
- else if (stream->link->replay_settings.config.replay_supported)
+ if (stream->link->psr_settings.psr_feature_enabled) {
+ if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ vsc_packet_revision = vsc_packet_rev4;
+ else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ vsc_packet_revision = vsc_packet_rev2;
+ }
+
+ if (stream->link->replay_settings.config.replay_supported)
vsc_packet_revision = vsc_packet_rev4;
- else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
- vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support */
if (stream->use_vsc_sdp_for_colorimetry)
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 1c5049e894e3..c2ccf3724e37 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -318,6 +318,7 @@ enum pp_xgmi_plpd_mode {
#define MAX_GFX_CLKS 8
#define MAX_CLKS 4
#define NUM_VCN 4
+#define NUM_JPEG_ENG 32
struct seq_file;
enum amd_pp_clock_type;
@@ -774,6 +775,85 @@ struct gpu_metrics_v1_4 {
uint16_t padding;
};
+struct gpu_metrics_v1_5 {
+ struct metrics_table_header common_header;
+
+ /* Temperature (Celsius) */
+ uint16_t temperature_hotspot;
+ uint16_t temperature_mem;
+ uint16_t temperature_vrsoc;
+
+ /* Power (Watts) */
+ uint16_t curr_socket_power;
+
+ /* Utilization (%) */
+ uint16_t average_gfx_activity;
+ uint16_t average_umc_activity; // memory controller
+ uint16_t vcn_activity[NUM_VCN];
+ uint16_t jpeg_activity[NUM_JPEG_ENG];
+
+ /* Energy (15.259uJ (2^-16) units) */
+ uint64_t energy_accumulator;
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Throttle status */
+ uint32_t throttle_status;
+
+ /* Clock Lock Status. Each bit corresponds to clock instance */
+ uint32_t gfxclk_lock_status;
+
+ /* Link width (number of lanes) and speed (in 0.1 GT/s) */
+ uint16_t pcie_link_width;
+ uint16_t pcie_link_speed;
+
+ /* XGMI bus width and bitrate (in Gbps) */
+ uint16_t xgmi_link_width;
+ uint16_t xgmi_link_speed;
+
+ /* Utilization Accumulated (%) */
+ uint32_t gfx_activity_acc;
+ uint32_t mem_activity_acc;
+
+ /*PCIE accumulated bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_acc;
+
+ /*PCIE instantaneous bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_inst;
+
+ /* PCIE L0 to recovery state transition accumulated count */
+ uint64_t pcie_l0_to_recov_count_acc;
+
+ /* PCIE replay accumulated count */
+ uint64_t pcie_replay_count_acc;
+
+ /* PCIE replay rollover accumulated count */
+ uint64_t pcie_replay_rover_count_acc;
+
+ /* PCIE NAK sent accumulated count */
+ uint32_t pcie_nak_sent_count_acc;
+
+ /* PCIE NAK received accumulated count */
+ uint32_t pcie_nak_rcvd_count_acc;
+
+ /* XGMI accumulated data transfer size (KiloBytes) */
+ uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS];
+ uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS];
+
+ /* PMFW attached timestamp (10ns resolution) */
+ uint64_t firmware_timestamp;
+
+ /* Current clocks (MHz) */
+ uint16_t current_gfxclk[MAX_GFX_CLKS];
+ uint16_t current_socclk[MAX_CLKS];
+ uint16_t current_vclk0[MAX_CLKS];
+ uint16_t current_dclk0[MAX_CLKS];
+ uint16_t current_uclk;
+
+ uint16_t padding;
+};
+
/*
* gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v2_1 or later instead.
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 49028dde0f87..20c53eefd680 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2128,7 +2128,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (amdgpu_dpm_is_overdrive_supported(adev))
*states = ATTR_STATE_SUPPORTED;
} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
- if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1))
+ if ((adev->flags & AMD_IS_APU &&
+ gc_ver != IP_VERSION(9, 4, 3)) ||
+ gc_ver == IP_VERSION(9, 0, 1))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pcie_bw)) {
/* PCIe Perf counters won't work on APU nodes */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index fef2d290f3f2..7b812b9994d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0x9
+#define SMU_METRICS_TABLE_VERSION 0xB
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -219,7 +219,103 @@ typedef struct __attribute__((packed, aligned(4))) {
uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated
uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated
uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated
-} MetricsTable_t;
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[32];
+} MetricsTableX_t;
+
+typedef struct __attribute__((packed, aligned(4))) {
+ uint32_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t MaxHbmTemperature;
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t MaxHbmTemperatureAcc;
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t MaxSocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t CcdEnergyAcc;
+ uint64_t XcdEnergyAcc;
+ uint64_t AidEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t CclkFrequencyLimit;
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency;
+ uint32_t UclkFrequency;
+ uint32_t SocclkFrequency[4];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[4];
+ uint64_t GfxclkFrequencyAcc[8];
+ uint64_t CclkFrequencyAcc[96];
+
+ //FREQUENCY RANGE
+ uint32_t MaxCclkFrequency;
+ uint32_t MinCclkFrequency;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+
+ //XGMI
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc[8];
+ uint64_t XgmiWriteBandwidthAcc[8];
+
+ //ACTIVITY
+ uint32_t SocketC0Residency;
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketC0ResidencyAcc;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[4];
+
+ //THROTTLERS
+ uint32_t ProchotResidencyAcc;
+ uint32_t PptResidencyAcc;
+ uint32_t SocketThmResidencyAcc;
+ uint32_t VrThmResidencyAcc;
+ uint32_t HbmThmResidencyAcc;
+ uint32_t GfxLockXCDMak;
+
+ // New Items at end to maintain driver compatibility
+ uint32_t GfxclkFrequency[8];
+
+ //PSNs
+ uint64_t PublicSerialNumber_AID[4];
+ uint64_t PublicSerialNumber_XCD[8];
+ uint64_t PublicSerialNumber_CCD[12];
+
+ //XGMI Data transfer size
+ uint64_t XgmiReadDataSizeAcc[8];//in KByte
+ uint64_t XgmiWriteDataSizeAcc[8];//in KByte
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[32];
+} MetricsTableA_t;
#define SMU_VF_METRICS_TABLE_VERSION 0x3
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 900a2d9e6d85..b64e07b75937 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -245,6 +245,8 @@ struct PPTable_t {
#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x) ((x) & 0x3ff)
#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
+#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\
+ (metrics_a->field) : (metrics_x->field))
struct smu_v13_0_6_dpm_map {
enum smu_clk_type clk_type;
@@ -327,7 +329,8 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t),
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+ max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
@@ -335,12 +338,13 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
- smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ smu_table->metrics_table = kzalloc(max(sizeof(MetricsTableX_t),
+ sizeof(MetricsTableA_t)), GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_4);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5);
smu_table->gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
@@ -431,9 +435,11 @@ static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
+ MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
+ struct amdgpu_device *adev = smu->adev;
int ret, i, retry = 100;
/* Store one-time values in driver PPTable */
@@ -444,7 +450,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
return ret;
/* Ensure that metrics have been updated */
- if (metrics->AccumulationCounter)
+ if (GET_METRIC_FIELD(AccumulationCounter))
break;
usleep_range(1000, 1100);
@@ -454,29 +460,29 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
return -ETIME;
pptable->MaxSocketPowerLimit =
- SMUQ10_ROUND(metrics->MaxSocketPowerLimit);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit));
pptable->MaxGfxclkFrequency =
- SMUQ10_ROUND(metrics->MaxGfxclkFrequency);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency));
pptable->MinGfxclkFrequency =
- SMUQ10_ROUND(metrics->MinGfxclkFrequency);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency));
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->FclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]);
pptable->UclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->UclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]);
pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND(
- metrics->SocclkFrequencyTable[i]);
+ GET_METRIC_FIELD(SocclkFrequencyTable)[i]);
pptable->VclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->VclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]);
pptable->DclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->DclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]);
pptable->LclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->LclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]);
}
/* use AID0 serial number by default */
- pptable->PublicSerialNumber_AID = metrics->PublicSerialNumber_AID[0];
+ pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0];
pptable->Init = true;
}
@@ -778,7 +784,8 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
+ MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int xcc_id;
@@ -793,50 +800,50 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_GFXCLK:
if (smu->smc_fw_version >= 0x552F00) {
xcc_id = GET_INST(GC, 0);
- *value = SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
} else {
*value = 0;
}
break;
case METRICS_CURR_SOCCLK:
case METRICS_AVERAGE_SOCCLK:
- *value = SMUQ10_ROUND(metrics->SocclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]);
break;
case METRICS_CURR_UCLK:
case METRICS_AVERAGE_UCLK:
- *value = SMUQ10_ROUND(metrics->UclkFrequency);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
break;
case METRICS_CURR_VCLK:
- *value = SMUQ10_ROUND(metrics->VclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]);
break;
case METRICS_CURR_DCLK:
- *value = SMUQ10_ROUND(metrics->DclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]);
break;
case METRICS_CURR_FCLK:
- *value = SMUQ10_ROUND(metrics->FclkFrequency);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency));
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = SMUQ10_ROUND(metrics->SocketGfxBusy);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
break;
case METRICS_AVERAGE_MEMACTIVITY:
- *value = SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
break;
case METRICS_CURR_SOCKETPOWER:
- *value = SMUQ10_ROUND(metrics->SocketPower) << 8;
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = SMUQ10_ROUND(metrics->MaxSocketTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
- *value = SMUQ10_ROUND(metrics->MaxHbmTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
/* This is the max of all VRs and not just SOC VR.
* No need to define another data type for the same.
*/
case METRICS_TEMPERATURE_VRSOC:
- *value = SMUQ10_ROUND(metrics->MaxVrTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
@@ -2022,67 +2029,70 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_4 *gpu_metrics =
- (struct gpu_metrics_v1_4 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_5 *gpu_metrics =
+ (struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table;
struct amdgpu_device *adev = smu->adev;
- int ret = 0, xcc_id, inst, i;
- MetricsTable_t *metrics;
+ int ret = 0, xcc_id, inst, i, j;
+ MetricsTableX_t *metrics_x;
+ MetricsTableA_t *metrics_a;
u16 link_width_level;
- metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
- ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
+ metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
if (ret) {
- kfree(metrics);
+ kfree(metrics_x);
return ret;
}
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 4);
+ metrics_a = (MetricsTableA_t *)metrics_x;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5);
gpu_metrics->temperature_hotspot =
- SMUQ10_ROUND(metrics->MaxSocketTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature));
/* Individual HBM stack temperature is not reported */
gpu_metrics->temperature_mem =
- SMUQ10_ROUND(metrics->MaxHbmTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature));
/* Reports max temperature of all voltage rails */
gpu_metrics->temperature_vrsoc =
- SMUQ10_ROUND(metrics->MaxVrTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature));
gpu_metrics->average_gfx_activity =
- SMUQ10_ROUND(metrics->SocketGfxBusy);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
gpu_metrics->average_umc_activity =
- SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
gpu_metrics->curr_socket_power =
- SMUQ10_ROUND(metrics->SocketPower);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower));
/* Energy counter reported in 15.259uJ (2^-16) units */
- gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
+ gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc);
for (i = 0; i < MAX_GFX_CLKS; i++) {
xcc_id = GET_INST(GC, i);
if (xcc_id >= 0)
gpu_metrics->current_gfxclk[i] =
- SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
if (i < MAX_CLKS) {
gpu_metrics->current_socclk[i] =
- SMUQ10_ROUND(metrics->SocclkFrequency[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]);
inst = GET_INST(VCN, i);
if (inst >= 0) {
gpu_metrics->current_vclk0[i] =
- SMUQ10_ROUND(metrics->VclkFrequency[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]);
gpu_metrics->current_dclk0[i] =
- SMUQ10_ROUND(metrics->DclkFrequency[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]);
}
}
}
- gpu_metrics->current_uclk = SMUQ10_ROUND(metrics->UclkFrequency);
+ gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
/* Throttle status is not reported through metrics now */
gpu_metrics->throttle_status = 0;
/* Clock Lock Status. Each bit corresponds to each GFXCLK instance */
- gpu_metrics->gfxclk_lock_status = metrics->GfxLockXCDMak >> GET_INST(GC, 0);
+ gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0);
if (!(adev->flags & AMD_IS_APU)) {
link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
@@ -2094,38 +2104,57 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->pcie_link_speed =
smu_v13_0_6_get_current_pcie_link_speed(smu);
gpu_metrics->pcie_bandwidth_acc =
- SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]);
+ SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]);
gpu_metrics->pcie_bandwidth_inst =
- SMUQ10_ROUND(metrics->PcieBandwidth[0]);
+ SMUQ10_ROUND(metrics_x->PcieBandwidth[0]);
gpu_metrics->pcie_l0_to_recov_count_acc =
- metrics->PCIeL0ToRecoveryCountAcc;
+ metrics_x->PCIeL0ToRecoveryCountAcc;
gpu_metrics->pcie_replay_count_acc =
- metrics->PCIenReplayAAcc;
+ metrics_x->PCIenReplayAAcc;
gpu_metrics->pcie_replay_rover_count_acc =
- metrics->PCIenReplayARolloverCountAcc;
+ metrics_x->PCIenReplayARolloverCountAcc;
+ gpu_metrics->pcie_nak_sent_count_acc =
+ metrics_x->PCIeNAKSentCountAcc;
+ gpu_metrics->pcie_nak_rcvd_count_acc =
+ metrics_x->PCIeNAKReceivedCountAcc;
}
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
gpu_metrics->gfx_activity_acc =
- SMUQ10_ROUND(metrics->SocketGfxBusyAcc);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc));
gpu_metrics->mem_activity_acc =
- SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc));
for (i = 0; i < NUM_XGMI_LINKS; i++) {
gpu_metrics->xgmi_read_data_acc[i] =
- SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]);
gpu_metrics->xgmi_write_data_acc[i] =
- SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]);
+ }
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ inst = GET_INST(JPEG, i);
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy)
+ [(inst * adev->jpeg.num_jpeg_rings) + j]);
+ }
+ }
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ inst = GET_INST(VCN, i);
+ gpu_metrics->vcn_activity[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]);
}
- gpu_metrics->xgmi_link_width = SMUQ10_ROUND(metrics->XgmiWidth);
- gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(metrics->XgmiBitrate);
+ gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth));
+ gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate));
- gpu_metrics->firmware_timestamp = metrics->Timestamp;
+ gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp);
*table = (void *)gpu_metrics;
- kfree(metrics);
+ kfree(metrics_x);
return sizeof(*gpu_metrics);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 001a5cf09657..00cd615bbcdc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -989,6 +989,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(1, 4):
structure_size = sizeof(struct gpu_metrics_v1_4);
break;
+ case METRICS_VERSION(1, 5):
+ structure_size = sizeof(struct gpu_metrics_v1_5);
+ break;
case METRICS_VERSION(2, 0):
structure_size = sizeof(struct gpu_metrics_v2_0);
break;
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 8161b1a1a4b1..541e4f5afc4c 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -210,7 +210,7 @@ static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux,
struct ps8640 *ps_bridge = aux_to_ps8640(aux);
struct regmap *map = ps_bridge->regmap[PAGE0_DP_CNTL];
struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
- unsigned int len = msg->size;
+ size_t len = msg->size;
unsigned int data;
unsigned int base;
int ret;
@@ -330,11 +330,12 @@ static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux,
return ret;
}
- buf[i] = data;
+ if (i < msg->size)
+ buf[i] = data;
}
}
- return len;
+ return min(len, msg->size);
}
static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index c45c07840f64..b5464199b633 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -527,6 +527,7 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
u32 request_val = AUX_CMD_REQ(msg->request);
u8 *buf = msg->buffer;
unsigned int len = msg->size;
+ unsigned int short_len;
unsigned int val;
int ret;
u8 addr_len[SN_AUX_LENGTH_REG + 1 - SN_AUX_ADDR_19_16_REG];
@@ -600,7 +601,8 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
}
if (val & AUX_IRQ_STATUS_AUX_SHORT) {
- ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &len);
+ ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &short_len);
+ len = min(len, short_len);
if (ret)
goto exit;
} else if (val & AUX_IRQ_STATUS_NAT_I2C_FAIL) {
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 01da6789d044..b9cc62982196 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1365,7 +1365,7 @@ static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
struct syncobj_eventfd_entry *entry =
container_of(cb, struct syncobj_eventfd_entry, fence_cb);
- eventfd_signal(entry->ev_fd_ctx, 1);
+ eventfd_signal(entry->ev_fd_ctx);
syncobj_eventfd_entry_free(entry);
}
@@ -1388,13 +1388,13 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
entry->fence = fence;
if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) {
- eventfd_signal(entry->ev_fd_ctx, 1);
+ eventfd_signal(entry->ev_fd_ctx);
syncobj_eventfd_entry_free(entry);
} else {
ret = dma_fence_add_callback(fence, &entry->fence_cb,
syncobj_eventfd_entry_fence_func);
if (ret == -ENOENT) {
- eventfd_signal(entry->ev_fd_ctx, 1);
+ eventfd_signal(entry->ev_fd_ctx);
syncobj_eventfd_entry_free(entry);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index b21bcd40f111..62ce92772367 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -4496,7 +4496,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
intel_dp->train_set, crtc_state->lane_count);
drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
- link_status[DP_DPCD_REV]);
+ intel_dp->dpcd[DP_DPCD_REV]);
}
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 6bc26b4b06b8..ea7561ae6e13 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -36,7 +36,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
struct sg_table *st;
struct scatterlist *sg;
unsigned int npages; /* restricted by sg_alloc_table */
- int max_order = MAX_ORDER;
+ int max_order = MAX_PAGE_ORDER;
unsigned int max_segment;
gfp_t gfp;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 6b9f6cf50bf6..84c50c4c4af7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -115,7 +115,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
do {
struct page *page;
- GEM_BUG_ON(order > MAX_ORDER);
+ GEM_BUG_ON(order > MAX_PAGE_ORDER);
page = alloc_pages(GFP | __GFP_ZERO, order);
if (!page)
goto err;
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index de3f5903d1a7..c8e7dfc9f791 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -422,7 +422,7 @@ static void init_irq_map(struct intel_gvt_irq *irq)
#define MSI_CAP_DATA(offset) (offset + 8)
#define MSI_CAP_EN 0x1
-static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
+static void inject_virtual_interrupt(struct intel_vgpu *vgpu)
{
unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
u16 control, data;
@@ -434,10 +434,10 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
/* Do not generate MSI if MSIEN is disabled */
if (!(control & MSI_CAP_EN))
- return 0;
+ return;
if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
- return -EINVAL;
+ return;
trace_inject_msi(vgpu->id, addr, data);
@@ -451,10 +451,9 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
* returned and don't inject interrupt into guest.
*/
if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
- return -ESRCH;
- if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
- return -EFAULT;
- return 0;
+ return;
+ if (vgpu->msi_trigger)
+ eventfd_signal(vgpu->msi_trigger);
}
static void propagate_event(struct intel_gvt_irq *irq,
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 7b1c8de2f9cb..2d695818f006 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -772,10 +772,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* The reason field includes flags identifying what
* triggered this specific report (mostly timer
* triggered or e.g. due to a context switch).
- *
- * In MMIO triggered reports, some platforms do not set the
- * reason bit in this field and it is valid to have a reason
- * field of zero.
*/
reason = oa_report_reason(stream, report);
ctx_id = oa_context_id(stream, report32);
@@ -787,8 +783,41 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
*
* Note: that we don't clear the valid_ctx_bit so userspace can
* understand that the ID has been squashed by the kernel.
+ *
+ * Update:
+ *
+ * On XEHP platforms the behavior of context id valid bit has
+ * changed compared to prior platforms. To describe this, we
+ * define a few terms:
+ *
+ * context-switch-report: This is a report with the reason type
+ * being context-switch. It is generated when a context switches
+ * out.
+ *
+ * context-valid-bit: A bit that is set in the report ID field
+ * to indicate that a valid context has been loaded.
+ *
+ * gpu-idle: A condition characterized by a
+ * context-switch-report with context-valid-bit set to 0.
+ *
+ * On prior platforms, context-id-valid bit is set to 0 only
+ * when GPU goes idle. In all other reports, it is set to 1.
+ *
+ * On XEHP platforms, context-valid-bit is set to 1 in a context
+ * switch report if a new context switched in. For all other
+ * reports it is set to 0.
+ *
+ * This change in behavior causes an issue with MMIO triggered
+ * reports. MMIO triggered reports have the markers in the
+ * context ID field and the context-valid-bit is 0. The logic
+ * below to squash the context ID would render the report
+ * useless since the user will not be able to find it in the OA
+ * buffer. Since MMIO triggered reports exist only on XEHP,
+ * we should avoid squashing these for XEHP platforms.
*/
- if (oa_report_ctx_invalid(stream, report)) {
+
+ if (oa_report_ctx_invalid(stream, report) &&
+ GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) {
ctx_id = INVALID_CTX_ID;
oa_context_id_squash(stream, report32);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 57c7edcab602..765e49fd8911 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -392,6 +392,11 @@ void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
.destroy = drm_plane_cleanup, \
DRM_GEM_SHADOW_PLANE_FUNCS
+void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, const struct drm_format_info *format);
+void mgag200_crtc_set_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut);
+
enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state);
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
index bce267e0f7de..8d4538b71047 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -202,6 +202,11 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200er_reset_tagfifo(mdev);
+ if (crtc_state->gamma_lut)
+ mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ else
+ mgag200_crtc_set_gamma_linear(mdev, format);
+
mgag200_enable_display(mdev);
if (funcs->enable_vidrst)
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
index ac957f42abe1..56e6f986bff3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -203,6 +203,11 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200ev_set_hiprilvl(mdev);
+ if (crtc_state->gamma_lut)
+ mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ else
+ mgag200_crtc_set_gamma_linear(mdev, format);
+
mgag200_enable_display(mdev);
if (funcs->enable_vidrst)
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index bd6e573c9a1a..ff2b3c6622e7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -334,6 +334,11 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, format);
+ if (crtc_state->gamma_lut)
+ mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ else
+ mgag200_crtc_set_gamma_linear(mdev, format);
+
mgag200_enable_display(mdev);
if (funcs->enable_vidrst)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index af3ce5a6a636..0f0d59938c3a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -28,8 +28,8 @@
* This file contains setup code for the CRTC.
*/
-static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
- const struct drm_format_info *format)
+void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
+ const struct drm_format_info *format)
{
int i;
@@ -65,9 +65,9 @@ static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
}
}
-static void mgag200_crtc_set_gamma(struct mga_device *mdev,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+void mgag200_crtc_set_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
int i;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 2fa0445d8928..d1437c08645f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -187,7 +187,7 @@ struct nvkm_gsp {
void (*rpc_done)(struct nvkm_gsp *gsp, void *repv);
void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc);
- void *(*rm_ctrl_push)(struct nvkm_gsp_object *, void *argv, u32 repc);
+ int (*rm_ctrl_push)(struct nvkm_gsp_object *, void **argv, u32 repc);
void (*rm_ctrl_done)(struct nvkm_gsp_object *, void *repv);
void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc);
@@ -265,7 +265,7 @@ nvkm_gsp_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
return object->client->gsp->rm->rm_ctrl_get(object, cmd, argc);
}
-static inline void *
+static inline int
nvkm_gsp_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
{
return object->client->gsp->rm->rm_ctrl_push(object, argv, repc);
@@ -275,21 +275,24 @@ static inline void *
nvkm_gsp_rm_ctrl_rd(struct nvkm_gsp_object *object, u32 cmd, u32 repc)
{
void *argv = nvkm_gsp_rm_ctrl_get(object, cmd, repc);
+ int ret;
if (IS_ERR(argv))
return argv;
- return nvkm_gsp_rm_ctrl_push(object, argv, repc);
+ ret = nvkm_gsp_rm_ctrl_push(object, &argv, repc);
+ if (ret)
+ return ERR_PTR(ret);
+ return argv;
}
static inline int
nvkm_gsp_rm_ctrl_wr(struct nvkm_gsp_object *object, void *argv)
{
- void *repv = nvkm_gsp_rm_ctrl_push(object, argv, 0);
-
- if (IS_ERR(repv))
- return PTR_ERR(repv);
+ int ret = nvkm_gsp_rm_ctrl_push(object, &argv, 0);
+ if (ret)
+ return ret;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ca762ea55413..5057d976fa57 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -62,7 +62,7 @@ nouveau_fence_signal(struct nouveau_fence *fence)
if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- if (!--fctx->notify_ref)
+ if (atomic_dec_and_test(&fctx->notify_ref))
drop = 1;
}
@@ -103,6 +103,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
+ cancel_work_sync(&fctx->allow_block_work);
nouveau_fence_context_kill(fctx, 0);
nvif_event_dtor(&fctx->event);
fctx->dead = 1;
@@ -167,6 +168,18 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
return ret;
}
+static void
+nouveau_fence_work_allow_block(struct work_struct *work)
+{
+ struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
+ allow_block_work);
+
+ if (atomic_read(&fctx->notify_ref) == 0)
+ nvif_event_block(&fctx->event);
+ else
+ nvif_event_allow(&fctx->event);
+}
+
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
@@ -178,6 +191,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
} args;
int ret;
+ INIT_WORK(&fctx->allow_block_work, nouveau_fence_work_allow_block);
INIT_LIST_HEAD(&fctx->flip);
INIT_LIST_HEAD(&fctx->pending);
spin_lock_init(&fctx->lock);
@@ -521,15 +535,19 @@ static bool nouveau_fence_enable_signaling(struct dma_fence *f)
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
bool ret;
+ bool do_work;
- if (!fctx->notify_ref++)
- nvif_event_allow(&fctx->event);
+ if (atomic_inc_return(&fctx->notify_ref) == 0)
+ do_work = true;
ret = nouveau_fence_no_signaling(f);
if (ret)
set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
- else if (!--fctx->notify_ref)
- nvif_event_block(&fctx->event);
+ else if (atomic_dec_and_test(&fctx->notify_ref))
+ do_work = true;
+
+ if (do_work)
+ schedule_work(&fctx->allow_block_work);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 64d33ae7f356..28f5cf013b89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -3,6 +3,7 @@
#define __NOUVEAU_FENCE_H__
#include <linux/dma-fence.h>
+#include <linux/workqueue.h>
#include <nvif/event.h>
struct nouveau_drm;
@@ -45,7 +46,9 @@ struct nouveau_fence_chan {
char name[32];
struct nvif_event event;
- int notify_ref, dead, killed;
+ struct work_struct allow_block_work;
+ atomic_t notify_ref;
+ int dead, killed;
};
struct nouveau_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 457ec5db794d..b24eb1e560bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -209,7 +209,7 @@ nvkm_disp_dtor(struct nvkm_engine *engine)
nvkm_head_del(&head);
}
- if (disp->func->dtor)
+ if (disp->func && disp->func->dtor)
disp->func->dtor(disp);
return data;
@@ -243,8 +243,10 @@ nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
spin_lock_init(&disp->client.lock);
ret = nvkm_engine_ctor(&nvkm_disp, device, type, inst, true, &disp->engine);
- if (ret)
+ if (ret) {
+ disp->func = NULL;
return ret;
+ }
if (func->super) {
disp->super.wq = create_singlethread_workqueue("nvkm-disp");
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
index 298035070b3a..6a0a4d3b8902 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
@@ -282,7 +282,7 @@ r535_sor_bl_get(struct nvkm_ior *sor)
{
struct nvkm_disp *disp = sor->disp;
NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
- int lvl;
+ int ret, lvl;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
@@ -292,9 +292,11 @@ r535_sor_bl_get(struct nvkm_ior *sor)
ctrl->displayId = BIT(sor->asy.outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
lvl = ctrl->brightness;
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
@@ -649,9 +651,11 @@ r535_conn_new(struct nvkm_disp *disp, u32 id)
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(id);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return (void *)ctrl;
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ERR_PTR(ret);
+ }
list_for_each_entry(conn, &disp->conns, head) {
if (conn->index == ctrl->data[0].index) {
@@ -686,7 +690,7 @@ r535_outp_acquire(struct nvkm_outp *outp, bool hda)
struct nvkm_disp *disp = outp->disp;
struct nvkm_ior *ior;
NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *ctrl;
- int or;
+ int ret, or;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_DFP_ASSIGN_SOR, sizeof(*ctrl));
@@ -699,9 +703,11 @@ r535_outp_acquire(struct nvkm_outp *outp, bool hda)
if (hda)
ctrl->flags |= NVDEF(NV0073_CTRL, DFP_ASSIGN_SOR_FLAGS, AUDIO, OPTIMAL);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
for (or = 0; or < ARRAY_SIZE(ctrl->sorAssignListWithTag); or++) {
if (ctrl->sorAssignListWithTag[or].displayMask & BIT(outp->index)) {
@@ -727,6 +733,7 @@ static int
r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
{
NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
+ int ret;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl));
@@ -736,9 +743,11 @@ r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
ctrl->subDeviceInstance = 0;
ctrl->head = head;
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
*displayid = ctrl->displayId;
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
@@ -772,9 +781,11 @@ r535_outp_inherit(struct nvkm_outp *outp)
ctrl->subDeviceInstance = 0;
ctrl->displayId = displayid;
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return NULL;
+ }
id = ctrl->index;
proto = ctrl->protocol;
@@ -825,6 +836,7 @@ r535_outp_dfp_get_info(struct nvkm_outp *outp)
{
NV0073_CTRL_DFP_GET_INFO_PARAMS *ctrl;
struct nvkm_disp *disp = outp->disp;
+ int ret;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DFP_GET_INFO, sizeof(*ctrl));
if (IS_ERR(ctrl))
@@ -832,9 +844,11 @@ r535_outp_dfp_get_info(struct nvkm_outp *outp)
ctrl->displayId = BIT(outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
nvkm_debug(&disp->engine.subdev, "DFP %08x: flags:%08x flags2:%08x\n",
ctrl->displayId, ctrl->flags, ctrl->flags2);
@@ -858,9 +872,11 @@ r535_outp_detect(struct nvkm_outp *outp)
ctrl->subDeviceInstance = 0;
ctrl->displayMask = BIT(outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
if (ctrl->displayMask & BIT(outp->index)) {
ret = r535_outp_dfp_get_info(outp);
@@ -895,6 +911,7 @@ r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
{
NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *ctrl;
struct nvkm_disp *disp = outp->disp;
+ int ret;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID,
@@ -904,9 +921,11 @@ r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
*pid = ctrl->displayIdAssigned;
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
@@ -938,38 +957,60 @@ r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8
{
struct nvkm_disp *disp = outp->disp;
NV0073_CTRL_DP_CTRL_PARAMS *ctrl;
- int ret;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ int ret, retries;
+ u32 cmd, data;
- ctrl->subDeviceInstance = 0;
- ctrl->displayId = BIT(outp->index);
- ctrl->cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) |
- NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) |
- NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES);
- ctrl->data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) |
- NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) |
- NVVAL(NV0073_CTRL, DP_DATA, TARGET, target);
+ cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) |
+ NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) |
+ NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES);
+ data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) |
+ NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) |
+ NVVAL(NV0073_CTRL, DP_DATA, TARGET, target);
if (mst)
- ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM);
+ cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM);
if (outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
- ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE);
+ cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE);
if (target == 0 &&
(outp->dp.dpcd[DPCD_RC02] & 0x20) &&
!(outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED))
- ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES);
+ cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ /* We should retry up to 3 times, but only if GSP asks politely */
+ for (retries = 0; retries < 3; ++retries) {
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+ ctrl->retryTimeMs = 0;
+ ctrl->cmd = cmd;
+ ctrl->data = data;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret == -EAGAIN && ctrl->retryTimeMs) {
+ /*
+ * Device (likely an eDP panel) isn't ready yet, wait for the time specified
+ * by GSP before retrying again
+ */
+ nvkm_debug(&disp->engine.subdev,
+ "Waiting %dms for GSP LT panel delay before retrying\n",
+ ctrl->retryTimeMs);
+ msleep(ctrl->retryTimeMs);
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ } else {
+ /* GSP didn't say to retry, or we were successful */
+ if (ctrl->err)
+ ret = -EIO;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ break;
+ }
+ }
- ret = ctrl->err ? -EIO : 0;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return ret;
}
@@ -1036,9 +1077,11 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
memcpy(ctrl->data, data, size);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return PTR_ERR(ctrl);
+ return ret;
+ }
memcpy(data, ctrl->data, size);
*psize = ctrl->size;
@@ -1111,10 +1154,13 @@ r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize)
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+ ret = -E2BIG;
if (ctrl->bufferSize <= *psize) {
memcpy(data, ctrl->edidBuffer, ctrl->bufferSize);
*psize = ctrl->bufferSize;
@@ -1153,9 +1199,11 @@ r535_outp_new(struct nvkm_disp *disp, u32 id)
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(id);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
switch (ctrl->type) {
case NV0073_CTRL_SPECIFIC_OR_TYPE_NONE:
@@ -1229,9 +1277,11 @@ r535_outp_new(struct nvkm_disp *disp, u32 id)
ctrl->sorIndex = ~0;
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
@@ -1465,8 +1515,6 @@ r535_disp_oneinit(struct nvkm_disp *disp)
bool nvhg = acpi_check_dsm(handle, &NVHG_DSM_GUID, NVHG_DSM_REV,
1ULL << 0x00000014);
- printk(KERN_ERR "bl: nbci:%d nvhg:%d\n", nbci, nvhg);
-
if (nbci || nvhg) {
union acpi_object argv4 = {
.buffer.type = ACPI_TYPE_BUFFER,
@@ -1479,9 +1527,6 @@ r535_disp_oneinit(struct nvkm_disp *disp)
if (!obj) {
acpi_handle_info(handle, "failed to evaluate _DSM\n");
} else {
- printk(KERN_ERR "bl: obj type %d\n", obj->type);
- printk(KERN_ERR "bl: obj len %d\n", obj->package.count);
-
for (int i = 0; i < obj->package.count; i++) {
union acpi_object *elt = &obj->package.elements[i];
u32 size;
@@ -1491,12 +1536,10 @@ r535_disp_oneinit(struct nvkm_disp *disp)
else
size = 4;
- printk(KERN_ERR "elt %03d: type %d size %d\n", i, elt->type, size);
memcpy(&ctrl->backLightData[ctrl->backLightDataSize], &elt->integer.value, size);
ctrl->backLightDataSize += size;
}
- printk(KERN_ERR "bl: data size %d\n", ctrl->backLightDataSize);
ctrl->status = 0;
ACPI_FREE(obj);
}
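
The link-training hunk above only retries when GSP answers -EAGAIN and also supplies a retryTimeMs hint; anything else ends the loop immediately. A minimal sketch of that retry shape, using hypothetical names (struct lt_ctx, send_lt_rpc) rather than the driver's real interface:

    #include <linux/delay.h>
    #include <linux/errno.h>

    struct lt_ctx {
        unsigned int retry_ms;              /* filled in by the RPC layer on -EAGAIN */
    };

    int send_lt_rpc(struct lt_ctx *ctx);    /* hypothetical RPC wrapper */

    static int lt_rpc_with_retry(struct lt_ctx *ctx)
    {
        int ret, try;

        for (try = 0; try < 3; try++) {
            ret = send_lt_rpc(ctx);
            if (ret != -EAGAIN || !ctx->retry_ms)
                break;                      /* success or a hard error: stop retrying */
            msleep(ctx->retry_ms);          /* honour the delay the firmware asked for */
        }
        return ret;
    }
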
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
index d088e636edc3..b903785056b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
@@ -242,6 +242,7 @@ r535_chan_id_put(struct nvkm_chan *chan)
nvkm_memory_unref(&userd->mem);
nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
list_del(&userd->head);
+ kfree(userd);
}
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index 44fb86841c05..9ee58e2a0eb2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -70,6 +70,20 @@ struct r535_gsp_msg {
#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
+static int
+r535_rpc_status_to_errno(uint32_t rpc_status)
+{
+ switch (rpc_status) {
+ case 0x55: /* NV_ERR_NOT_READY */
+ case 0x66: /* NV_ERR_TIMEOUT_RETRY */
+ return -EAGAIN;
+ case 0x51: /* NV_ERR_NO_MEMORY */
+ return -ENOMEM;
+ default:
+ return -EINVAL;
+ }
+}
+
static void *
r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
{
@@ -298,7 +312,8 @@ retry:
struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
if (ntfy->fn == msg->function) {
- ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg));
+ if (ntfy->func)
+ ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg));
break;
}
}
@@ -583,14 +598,14 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
return rpc;
if (rpc->status) {
- nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
- ret = ERR_PTR(-EINVAL);
+ ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
+ if (PTR_ERR(ret) != -EAGAIN)
+ nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
} else {
ret = repc ? rpc->params : NULL;
}
- if (IS_ERR_OR_NULL(ret))
- nvkm_gsp_rpc_done(gsp, rpc);
+ nvkm_gsp_rpc_done(gsp, rpc);
return ret;
}
@@ -623,29 +638,34 @@ r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
{
rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params);
+ if (!repv)
+ return;
nvkm_gsp_rpc_done(object->client->gsp, rpc);
}
-static void *
-r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+static int
+r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
{
- rpc_gsp_rm_control_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
+ rpc_gsp_rm_control_v03_00 *rpc = container_of((*argv), typeof(*rpc), params);
struct nvkm_gsp *gsp = object->client->gsp;
- void *ret;
+ int ret = 0;
rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
+ if (IS_ERR_OR_NULL(rpc)) {
+ *argv = NULL;
+ return PTR_ERR(rpc);
+ }
if (rpc->status) {
- nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
- object->client->object.handle, object->handle, rpc->cmd, rpc->status);
- ret = ERR_PTR(-EINVAL);
- } else {
- ret = repc ? rpc->params : NULL;
+ ret = r535_rpc_status_to_errno(rpc->status);
+ if (ret != -EAGAIN)
+ nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
+ object->client->object.handle, object->handle, rpc->cmd, rpc->status);
}
- if (IS_ERR_OR_NULL(ret))
+ if (repc)
+ *argv = rpc->params;
+ else
nvkm_gsp_rpc_done(gsp, rpc);
return ret;
@@ -843,9 +863,11 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
- ctrl = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, ctrl, sizeof(*ctrl));
- if (WARN_ON(IS_ERR(ctrl)))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl));
+ if (WARN_ON(ret)) {
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return ret;
+ }
for (unsigned i = 0; i < ctrl->tableLen; i++) {
enum nvkm_subdev_type type;
@@ -1099,16 +1121,12 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
if (!obj)
return;
- printk(KERN_ERR "nvop: obj type %d\n", obj->type);
- printk(KERN_ERR "nvop: obj len %d\n", obj->buffer.length);
-
if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
WARN_ON(obj->buffer.length != 4))
return;
caps->status = 0;
caps->optimusCaps = *(u32 *)obj->buffer.pointer;
- printk(KERN_ERR "nvop: caps %08x\n", caps->optimusCaps);
ACPI_FREE(obj);
@@ -1135,9 +1153,6 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
if (!obj)
return;
- printk(KERN_ERR "jt: obj type %d\n", obj->type);
- printk(KERN_ERR "jt: obj len %d\n", obj->buffer.length);
-
if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
WARN_ON(obj->buffer.length != 4))
return;
@@ -1146,7 +1161,6 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
jt->jtCaps = *(u32 *)obj->buffer.pointer;
jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20;
jt->bSBIOSCaps = 0;
- printk(KERN_ERR "jt: caps %08x rev:%04x\n", jt->jtCaps, jt->jtRevId);
ACPI_FREE(obj);
@@ -1157,6 +1171,8 @@ static void
r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
MUX_METHOD_DATA_ELEMENT *part)
{
+ union acpi_object mux_arg = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list input = { 1, &mux_arg };
acpi_handle iter = NULL, handle_mux = NULL;
acpi_status status;
unsigned long long value;
@@ -1179,14 +1195,18 @@ r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
if (!handle_mux)
return;
- status = acpi_evaluate_integer(handle_mux, "MXDM", NULL, &value);
+ /* I -think- 0 means "acquire" according to nvidia's driver source */
+ input.pointer->integer.type = ACPI_TYPE_INTEGER;
+ input.pointer->integer.value = 0;
+
+ status = acpi_evaluate_integer(handle_mux, "MXDM", &input, &value);
if (ACPI_SUCCESS(status)) {
mode->acpiId = id;
mode->mode = value;
mode->status = 0;
}
- status = acpi_evaluate_integer(handle_mux, "MXDS", NULL, &value);
+ status = acpi_evaluate_integer(handle_mux, "MXDS", &input, &value);
if (ACPI_SUCCESS(status)) {
part->acpiId = id;
part->mode = value;
@@ -1232,8 +1252,8 @@ r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
dod->acpiIdListLen += sizeof(dod->acpiIdList[0]);
}
- printk(KERN_ERR "_DOD: ok! len:%d\n", dod->acpiIdListLen);
dod->status = 0;
+ kfree(output.pointer);
}
#endif
@@ -2186,7 +2206,9 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
r535_gsp_msg_mmu_fault_queued, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
-
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
ret = r535_gsp_rm_boot_ctor(gsp);
if (ret)
return ret;
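
Two related changes in the GSP code above: the message dispatcher now tolerates a NULL callback, and a handful of known-but-uninteresting events are registered with NULL handlers so they are matched and dropped quietly instead of being reported as unhandled. A small sketch of that convention (the ntfy/dispatch names are placeholders):

    #include <linux/types.h>

    struct ntfy {
        u32 fn;
        void (*func)(void *priv, u32 fn, void *data, u32 len);
        void *priv;
    };

    static void dispatch(struct ntfy *tbl, int n, u32 fn, void *data, u32 len)
    {
        for (int i = 0; i < n; i++) {
            if (tbl[i].fn != fn)
                continue;
            if (tbl[i].func)                /* NULL means "known event, ignore it" */
                tbl[i].func(tbl[i].priv, fn, data, len);
            break;
        }
    }
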
diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
index b1b423b68cdf..19eaff22e6ae 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_device_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
@@ -175,7 +175,7 @@ static void ttm_device_init_pools(struct kunit *test)
if (params->pools_init_expected) {
for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
- for (int j = 0; j <= MAX_ORDER; ++j) {
+ for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
pt = pool->caching[i].orders[j];
KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
KUNIT_EXPECT_EQ(test, pt.caching, i);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
index 2d9cae8cd984..cceaa18d4e46 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -109,7 +109,7 @@ static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
},
{
.description = "Above the allocation limit",
- .order = MAX_ORDER + 1,
+ .order = MAX_PAGE_ORDER + 1,
},
{
.description = "One page, with coherent DMA mappings enabled",
@@ -118,7 +118,7 @@ static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
},
{
.description = "Above the allocation limit, with coherent DMA mappings enabled",
- .order = MAX_ORDER + 1,
+ .order = MAX_PAGE_ORDER + 1,
.use_dma_alloc = true,
},
};
@@ -165,7 +165,7 @@ static void ttm_pool_alloc_basic(struct kunit *test)
fst_page = tt->pages[0];
last_page = tt->pages[tt->num_pages - 1];
- if (params->order <= MAX_ORDER) {
+ if (params->order <= MAX_PAGE_ORDER) {
if (params->use_dma_alloc) {
KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
@@ -182,7 +182,7 @@ static void ttm_pool_alloc_basic(struct kunit *test)
* order 0 blocks
*/
KUNIT_ASSERT_EQ(test, fst_page->private,
- min_t(unsigned int, MAX_ORDER,
+ min_t(unsigned int, MAX_PAGE_ORDER,
params->order));
KUNIT_ASSERT_EQ(test, last_page->private, 0);
}
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index fe610a3cace0..b62f420a9f96 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -65,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);
static atomic_long_t allocated_pages;
-static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
-static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
@@ -447,7 +447,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
else
gfp_flags |= GFP_HIGHUSER;
- for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
+ for (order = min_t(unsigned int, MAX_PAGE_ORDER, __fls(num_pages));
num_pages;
order = min_t(unsigned int, order, __fls(num_pages))) {
struct ttm_pool_type *pt;
@@ -568,7 +568,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
if (use_dma_alloc || nid != NUMA_NO_NODE) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j <= MAX_ORDER; ++j)
+ for (j = 0; j < NR_PAGE_ORDERS; ++j)
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
}
@@ -601,7 +601,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j <= MAX_ORDER; ++j)
+ for (j = 0; j < NR_PAGE_ORDERS; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
@@ -656,7 +656,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
unsigned int i;
seq_puts(m, "\t ");
- for (i = 0; i <= MAX_ORDER; ++i)
+ for (i = 0; i < NR_PAGE_ORDERS; ++i)
seq_printf(m, " ---%2u---", i);
seq_puts(m, "\n");
}
@@ -667,7 +667,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
{
unsigned int i;
- for (i = 0; i <= MAX_ORDER; ++i)
+ for (i = 0; i < NR_PAGE_ORDERS; ++i)
seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
seq_puts(m, "\n");
}
@@ -776,7 +776,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
spin_lock_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
- for (i = 0; i <= MAX_ORDER; ++i) {
+ for (i = 0; i < NR_PAGE_ORDERS; ++i) {
ttm_pool_type_init(&global_write_combined[i], NULL,
ttm_write_combined, i);
ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -816,7 +816,7 @@ void ttm_pool_mgr_fini(void)
{
unsigned int i;
- for (i = 0; i <= MAX_ORDER; ++i) {
+ for (i = 0; i < NR_PAGE_ORDERS; ++i) {
ttm_pool_type_fini(&global_write_combined[i]);
ttm_pool_type_fini(&global_uncached[i]);
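
The TTM hunks above are part of the MAX_ORDER to MAX_PAGE_ORDER rename: arrays that used to be sized with the inclusive "MAX_ORDER + 1" idiom and walked with "<= MAX_ORDER" now use NR_PAGE_ORDERS and an exclusive bound. Assuming the mm-side definition NR_PAGE_ORDERS == MAX_PAGE_ORDER + 1, the sizing convention looks like this (the pool names and the order value are illustrative):

    #define MAX_PAGE_ORDER  10                      /* highest valid order, example value */
    #define NR_PAGE_ORDERS  (MAX_PAGE_ORDER + 1)    /* count of valid orders 0..MAX_PAGE_ORDER */

    struct pool_type { unsigned int order; };

    static struct pool_type pools[NR_PAGE_ORDERS];  /* one slot per order */

    static void init_pools(void)
    {
        for (unsigned int i = 0; i < NR_PAGE_ORDERS; i++)   /* exclusive bound, no off-by-one */
            pools[i].order = i;
    }
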
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 05b8b8dfa9bd..36587f38dff3 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -3,6 +3,7 @@
* i2c-core.h - interfaces internal to the I2C framework
*/
+#include <linux/kconfig.h>
#include <linux/rwsem.h>
struct i2c_devinfo {
@@ -29,7 +30,8 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
*/
static inline bool i2c_in_atomic_xfer_mode(void)
{
- return system_state > SYSTEM_RUNNING && !preemptible();
+ return system_state > SYSTEM_RUNNING &&
+ (IS_ENABLED(CONFIG_PREEMPT_COUNT) ? !preemptible() : irqs_disabled());
}
static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap)
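
The i2c change above exists because preemptible() always evaluates to false on kernels built without CONFIG_PREEMPT_COUNT, so the old "!preemptible()" test reported atomic context for every late-boot and shutdown transfer; the new code falls back to irqs_disabled() in that configuration. The IS_ENABLED() shape keeps both branches compiled and type-checked while the dead one is discarded, roughly (precise_check/coarse_check are placeholders):

    #include <linux/kconfig.h>
    #include <linux/types.h>

    bool precise_check(void);       /* only meaningful when the option is enabled */
    bool coarse_check(void);        /* conservative fallback */

    static inline bool need_special_path(void)
    {
        return IS_ENABLED(CONFIG_PREEMPT_COUNT) ? precise_check() : coarse_check();
    }
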
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index dcda0afecfc5..bcf1198e8991 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -131,11 +131,12 @@ static unsigned int mwait_substates __initdata;
#define MWAIT2flg(eax) ((eax & 0xFF) << 24)
static __always_inline int __intel_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+ struct cpuidle_driver *drv,
+ int index, bool irqoff)
{
struct cpuidle_state *state = &drv->states[index];
unsigned long eax = flg2MWAIT(state->flags);
- unsigned long ecx = 1; /* break on interrupt flag */
+ unsigned long ecx = 1*irqoff; /* break on interrupt flag */
mwait_idle_with_hints(eax, ecx);
@@ -159,19 +160,13 @@ static __always_inline int __intel_idle(struct cpuidle_device *dev,
static __cpuidle int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- return __intel_idle(dev, drv, index);
+ return __intel_idle(dev, drv, index, true);
}
static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- int ret;
-
- raw_local_irq_enable();
- ret = __intel_idle(dev, drv, index);
- raw_local_irq_disable();
-
- return ret;
+ return __intel_idle(dev, drv, index, false);
}
static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
@@ -184,7 +179,7 @@ static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
if (smt_active)
__update_spec_ctrl(0);
- ret = __intel_idle(dev, drv, index);
+ ret = __intel_idle(dev, drv, index, true);
if (smt_active)
__update_spec_ctrl(spec_ctrl);
@@ -196,7 +191,7 @@ static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
fpu_idle_fpregs();
- return __intel_idle(dev, drv, index);
+ return __intel_idle(dev, drv, index, true);
}
/**
@@ -923,6 +918,35 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.enter = NULL }
};
+static struct cpuidle_state mtl_l_cstates[] __initdata = {
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 140,
+ .target_residency = 420,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C10",
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 310,
+ .target_residency = 930,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state gmt_cstates[] __initdata = {
{
.name = "C1",
@@ -1242,6 +1266,72 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.enter = NULL }
};
+static struct cpuidle_state grr_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 2,
+ .target_residency = 10,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6S",
+ .desc = "MWAIT 0x22",
+ .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 140,
+ .target_residency = 500,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
+static struct cpuidle_state srf_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 2,
+ .target_residency = 10,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6S",
+ .desc = "MWAIT 0x22",
+ .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 270,
+ .target_residency = 700,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6SP",
+ .desc = "MWAIT 0x23",
+ .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 310,
+ .target_residency = 900,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static const struct idle_cpu idle_cpu_nehalem __initconst = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
@@ -1349,6 +1439,10 @@ static const struct idle_cpu idle_cpu_adl_l __initconst = {
.state_table = adl_l_cstates,
};
+static const struct idle_cpu idle_cpu_mtl_l __initconst = {
+ .state_table = mtl_l_cstates,
+};
+
static const struct idle_cpu idle_cpu_gmt __initconst = {
.state_table = gmt_cstates,
};
@@ -1387,6 +1481,18 @@ static const struct idle_cpu idle_cpu_snr __initconst = {
.use_acpi = true,
};
+static const struct idle_cpu idle_cpu_grr __initconst = {
+ .state_table = grr_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
+static const struct idle_cpu idle_cpu_srf __initconst = {
+ .state_table = srf_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem),
@@ -1423,6 +1529,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l),
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &idle_cpu_mtl_l),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &idle_cpu_gmt),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr),
@@ -1432,6 +1539,8 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &idle_cpu_grr),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &idle_cpu_srf),
{}
};
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index 9d316fdc6f9a..a155519a862f 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -11,8 +11,6 @@
#include <linux/types.h>
/* PCIe device related definition. */
-#define PCI_VENDOR_ID_ALIBABA 0x1ded
-
#define ERDMA_PCI_WIDTH 64
#define ERDMA_FUNC_BAR 0
#define ERDMA_MISX_BAR 2
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 8ba53edf2311..869369cb5b5f 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2498,7 +2498,7 @@ static void dispatch_event_fd(struct list_head *fd_list,
list_for_each_entry_rcu(item, fd_list, xa_list) {
if (item->eventfd)
- eventfd_signal(item->eventfd, 1);
+ eventfd_signal(item->eventfd);
else
deliver_event(item, data);
}
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
index 852aeb0b2c07..07c866f42296 100644
--- a/drivers/input/rmi4/rmi_spi.c
+++ b/drivers/input/rmi4/rmi_spi.c
@@ -375,7 +375,7 @@ static int rmi_spi_probe(struct spi_device *spi)
struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
int error;
- if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
+ if (spi->master->flags & SPI_CONTROLLER_HALF_DUPLEX)
return -EINVAL;
rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index fcc987f5d4ed..b9a0523cbb0a 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3357,7 +3357,7 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
data->irq_2_irte.devid = devid;
data->irq_2_irte.index = index + sub_handle;
- iommu->irte_ops->prepare(data->entry, apic->delivery_mode,
+ iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED,
apic->dest_mode_logical, irq_cfg->vector,
irq_cfg->dest_apicid, devid);
@@ -3634,7 +3634,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
entry->lo.fields_remap.valid = valid;
entry->lo.fields_remap.dm = apic->dest_mode_logical;
- entry->lo.fields_remap.int_type = apic->delivery_mode;
+ entry->lo.fields_remap.int_type = APIC_DELIVERY_MODE_FIXED;
entry->hi.fields.vector = cfg->vector;
entry->lo.fields_remap.destination =
APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 961205ba86d2..925ac6a47bce 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -188,7 +188,7 @@
#ifdef CONFIG_CMA_ALIGNMENT
#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
#else
-#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER)
+#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_PAGE_ORDER)
#endif
/*
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 85163a83df2f..e59f50e11ea8 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -884,7 +884,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
struct page **pages;
unsigned int i = 0, nid = dev_to_node(dev);
- order_mask &= GENMASK(MAX_ORDER, 0);
+ order_mask &= GENMASK(MAX_PAGE_ORDER, 0);
if (!order_mask)
return NULL;
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 29b9e55dcf26..566297bc87dd 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1112,7 +1112,7 @@ static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
* irq migration in the presence of interrupt-remapping.
*/
irte->trigger_mode = 0;
- irte->dlvry_mode = apic->delivery_mode;
+ irte->dlvry_mode = APIC_DELIVERY_MODE_FIXED;
irte->vector = vector;
irte->dest_id = IRTE_DEST(dest);
irte->redir_hint = 1;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 9a7a74239eab..d097001c1e3e 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2465,8 +2465,8 @@ static bool its_parse_indirect_baser(struct its_node *its,
* feature is not supported by hardware.
*/
new_order = max_t(u32, get_order(esz << ids), new_order);
- if (new_order > MAX_ORDER) {
- new_order = MAX_ORDER;
+ if (new_order > MAX_PAGE_ORDER) {
+ new_order = MAX_PAGE_ORDER;
ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
&its->phys_base, its_base_type_string[type],
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index 7124565234a5..cda5838d2232 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -14,6 +14,7 @@
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -322,8 +323,10 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
struct device *dev = &pdev->dev;
struct irq_domain *parent_domain;
struct generic_pm_domain *genpd;
+ struct device_node *msgram_np;
struct qcom_mpm_priv *priv;
unsigned int pin_cnt;
+ struct resource res;
int i, irq;
int ret;
@@ -374,9 +377,26 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
raw_spin_lock_init(&priv->lock);
- priv->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ /* If we have a handle to an RPM message ram partition, use it. */
+ msgram_np = of_parse_phandle(np, "qcom,rpm-msg-ram", 0);
+ if (msgram_np) {
+ ret = of_address_to_resource(msgram_np, 0, &res);
+ if (ret) {
+ of_node_put(msgram_np);
+ return ret;
+ }
+
+ /* Don't use devm_ioremap_resource, as we're accessing a shared region. */
+ priv->base = devm_ioremap(dev, res.start, resource_size(&res));
+ of_node_put(msgram_np);
+ if (!priv->base)
+ return -ENOMEM;
+ } else {
+ /* Otherwise, fall back to simple MMIO. */
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+ }
for (i = 0; i < priv->reg_stride; i++) {
qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0);
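
The MPM probe above prefers an optional qcom,rpm-msg-ram phandle and maps it with devm_ioremap() because the message RAM is shared with other users, so claiming the region exclusively would be wrong; plain MMIO remains the fallback. A compressed sketch of that lookup with the error handling reduced to the essentials (the helper name is made up):

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/of.h>
    #include <linux/of_address.h>
    #include <linux/platform_device.h>

    static void __iomem *map_msgram_or_mmio(struct device *dev, struct device_node *np,
                                            struct platform_device *pdev)
    {
        struct device_node *msgram = of_parse_phandle(np, "qcom,rpm-msg-ram", 0);
        struct resource res;
        void __iomem *base;
        int ret;

        if (!msgram)
            return devm_platform_ioremap_resource(pdev, 0);     /* plain MMIO fallback */

        ret = of_address_to_resource(msgram, 0, &res);
        of_node_put(msgram);
        if (ret)
            return ERR_PTR(ret);

        /* shared region: map it without claiming the resource exclusively */
        base = devm_ioremap(dev, res.start, resource_size(&res));
        return base ?: ERR_PTR(-ENOMEM);
    }
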
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index fe8d516f3614..9494fc26259c 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -18,6 +18,7 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
#define IRQC_IRQ_START 1
#define IRQC_IRQ_COUNT 8
@@ -28,8 +29,7 @@
#define ISCR 0x10
#define IITSR 0x14
#define TSCR 0x20
-#define TITSR0 0x24
-#define TITSR1 0x28
+#define TITSR(n) (0x24 + (n) * 4)
#define TITSR0_MAX_INT 16
#define TITSEL_WIDTH 0x2
#define TSSR(n) (0x30 + ((n) * 4))
@@ -53,15 +53,33 @@
#define IITSR_IITSEL_EDGE_BOTH 3
#define IITSR_IITSEL_MASK(n) IITSR_IITSEL((n), 3)
-#define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
-#define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
+#define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
+#define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
-struct rzg2l_irqc_priv {
- void __iomem *base;
- struct irq_fwspec fwspec[IRQC_NUM_IRQ];
- raw_spinlock_t lock;
+/**
+ * struct rzg2l_irqc_reg_cache - registers cache (necessary for suspend/resume)
+ * @iitsr: IITSR register
+ * @titsr: TITSR registers
+ */
+struct rzg2l_irqc_reg_cache {
+ u32 iitsr;
+ u32 titsr[2];
};
+/**
+ * struct rzg2l_irqc_priv - IRQ controller private data structure
+ * @base: Controller's base address
+ * @fwspec: IRQ firmware specific data
+ * @lock: Lock to serialize access to hardware registers
+ * @cache: Registers cache for suspend/resume
+ */
+static struct rzg2l_irqc_priv {
+ void __iomem *base;
+ struct irq_fwspec fwspec[IRQC_NUM_IRQ];
+ raw_spinlock_t lock;
+ struct rzg2l_irqc_reg_cache cache;
+} *rzg2l_irqc_data;
+
static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
{
return data->domain->host_data;
@@ -72,11 +90,17 @@ static void rzg2l_irq_eoi(struct irq_data *d)
unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
u32 bit = BIT(hw_irq);
- u32 reg;
+ u32 iitsr, iscr;
- reg = readl_relaxed(priv->base + ISCR);
- if (reg & bit)
- writel_relaxed(reg & ~bit, priv->base + ISCR);
+ iscr = readl_relaxed(priv->base + ISCR);
+ iitsr = readl_relaxed(priv->base + IITSR);
+
+ /*
+ * ISCR can only be cleared if the type is falling-edge, rising-edge or
+ * falling/rising-edge.
+ */
+ if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq)))
+ writel_relaxed(iscr & ~bit, priv->base + ISCR);
}
static void rzg2l_tint_eoi(struct irq_data *d)
@@ -188,8 +212,7 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
unsigned int hwirq = irqd_to_hwirq(d);
u32 titseln = hwirq - IRQC_TINT_START;
- u32 offset;
- u8 sense;
+ u8 index, sense;
u32 reg;
switch (type & IRQ_TYPE_SENSE_MASK) {
@@ -205,17 +228,17 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- offset = TITSR0;
+ index = 0;
if (titseln >= TITSR0_MAX_INT) {
titseln -= TITSR0_MAX_INT;
- offset = TITSR1;
+ index = 1;
}
raw_spin_lock(&priv->lock);
- reg = readl_relaxed(priv->base + offset);
+ reg = readl_relaxed(priv->base + TITSR(index));
reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
reg |= sense << (titseln * TITSEL_WIDTH);
- writel_relaxed(reg, priv->base + offset);
+ writel_relaxed(reg, priv->base + TITSR(index));
raw_spin_unlock(&priv->lock);
return 0;
@@ -236,6 +259,38 @@ static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type)
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
+static int rzg2l_irqc_irq_suspend(void)
+{
+ struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
+ void __iomem *base = rzg2l_irqc_data->base;
+
+ cache->iitsr = readl_relaxed(base + IITSR);
+ for (u8 i = 0; i < 2; i++)
+ cache->titsr[i] = readl_relaxed(base + TITSR(i));
+
+ return 0;
+}
+
+static void rzg2l_irqc_irq_resume(void)
+{
+ struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
+ void __iomem *base = rzg2l_irqc_data->base;
+
+ /*
+ * Restore only interrupt type. TSSRx will be restored at the
+ * request of pin controller to avoid spurious interrupts due
+ * to invalid PIN states.
+ */
+ for (u8 i = 0; i < 2; i++)
+ writel_relaxed(cache->titsr[i], base + TITSR(i));
+ writel_relaxed(cache->iitsr, base + IITSR);
+}
+
+static struct syscore_ops rzg2l_irqc_syscore_ops = {
+ .suspend = rzg2l_irqc_irq_suspend,
+ .resume = rzg2l_irqc_irq_resume,
+};
+
static const struct irq_chip irqc_chip = {
.name = "rzg2l-irqc",
.irq_eoi = rzg2l_irqc_eoi,
@@ -321,7 +376,6 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
struct irq_domain *irq_domain, *parent_domain;
struct platform_device *pdev;
struct reset_control *resetn;
- struct rzg2l_irqc_priv *priv;
int ret;
pdev = of_find_device_by_node(node);
@@ -334,15 +388,15 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
return -ENODEV;
}
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ rzg2l_irqc_data = devm_kzalloc(&pdev->dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
+ if (!rzg2l_irqc_data)
return -ENOMEM;
- priv->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
+ if (IS_ERR(rzg2l_irqc_data->base))
+ return PTR_ERR(rzg2l_irqc_data->base);
- ret = rzg2l_irqc_parse_interrupts(priv, node);
+ ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node);
if (ret) {
dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
return ret;
@@ -365,17 +419,19 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
goto pm_disable;
}
- raw_spin_lock_init(&priv->lock);
+ raw_spin_lock_init(&rzg2l_irqc_data->lock);
irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
node, &rzg2l_irqc_domain_ops,
- priv);
+ rzg2l_irqc_data);
if (!irq_domain) {
dev_err(&pdev->dev, "failed to add irq domain\n");
ret = -ENOMEM;
goto pm_put;
}
+ register_syscore_ops(&rzg2l_irqc_syscore_ops);
+
return 0;
pm_put:
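
The RZ/G2L controller above gains a suspend/resume path through syscore_ops, the usual choice for an irqchip: the callbacks run late, on one CPU with interrupts disabled and without a struct device, so the driver keeps a single static context and only saves and restores the trigger-configuration registers. The minimal shape of that pattern (my_irqc names are placeholders, the 0x24 offset mirrors TITSR(n) from the patch):

    #include <linux/io.h>
    #include <linux/syscore_ops.h>
    #include <linux/types.h>

    static struct {
        void __iomem *base;
        u32 cfg[2];                     /* cached trigger-type registers */
    } my_irqc;

    static int my_irqc_suspend(void)
    {
        for (int i = 0; i < 2; i++)
            my_irqc.cfg[i] = readl_relaxed(my_irqc.base + 0x24 + i * 4);
        return 0;
    }

    static void my_irqc_resume(void)
    {
        /* restore the trigger configuration before the lines are used again */
        for (int i = 0; i < 2; i++)
            writel_relaxed(my_irqc.cfg[i], my_irqc.base + 0x24 + i * 4);
    }

    static struct syscore_ops my_irqc_syscore_ops = {
        .suspend = my_irqc_suspend,
        .resume  = my_irqc_resume,
    };
    /* register_syscore_ops(&my_irqc_syscore_ops) is called once from init */
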
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 0c18d1f1e264..f9d6fce4da33 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -12,6 +12,7 @@
* Kevin Chea
*/
+#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
@@ -19,8 +20,6 @@
#include <linux/irqchip/xtensa-pic.h>
#include <linux/of.h>
-unsigned int cached_irq_mask;
-
/*
* Device Tree IRQ specifier translation function which works with one or
* two cell bindings. First cell value maps directly to the hwirq number.
@@ -44,34 +43,30 @@ static const struct irq_domain_ops xtensa_irq_domain_ops = {
static void xtensa_irq_mask(struct irq_data *d)
{
- cached_irq_mask &= ~(1 << d->hwirq);
- xtensa_set_sr(cached_irq_mask, intenable);
-}
+ u32 irq_mask;
-static void xtensa_irq_unmask(struct irq_data *d)
-{
- cached_irq_mask |= 1 << d->hwirq;
- xtensa_set_sr(cached_irq_mask, intenable);
+ irq_mask = xtensa_get_sr(intenable);
+ irq_mask &= ~BIT(d->hwirq);
+ xtensa_set_sr(irq_mask, intenable);
}
-static void xtensa_irq_enable(struct irq_data *d)
+static void xtensa_irq_unmask(struct irq_data *d)
{
- xtensa_irq_unmask(d);
-}
+ u32 irq_mask;
-static void xtensa_irq_disable(struct irq_data *d)
-{
- xtensa_irq_mask(d);
+ irq_mask = xtensa_get_sr(intenable);
+ irq_mask |= BIT(d->hwirq);
+ xtensa_set_sr(irq_mask, intenable);
}
static void xtensa_irq_ack(struct irq_data *d)
{
- xtensa_set_sr(1 << d->hwirq, intclear);
+ xtensa_set_sr(BIT(d->hwirq), intclear);
}
static int xtensa_irq_retrigger(struct irq_data *d)
{
- unsigned int mask = 1u << d->hwirq;
+ unsigned int mask = BIT(d->hwirq);
if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
return 0;
@@ -81,8 +76,6 @@ static int xtensa_irq_retrigger(struct irq_data *d)
static struct irq_chip xtensa_irq_chip = {
.name = "xtensa",
- .irq_enable = xtensa_irq_enable,
- .irq_disable = xtensa_irq_disable,
.irq_mask = xtensa_irq_mask,
.irq_unmask = xtensa_irq_unmask,
.irq_ack = xtensa_irq_ack,
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index f03d7dba270c..13c65b7e1ed6 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1170,7 +1170,7 @@ static void __cache_size_refresh(void)
* If the allocation may fail we use __get_free_pages. Memory fragmentation
* won't have a fatal effect here, but it just causes flushes of some other
* buffers and more I/O will be performed. Don't use __get_free_pages if it
- * always fails (i.e. order > MAX_ORDER).
+ * always fails (i.e. order > MAX_PAGE_ORDER).
*
* If the allocation shouldn't fail we use __vmalloc. This is only for the
* initial reserve allocation, so there's no risk of wasting all vmalloc
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 2ae8560b6a14..855b482cbff1 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1673,7 +1673,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
unsigned int remaining_size;
- unsigned int order = MAX_ORDER;
+ unsigned int order = MAX_PAGE_ORDER;
retry:
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index f57fb821528d..7916ed9f10e8 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -434,7 +434,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b
remaining_size = size;
- order = MAX_ORDER;
+ order = MAX_PAGE_ORDER;
while (remaining_size) {
struct page *pages;
unsigned size_to_add, to_copy;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 23c32cd1f1d8..8dcabf84d866 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2675,7 +2675,7 @@ static int lock_fs(struct mapped_device *md)
WARN_ON(test_bit(DMF_FROZEN, &md->flags));
- r = freeze_bdev(md->disk->part0);
+ r = bdev_freeze(md->disk->part0);
if (!r)
set_bit(DMF_FROZEN, &md->flags);
return r;
@@ -2685,7 +2685,7 @@ static void unlock_fs(struct mapped_device *md)
{
if (!test_bit(DMF_FROZEN, &md->flags))
return;
- thaw_bdev(md->disk->part0);
+ bdev_thaw(md->disk->part0);
clear_bit(DMF_FROZEN, &md->flags);
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 90ce58fd629e..925c19ee513b 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -2255,7 +2255,7 @@ config MFD_VEXPRESS_SYSREG
config RAVE_SP_CORE
tristate "RAVE SP MCU core driver"
depends on SERIAL_DEV_BUS
- select CRC_CCITT
+ select CRC_ITU_T
help
Select this to get support for the Supervisory Processor
device found on several devices in RAVE line of hardware.
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index da50eba10014..f62422740de2 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -9,7 +9,7 @@
*/
#include <linux/atomic.h>
-#include <linux/crc-ccitt.h>
+#include <linux/crc-itu-t.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
@@ -251,7 +251,7 @@ static void csum_8b2c(const u8 *buf, size_t size, u8 *crc)
static void csum_ccitt(const u8 *buf, size_t size, u8 *crc)
{
- const u16 calculated = crc_ccitt_false(0xffff, buf, size);
+ const u16 calculated = crc_itu_t(0xffff, buf, size);
/*
* While the rest of the wire protocol is little-endian,
diff --git a/drivers/mfd/tps6594-spi.c b/drivers/mfd/tps6594-spi.c
index f4b4f37f957f..24b72847e3f5 100644
--- a/drivers/mfd/tps6594-spi.c
+++ b/drivers/mfd/tps6594-spi.c
@@ -98,7 +98,7 @@ static int tps6594_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, tps);
tps->dev = dev;
- tps->reg = spi->chip_select;
+ tps->reg = spi_get_chipselect(spi, 0);
tps->irq = spi->irq;
tps->regmap = devm_regmap_init(dev, NULL, spi, &tps6594_spi_regmap_config);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 0562071cdd4a..6ad0ab892675 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -836,7 +836,8 @@ static inline bool cxl_is_power8(void)
{
if ((pvr_version_is(PVR_POWER8E)) ||
(pvr_version_is(PVR_POWER8NVL)) ||
- (pvr_version_is(PVR_POWER8)))
+ (pvr_version_is(PVR_POWER8)) ||
+ (pvr_version_is(PVR_HX_C2000)))
return true;
return false;
}
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 55fc5b80e649..4441aca2280a 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -443,7 +443,7 @@ static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
if (vsize == 0)
return -EINVAL;
- if (get_order(vsize) > MAX_ORDER)
+ if (get_order(vsize) > MAX_PAGE_ORDER)
return -ENOMEM;
dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 1c798d6b2dfb..a2c4a9b4f871 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -210,7 +210,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
dma_addr_t *dma_handle)
{
- if (get_order(size) > MAX_ORDER)
+ if (get_order(size) > MAX_PAGE_ORDER)
return NULL;
return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
@@ -308,7 +308,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
sgl->write = write;
sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);
- if (get_order(sgl->sgl_size) > MAX_ORDER) {
+ if (get_order(sgl->sgl_size) > MAX_PAGE_ORDER) {
dev_err(&pci_dev->dev,
"[%s] err: too much memory requested!\n", __func__);
return ret;
diff --git a/drivers/misc/ocxl/afu_irq.c b/drivers/misc/ocxl/afu_irq.c
index a06920b7e049..36f7379b8e2d 100644
--- a/drivers/misc/ocxl/afu_irq.c
+++ b/drivers/misc/ocxl/afu_irq.c
@@ -57,7 +57,7 @@ EXPORT_SYMBOL_GPL(ocxl_irq_set_handler);
static irqreturn_t afu_irq_handler(int virq, void *data)
{
- struct afu_irq *irq = (struct afu_irq *) data;
+ struct afu_irq *irq = data;
trace_ocxl_afu_irq_receive(virq);
diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c
index 7f83116ae11a..cded7d1caf32 100644
--- a/drivers/misc/ocxl/context.c
+++ b/drivers/misc/ocxl/context.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL_GPL(ocxl_context_alloc);
*/
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
- struct ocxl_context *ctx = (struct ocxl_context *) data;
+ struct ocxl_context *ctx = data;
mutex_lock(&ctx->xsl_error_lock);
ctx->xsl_error.addr = addr;
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index ac69b7f361f5..7eb74711ac96 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -184,7 +184,7 @@ static irqreturn_t irq_handler(void *private)
{
struct eventfd_ctx *ev_ctx = private;
- eventfd_signal(ev_ctx, 1);
+ eventfd_signal(ev_ctx);
return IRQ_HANDLED;
}
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index c06c699c0e7b..03402203cacd 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -188,7 +188,7 @@ ack:
static irqreturn_t xsl_fault_handler(int irq, void *data)
{
- struct ocxl_link *link = (struct ocxl_link *) data;
+ struct ocxl_link *link = data;
struct spa *spa = link->spa;
u64 dsisr, dar, pe_handle;
struct pe_data *pe_data;
@@ -483,7 +483,7 @@ static void release_xsl(struct kref *ref)
void ocxl_link_release(struct pci_dev *dev, void *link_handle)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
mutex_lock(&links_list_lock);
kref_put(&link->ref, release_xsl);
@@ -540,7 +540,7 @@ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
void *xsl_err_data)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
int pe_handle, rc = 0;
@@ -630,7 +630,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_add_pe);
int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
int pe_handle, rc;
@@ -666,7 +666,7 @@ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
int ocxl_link_remove_pe(void *link_handle, int pasid)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
struct pe_data *pe_data;
@@ -752,7 +752,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);
int ocxl_link_irq_alloc(void *link_handle, int *hw_irq)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
int irq;
if (atomic_dec_if_positive(&link->irq_available) < 0)
@@ -771,7 +771,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);
void ocxl_link_free_irq(void *link_handle, int hw_irq)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
xive_native_free_irq(hw_irq);
atomic_inc(&link->irq_available);
diff --git a/drivers/misc/ocxl/main.c b/drivers/misc/ocxl/main.c
index ef73cf35dda2..658974143c3c 100644
--- a/drivers/misc/ocxl/main.c
+++ b/drivers/misc/ocxl/main.c
@@ -7,7 +7,7 @@
static int __init init_ocxl(void)
{
- int rc = 0;
+ int rc;
if (!tlbie_capable)
return -EINVAL;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index f9a5cffa64b1..134c36edb6cf 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -851,9 +851,10 @@ static const struct block_device_operations mmc_bdops = {
static int mmc_blk_part_switch_pre(struct mmc_card *card,
unsigned int part_type)
{
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
- if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+ if ((part_type & mask) == mask) {
if (card->ext_csd.cmdq_en) {
ret = mmc_cmdq_disable(card);
if (ret)
@@ -868,9 +869,10 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
static int mmc_blk_part_switch_post(struct mmc_card *card,
unsigned int part_type)
{
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
- if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+ if ((part_type & mask) == mask) {
mmc_retune_unpause(card->host);
if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
ret = mmc_cmdq_enable(card);
@@ -3145,4 +3147,3 @@ module_exit(mmc_blk_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
-
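
The RPMB hunks above switch from an exact compare to a mask test, so the pre/post switch hooks still fire when other partition-config bits are set alongside the RPMB access bits. Reduced to its essence (the 0x3 value below is illustrative, not quoted from ext_csd.h):

    #include <linux/types.h>

    #define ACC_MASK_RPMB   0x3u        /* illustrative RPMB access bits */

    static bool is_rpmb_access(unsigned int part_type)
    {
        /* true even if other partition-config bits are also set */
        return (part_type & ACC_MASK_RPMB) == ACC_MASK_RPMB;
    }
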
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 096093f7be00..2f51db4df1a8 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -692,6 +692,7 @@ EXPORT_SYMBOL(mmc_remove_host);
*/
void mmc_free_host(struct mmc_host *host)
{
+ cancel_delayed_work_sync(&host->detect);
mmc_pwrseq_free(host);
put_device(&host->class_dev);
}
diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
index 528ec8166e7c..1ed9731e77ef 100644
--- a/drivers/mmc/host/meson-mx-sdhc-mmc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
@@ -269,7 +269,7 @@ static int meson_mx_sdhc_enable_clks(struct mmc_host *mmc)
static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct meson_mx_sdhc_host *host = mmc_priv(mmc);
- u32 rx_clk_phase;
+ u32 val, rx_clk_phase;
int ret;
meson_mx_sdhc_disable_clks(mmc);
@@ -290,27 +290,11 @@ static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios)
mmc->actual_clock = clk_get_rate(host->sd_clk);
/*
- * according to Amlogic the following latching points are
- * selected with empirical values, there is no (known) formula
- * to calculate these.
+ * Phase 90 should work in most cases. For data transmission,
+ * meson_mx_sdhc_execute_tuning() will find an accurate value
*/
- if (mmc->actual_clock > 100000000) {
- rx_clk_phase = 1;
- } else if (mmc->actual_clock > 45000000) {
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
- rx_clk_phase = 15;
- else
- rx_clk_phase = 11;
- } else if (mmc->actual_clock >= 25000000) {
- rx_clk_phase = 15;
- } else if (mmc->actual_clock > 5000000) {
- rx_clk_phase = 23;
- } else if (mmc->actual_clock > 1000000) {
- rx_clk_phase = 55;
- } else {
- rx_clk_phase = 1061;
- }
-
+ regmap_read(host->regmap, MESON_SDHC_CLKC, &val);
+ rx_clk_phase = FIELD_GET(MESON_SDHC_CLKC_CLK_DIV, val) / 4;
regmap_update_bits(host->regmap, MESON_SDHC_CLK2,
MESON_SDHC_CLK2_RX_CLK_PHASE,
FIELD_PREP(MESON_SDHC_CLK2_RX_CLK_PHASE,
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index cc333ad67cac..b0cccef4cfbf 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1322,7 +1322,7 @@ static int mmc_spi_probe(struct spi_device *spi)
/* We rely on full duplex transfers, mostly to reduce
* per-transfer overheads (by making fewer transfers).
*/
- if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
+ if (spi->master->flags & SPI_CONTROLLER_HALF_DUPLEX)
return -EINVAL;
/* MMC and SD specs only seem to care that sampling is on the
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 6b8a57e2d20f..bed57a1c64b5 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -239,15 +239,19 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
sdhci_enable_clk(host, div);
+ val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
+ mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
/* Enable CLK_AUTO when the clock is greater than 400K. */
if (clk > 400000) {
- val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
- mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN |
- SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
if (mask != (val & mask)) {
val |= mask;
sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
}
+ } else {
+ if (val & mask) {
+ val &= ~mask;
+ sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
+ }
}
}
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index a7ec947a3ebb..53019d313db7 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -719,7 +719,7 @@ static int vmu_can_unload(struct maple_device *mdev)
card = maple_get_drvdata(mdev);
for (x = 0; x < card->partitions; x++) {
mtd = &((card->mtd)[x]);
- if (mtd->usecount > 0)
+ if (kref_read(&mtd->refcnt))
return 0;
}
return 1;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ff18636e0889..5bc32108ca03 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -463,7 +463,7 @@ static void blktrans_notify_add(struct mtd_info *mtd)
{
struct mtd_blktrans_ops *tr;
- if (mtd->type == MTD_ABSENT)
+ if (mtd->type == MTD_ABSENT || mtd->type == MTD_UBIVOLUME)
return;
list_for_each_entry(tr, &blktrans_majors, list)
@@ -503,7 +503,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
mutex_lock(&mtd_table_mutex);
list_add(&tr->list, &blktrans_majors);
mtd_for_each_device(mtd)
- if (mtd->type != MTD_ABSENT)
+ if (mtd->type != MTD_ABSENT && mtd->type != MTD_UBIVOLUME)
tr->add_mtd(tr, mtd);
mutex_unlock(&mtd_table_mutex);
return 0;
diff --git a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
index 9596629000f4..968c5b674b08 100644
--- a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
@@ -85,7 +85,7 @@ MODULE_DEVICE_TABLE(of, bcm63138_nand_of_match);
static struct platform_driver bcm63138_nand_driver = {
.probe = bcm63138_nand_probe,
- .remove = brcmnand_remove,
+ .remove_new = brcmnand_remove,
.driver = {
.name = "bcm63138_nand",
.pm = &brcmnand_pm_ops,
diff --git a/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c
index a06cd87f839a..05b7b653bdf3 100644
--- a/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c
@@ -117,7 +117,7 @@ MODULE_DEVICE_TABLE(of, bcm6368_nand_of_match);
static struct platform_driver bcm6368_nand_driver = {
.probe = bcm6368_nand_probe,
- .remove = brcmnand_remove,
+ .remove_new = brcmnand_remove,
.driver = {
.name = "bcm6368_nand",
.pm = &brcmnand_pm_ops,
diff --git a/drivers/mtd/nand/raw/brcmnand/bcma_nand.c b/drivers/mtd/nand/raw/brcmnand/bcma_nand.c
index dd27977919fb..4e7e435ba339 100644
--- a/drivers/mtd/nand/raw/brcmnand/bcma_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/bcma_nand.c
@@ -119,7 +119,7 @@ static int brcmnand_bcma_nand_probe(struct platform_device *pdev)
static struct platform_driver brcmnand_bcma_nand_driver = {
.probe = brcmnand_bcma_nand_probe,
- .remove = brcmnand_remove,
+ .remove_new = brcmnand_remove,
.driver = {
.name = "bcma_brcmnand",
.pm = &brcmnand_pm_ops,
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 440bef477930..8faca43ae1ff 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -625,6 +625,8 @@ enum {
/* Only for v7.2 */
#define ACC_CONTROL_ECC_EXT_SHIFT 13
+static u8 brcmnand_status(struct brcmnand_host *host);
+
static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
{
#if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA)
@@ -1022,19 +1024,6 @@ static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
return -1;
}
-static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
-{
- struct brcmnand_controller *ctrl = host->ctrl;
- int shift = brcmnand_sector_1k_shift(ctrl);
- u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
- BRCMNAND_CS_ACC_CONTROL);
-
- if (shift < 0)
- return 0;
-
- return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
-}
-
static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
{
struct brcmnand_controller *ctrl = host->ctrl;
@@ -1061,10 +1050,11 @@ enum {
CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
};
-static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
+static int bcmnand_ctrl_poll_status(struct brcmnand_host *host,
u32 mask, u32 expected_val,
unsigned long timeout_ms)
{
+ struct brcmnand_controller *ctrl = host->ctrl;
unsigned long limit;
u32 val;
@@ -1073,6 +1063,9 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
limit = jiffies + msecs_to_jiffies(timeout_ms);
do {
+ if (mask & INTFC_FLASH_STATUS)
+ brcmnand_status(host);
+
val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
if ((val & mask) == expected_val)
return 0;
@@ -1084,6 +1077,9 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
* do a final check after time out in case the CPU was busy and the driver
* did not get enough time to perform the polling to avoid false alarms
*/
+ if (mask & INTFC_FLASH_STATUS)
+ brcmnand_status(host);
+
val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
if ((val & mask) == expected_val)
return 0;
@@ -1379,7 +1375,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
* make sure ctrl/flash ready before and after
* changing state of #WP pin
*/
- ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
+ ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY |
NAND_STATUS_READY,
NAND_CTRL_RDY |
NAND_STATUS_READY, 0);
@@ -1387,9 +1383,10 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
return;
brcmnand_set_wp(ctrl, wp);
- nand_status_op(chip, NULL);
+ /* force controller operation to update internal copy of NAND chip status */
+ brcmnand_status(host);
/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
- ret = bcmnand_ctrl_poll_status(ctrl,
+ ret = bcmnand_ctrl_poll_status(host,
NAND_CTRL_RDY |
NAND_STATUS_READY |
NAND_STATUS_WP,
@@ -1629,13 +1626,13 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
*/
if (oops_in_progress) {
if (ctrl->cmd_pending &&
- bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
+ bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
return;
} else
BUG_ON(ctrl->cmd_pending != 0);
ctrl->cmd_pending = cmd;
- ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+ ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
WARN_ON(ret);
mb(); /* flush previous writes */
@@ -1643,16 +1640,6 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
cmd << brcmnand_cmd_shift(ctrl));
}
-/***********************************************************************
- * NAND MTD API: read/program/erase
- ***********************************************************************/
-
-static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
- unsigned int ctrl)
-{
- /* intentionally left blank */
-}
-
static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
@@ -1664,7 +1651,7 @@ static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
if (mtd->oops_panic_write || ctrl->irq < 0) {
/* switch to interrupt polling and PIO mode */
disable_ctrl_irqs(ctrl);
- sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
+ sts = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY,
NAND_CTRL_RDY, 0);
err = sts < 0;
} else {
@@ -1703,6 +1690,26 @@ static int brcmnand_waitfunc(struct nand_chip *chip)
INTFC_FLASH_STATUS;
}
+static u8 brcmnand_status(struct brcmnand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ brcmnand_set_cmd_addr(mtd, 0);
+ brcmnand_send_cmd(host, CMD_STATUS_READ);
+
+ return brcmnand_waitfunc(chip);
+}
+
+static u8 brcmnand_reset(struct brcmnand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+
+ brcmnand_send_cmd(host, CMD_FLASH_RESET);
+
+ return brcmnand_waitfunc(chip);
+}
+
enum {
LLOP_RE = BIT(16),
LLOP_WE = BIT(17),
@@ -1752,190 +1759,6 @@ static int brcmnand_low_level_op(struct brcmnand_host *host,
return brcmnand_waitfunc(chip);
}
-static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
- int column, int page_addr)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct brcmnand_host *host = nand_get_controller_data(chip);
- struct brcmnand_controller *ctrl = host->ctrl;
- u64 addr = (u64)page_addr << chip->page_shift;
- int native_cmd = 0;
-
- if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
- command == NAND_CMD_RNDOUT)
- addr = (u64)column;
- /* Avoid propagating a negative, don't-care address */
- else if (page_addr < 0)
- addr = 0;
-
- dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
- (unsigned long long)addr);
-
- host->last_cmd = command;
- host->last_byte = 0;
- host->last_addr = addr;
-
- switch (command) {
- case NAND_CMD_RESET:
- native_cmd = CMD_FLASH_RESET;
- break;
- case NAND_CMD_STATUS:
- native_cmd = CMD_STATUS_READ;
- break;
- case NAND_CMD_READID:
- native_cmd = CMD_DEVICE_ID_READ;
- break;
- case NAND_CMD_READOOB:
- native_cmd = CMD_SPARE_AREA_READ;
- break;
- case NAND_CMD_ERASE1:
- native_cmd = CMD_BLOCK_ERASE;
- brcmnand_wp(mtd, 0);
- break;
- case NAND_CMD_PARAM:
- native_cmd = CMD_PARAMETER_READ;
- break;
- case NAND_CMD_SET_FEATURES:
- case NAND_CMD_GET_FEATURES:
- brcmnand_low_level_op(host, LL_OP_CMD, command, false);
- brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
- break;
- case NAND_CMD_RNDOUT:
- native_cmd = CMD_PARAMETER_CHANGE_COL;
- addr &= ~((u64)(FC_BYTES - 1));
- /*
- * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
- * NB: hwcfg.sector_size_1k may not be initialized yet
- */
- if (brcmnand_get_sector_size_1k(host)) {
- host->hwcfg.sector_size_1k =
- brcmnand_get_sector_size_1k(host);
- brcmnand_set_sector_size_1k(host, 0);
- }
- break;
- }
-
- if (!native_cmd)
- return;
-
- brcmnand_set_cmd_addr(mtd, addr);
- brcmnand_send_cmd(host, native_cmd);
- brcmnand_waitfunc(chip);
-
- if (native_cmd == CMD_PARAMETER_READ ||
- native_cmd == CMD_PARAMETER_CHANGE_COL) {
- /* Copy flash cache word-wise */
- u32 *flash_cache = (u32 *)ctrl->flash_cache;
- int i;
-
- brcmnand_soc_data_bus_prepare(ctrl->soc, true);
-
- /*
- * Must cache the FLASH_CACHE now, since changes in
- * SECTOR_SIZE_1K may invalidate it
- */
- for (i = 0; i < FC_WORDS; i++)
- /*
- * Flash cache is big endian for parameter pages, at
- * least on STB SoCs
- */
- flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));
-
- brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
-
- /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
- if (host->hwcfg.sector_size_1k)
- brcmnand_set_sector_size_1k(host,
- host->hwcfg.sector_size_1k);
- }
-
- /* Re-enable protection is necessary only after erase */
- if (command == NAND_CMD_ERASE1)
- brcmnand_wp(mtd, 1);
-}
-
-static uint8_t brcmnand_read_byte(struct nand_chip *chip)
-{
- struct brcmnand_host *host = nand_get_controller_data(chip);
- struct brcmnand_controller *ctrl = host->ctrl;
- uint8_t ret = 0;
- int addr, offs;
-
- switch (host->last_cmd) {
- case NAND_CMD_READID:
- if (host->last_byte < 4)
- ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
- (24 - (host->last_byte << 3));
- else if (host->last_byte < 8)
- ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
- (56 - (host->last_byte << 3));
- break;
-
- case NAND_CMD_READOOB:
- ret = oob_reg_read(ctrl, host->last_byte);
- break;
-
- case NAND_CMD_STATUS:
- ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
- INTFC_FLASH_STATUS;
- if (wp_on) /* hide WP status */
- ret |= NAND_STATUS_WP;
- break;
-
- case NAND_CMD_PARAM:
- case NAND_CMD_RNDOUT:
- addr = host->last_addr + host->last_byte;
- offs = addr & (FC_BYTES - 1);
-
- /* At FC_BYTES boundary, switch to next column */
- if (host->last_byte > 0 && offs == 0)
- nand_change_read_column_op(chip, addr, NULL, 0, false);
-
- ret = ctrl->flash_cache[offs];
- break;
- case NAND_CMD_GET_FEATURES:
- if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
- ret = 0;
- } else {
- bool last = host->last_byte ==
- ONFI_SUBFEATURE_PARAM_LEN - 1;
- brcmnand_low_level_op(host, LL_OP_RD, 0, last);
- ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
- }
- }
-
- dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
- host->last_byte++;
-
- return ret;
-}
-
-static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
-{
- int i;
-
- for (i = 0; i < len; i++, buf++)
- *buf = brcmnand_read_byte(chip);
-}
-
-static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
- int len)
-{
- int i;
- struct brcmnand_host *host = nand_get_controller_data(chip);
-
- switch (host->last_cmd) {
- case NAND_CMD_SET_FEATURES:
- for (i = 0; i < len; i++)
- brcmnand_low_level_op(host, LL_OP_WR, buf[i],
- (i + 1) == len);
- break;
- default:
- BUG();
- break;
- }
-}
-
/*
* Kick EDU engine
*/
@@ -2345,8 +2168,9 @@ static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+ u64 addr = (u64)page << chip->page_shift;
- nand_read_page_op(chip, page, 0, NULL, 0);
+ host->last_addr = addr;
return brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
@@ -2359,8 +2183,9 @@ static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
int ret;
+ u64 addr = (u64)page << chip->page_shift;
- nand_read_page_op(chip, page, 0, NULL, 0);
+ host->last_addr = addr;
brcmnand_set_ecc_enabled(host, 0);
ret = brcmnand_read(mtd, chip, host->last_addr,
@@ -2468,11 +2293,11 @@ static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ u64 addr = (u64)page << chip->page_shift;
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
- brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ host->last_addr = addr;
- return nand_prog_page_end_op(chip);
+ return brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
}
static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
@@ -2481,13 +2306,15 @@ static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ u64 addr = (u64)page << chip->page_shift;
+ int ret = 0;
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ host->last_addr = addr;
brcmnand_set_ecc_enabled(host, 0);
- brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ ret = brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
- return nand_prog_page_end_op(chip);
+ return ret;
}
static int brcmnand_write_oob(struct nand_chip *chip, int page)
@@ -2511,6 +2338,130 @@ static int brcmnand_write_oob_raw(struct nand_chip *chip, int page)
return ret;
}
+static int brcmnand_exec_instr(struct brcmnand_host *host, int i,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr = &op->instrs[i];
+ struct brcmnand_controller *ctrl = host->ctrl;
+ const u8 *out;
+ bool last_op;
+ int ret = 0;
+ u8 *in;
+
+ /*
+ * The controller needs to be aware of the last command in the operation
+ * (WAITRDY excepted).
+ */
+ last_op = ((i == (op->ninstrs - 1)) && (instr->type != NAND_OP_WAITRDY_INSTR)) ||
+ ((i == (op->ninstrs - 2)) && (op->instrs[i + 1].type == NAND_OP_WAITRDY_INSTR));
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ brcmnand_low_level_op(host, LL_OP_CMD, instr->ctx.cmd.opcode, last_op);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ brcmnand_low_level_op(host, LL_OP_ADDR, instr->ctx.addr.addrs[i],
+ last_op && (i == (instr->ctx.addr.naddrs - 1)));
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ in = instr->ctx.data.buf.in;
+ for (i = 0; i < instr->ctx.data.len; i++) {
+ brcmnand_low_level_op(host, LL_OP_RD, 0,
+ last_op && (i == (instr->ctx.data.len - 1)));
+ in[i] = brcmnand_read_reg(host->ctrl, BRCMNAND_LL_RDATA);
+ }
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ out = instr->ctx.data.buf.out;
+ for (i = 0; i < instr->ctx.data.len; i++)
+ brcmnand_low_level_op(host, LL_OP_WR, out[i],
+ last_op && (i == (instr->ctx.data.len - 1)));
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+ break;
+
+ default:
+ dev_err(ctrl->dev, "unsupported instruction type: %d\n",
+ instr->type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int brcmnand_op_is_status(const struct nand_operation *op)
+{
+ if ((op->ninstrs == 2) &&
+ (op->instrs[0].type == NAND_OP_CMD_INSTR) &&
+ (op->instrs[0].ctx.cmd.opcode == NAND_CMD_STATUS) &&
+ (op->instrs[1].type == NAND_OP_DATA_IN_INSTR))
+ return 1;
+
+ return 0;
+}
+
+static int brcmnand_op_is_reset(const struct nand_operation *op)
+{
+ if ((op->ninstrs == 2) &&
+ (op->instrs[0].type == NAND_OP_CMD_INSTR) &&
+ (op->instrs[0].ctx.cmd.opcode == NAND_CMD_RESET) &&
+ (op->instrs[1].type == NAND_OP_WAITRDY_INSTR))
+ return 1;
+
+ return 0;
+}
+
+static int brcmnand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *status;
+ unsigned int i;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ if (brcmnand_op_is_status(op)) {
+ status = op->instrs[1].ctx.data.buf.in;
+ *status = brcmnand_status(host);
+
+ return 0;
+ } else if (brcmnand_op_is_reset(op)) {
+ ret = brcmnand_reset(host);
+ if (ret < 0)
+ return ret;
+
+ brcmnand_wp(mtd, 1);
+
+ return 0;
+ }
+
+ if (op->deassert_wp)
+ brcmnand_wp(mtd, 0);
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = brcmnand_exec_instr(host, i, op);
+ if (ret)
+ break;
+ }
+
+ if (op->deassert_wp)
+ brcmnand_wp(mtd, 1);
+
+ return ret;
+}
+
/***********************************************************************
* Per-CS setup (1 NAND device)
***********************************************************************/
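A minimal standalone sketch of the last_op predicate used in brcmnand_exec_instr() above: for each instruction index it reports whether the controller must treat it as the final low-level cycle, with a trailing WAITRDY not counting as one. The enum values and the CMD/ADDR/DATA_OUT/WAITRDY sequence below are simplified stand-ins for the kernel's nand_op_instr types, not the real structures.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's nand_op_instr_type values. */
enum instr_type { CMD, ADDR, DATA_IN, DATA_OUT, WAITRDY };

static const char *names[] = { "CMD", "ADDR", "DATA_IN", "DATA_OUT", "WAITRDY" };

/* Mirrors the last_op expression in brcmnand_exec_instr(). */
static bool is_last_op(const enum instr_type *instrs, int ninstrs, int i)
{
	return ((i == ninstrs - 1) && (instrs[i] != WAITRDY)) ||
	       ((i == ninstrs - 2) && (instrs[i + 1] == WAITRDY));
}

int main(void)
{
	/* A program-page-like sequence: command, addresses, data out, wait ready. */
	enum instr_type op[] = { CMD, ADDR, DATA_OUT, WAITRDY };
	int n = sizeof(op) / sizeof(op[0]);

	for (int i = 0; i < n; i++)
		printf("%-8s last_op=%d\n", names[op[i]], is_last_op(op, n, i));
	return 0;
}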
@@ -2821,6 +2772,7 @@ static int brcmnand_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops brcmnand_controller_ops = {
.attach_chip = brcmnand_attach_chip,
+ .exec_op = brcmnand_exec_op,
};
static int brcmnand_init_cs(struct brcmnand_host *host,
@@ -2845,13 +2797,6 @@ static int brcmnand_init_cs(struct brcmnand_host *host,
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
- chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl;
- chip->legacy.cmdfunc = brcmnand_cmdfunc;
- chip->legacy.waitfunc = brcmnand_waitfunc;
- chip->legacy.read_byte = brcmnand_read_byte;
- chip->legacy.read_buf = brcmnand_read_buf;
- chip->legacy.write_buf = brcmnand_write_buf;
-
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.read_page = brcmnand_read_page;
chip->ecc.write_page = brcmnand_write_page;
@@ -2863,6 +2808,7 @@ static int brcmnand_init_cs(struct brcmnand_host *host,
chip->ecc.write_oob = brcmnand_write_oob;
chip->controller = &ctrl->controller;
+ ctrl->controller.controller_wp = 1;
/*
* The bootloader might have configured 16bit mode but
@@ -3299,7 +3245,7 @@ err:
}
EXPORT_SYMBOL_GPL(brcmnand_probe);
-int brcmnand_remove(struct platform_device *pdev)
+void brcmnand_remove(struct platform_device *pdev)
{
struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
struct brcmnand_host *host;
@@ -3316,8 +3262,6 @@ int brcmnand_remove(struct platform_device *pdev)
clk_disable_unprepare(ctrl->clk);
dev_set_drvdata(&pdev->dev, NULL);
-
- return 0;
}
EXPORT_SYMBOL_GPL(brcmnand_remove);
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.h b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
index f1f93d85f50d..928114c0be5e 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.h
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
@@ -88,7 +88,7 @@ static inline void brcmnand_soc_write(struct brcmnand_soc *soc, u32 val,
}
int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc);
-int brcmnand_remove(struct platform_device *pdev);
+void brcmnand_remove(struct platform_device *pdev);
extern const struct dev_pm_ops brcmnand_pm_ops;
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
index 950923d977b7..558f083b92e9 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
@@ -23,7 +23,7 @@ static int brcmstb_nand_probe(struct platform_device *pdev)
static struct platform_driver brcmstb_nand_driver = {
.probe = brcmstb_nand_probe,
- .remove = brcmnand_remove,
+ .remove_new = brcmnand_remove,
.driver = {
.name = "brcmstb_nand",
.pm = &brcmnand_pm_ops,
diff --git a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
index 089c70fc6edf..bf46c8b85898 100644
--- a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
@@ -134,7 +134,7 @@ MODULE_DEVICE_TABLE(of, iproc_nand_of_match);
static struct platform_driver iproc_nand_driver = {
.probe = iproc_nand_probe,
- .remove = brcmnand_remove,
+ .remove_new = brcmnand_remove,
.driver = {
.name = "iproc_nand",
.pm = &brcmnand_pm_ops,
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 5d2ddb037a9a..5243fab9face 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1491,10 +1491,12 @@ static int __init doc_probe(unsigned long physadr)
else
numchips = doc2001_init(mtd);
- if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) {
- /* DBB note: i believe nand_cleanup is necessary here, as
- buffers may have been allocated in nand_base. Check with
- Thomas. FIX ME! */
+ ret = nand_scan(nand, numchips);
+ if (ret)
+ goto fail;
+
+ ret = doc->late_init(mtd);
+ if (ret) {
nand_cleanup(nand);
goto fail;
}
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index 20bb1e0cb5eb..f0e2318ce088 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -21,7 +21,7 @@
#define ERR_BYTE 0xFF /* Value returned for read
bytes when read failed */
-#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
+#define IFC_TIMEOUT_MSECS 1000 /* Maximum timeout to wait
for IFC NAND Machine */
struct fsl_ifc_ctrl;
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 71ec4052e52a..cdb58aca59c0 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -90,6 +90,8 @@
/* eMMC clock register, misc control */
#define CLK_SELECT_NAND BIT(31)
+#define CLK_ALWAYS_ON_NAND BIT(24)
+#define CLK_SELECT_FIX_PLL2 BIT(6)
#define NFC_CLK_CYCLE 6
@@ -509,7 +511,7 @@ static void meson_nfc_set_user_byte(struct nand_chip *nand, u8 *oob_buf)
__le64 *info;
int i, count;
- for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
+ for (i = 0, count = 0; i < nand->ecc.steps; i++, count += (2 + nand->ecc.bytes)) {
info = &meson_chip->info_buf[i];
*info |= oob_buf[count];
*info |= oob_buf[count + 1] << 8;
@@ -522,7 +524,7 @@ static void meson_nfc_get_user_byte(struct nand_chip *nand, u8 *oob_buf)
__le64 *info;
int i, count;
- for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
+ for (i = 0, count = 0; i < nand->ecc.steps; i++, count += (2 + nand->ecc.bytes)) {
info = &meson_chip->info_buf[i];
oob_buf[count] = *info;
oob_buf[count + 1] = *info >> 8;
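A small standalone sketch of the OOB indexing change above: moving the per-step stride from 2 to 2 + ecc.bytes makes the two user bytes of each ECC step come from the start of that step's OOB chunk instead of being packed back to back. The geometry (4 ECC steps, 16 ECC bytes per step) is hypothetical.

#include <stdio.h>

int main(void)
{
	int steps = 4;      /* hypothetical number of ECC steps */
	int ecc_bytes = 16; /* hypothetical ECC bytes per step */

	/* Old layout: user bytes packed back to back. */
	printf("old stride (2):       ");
	for (int i = 0, count = 0; i < steps; i++, count += 2)
		printf("[%2d,%2d] ", count, count + 1);
	printf("\n");

	/* New layout: also skip the ECC bytes of each step. */
	printf("new stride (2 + ecc): ");
	for (int i = 0, count = 0; i < steps; i++, count += 2 + ecc_bytes)
		printf("[%2d,%2d] ", count, count + 1);
	printf("\n");
	return 0;
}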
@@ -1154,7 +1156,7 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
return PTR_ERR(nfc->nand_clk);
/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
- writel(CLK_SELECT_NAND | readl(nfc->reg_clk),
+ writel(CLK_ALWAYS_ON_NAND | CLK_SELECT_NAND | CLK_SELECT_FIX_PLL2,
nfc->reg_clk);
ret = clk_prepare_enable(nfc->core_clk);
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 9e24bedffd89..3b3ce2926f5d 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -366,6 +366,10 @@ static int nand_check_wp(struct nand_chip *chip)
if (chip->options & NAND_BROKEN_XD)
return 0;
+ /* the controller is responsible for NAND write protection */
+ if (chip->controller->controller_wp)
+ return 0;
+
/* Check the WP bit */
ret = nand_status_op(chip, &status);
if (ret)
@@ -1207,6 +1211,23 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
return nand_exec_op(chip, &op);
}
+static void rawnand_cap_cont_reads(struct nand_chip *chip)
+{
+ struct nand_memory_organization *memorg;
+ unsigned int pages_per_lun, first_lun, last_lun;
+
+ memorg = nanddev_get_memorg(&chip->base);
+ pages_per_lun = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
+ first_lun = chip->cont_read.first_page / pages_per_lun;
+ last_lun = chip->cont_read.last_page / pages_per_lun;
+
+ /* Prevent sequential cache reads across LUN boundaries */
+ if (first_lun != last_lun)
+ chip->cont_read.pause_page = first_lun * pages_per_lun + pages_per_lun - 1;
+ else
+ chip->cont_read.pause_page = chip->cont_read.last_page;
+}
+
static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf,
unsigned int len, bool check_only)
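A standalone sketch of the LUN-capping arithmetic in rawnand_cap_cont_reads() above: when the first and last pages of a sequential cache read sit in different LUNs, the read is paused at the last page of the first LUN. The flash geometry and page numbers below are hypothetical.

#include <stdio.h>

int main(void)
{
	/* hypothetical geometry: 64 pages per block, 1024 blocks per LUN */
	unsigned int pages_per_lun = 64 * 1024;
	unsigned int first_page = 65000, last_page = 66000; /* spans a LUN boundary */
	unsigned int first_lun = first_page / pages_per_lun;
	unsigned int last_lun = last_page / pages_per_lun;
	unsigned int pause_page;

	/* Mirrors rawnand_cap_cont_reads(): never cross a LUN with READCACHESEQ. */
	if (first_lun != last_lun)
		pause_page = first_lun * pages_per_lun + pages_per_lun - 1;
	else
		pause_page = last_page;

	printf("first_lun=%u last_lun=%u pause_page=%u\n",
	       first_lun, last_lun, pause_page);
	return 0;
}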
@@ -1225,7 +1246,7 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_op_instr cont_instrs[] = {
- NAND_OP_CMD(page == chip->cont_read.last_page ?
+ NAND_OP_CMD(page == chip->cont_read.pause_page ?
NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ,
NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
@@ -1262,16 +1283,29 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p
}
if (page == chip->cont_read.first_page)
- return nand_exec_op(chip, &start_op);
+ ret = nand_exec_op(chip, &start_op);
else
- return nand_exec_op(chip, &cont_op);
+ ret = nand_exec_op(chip, &cont_op);
+ if (ret)
+ return ret;
+
+ if (!chip->cont_read.ongoing)
+ return 0;
+
+ if (page == chip->cont_read.pause_page &&
+ page != chip->cont_read.last_page) {
+ chip->cont_read.first_page = chip->cont_read.pause_page + 1;
+ rawnand_cap_cont_reads(chip);
+ } else if (page == chip->cont_read.last_page) {
+ chip->cont_read.ongoing = false;
+ }
+
+ return 0;
}
static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page)
{
- return chip->cont_read.ongoing &&
- page >= chip->cont_read.first_page &&
- page <= chip->cont_read.last_page;
+ return chip->cont_read.ongoing && page >= chip->cont_read.first_page;
}
/**
@@ -1493,7 +1527,8 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
};
- struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
+ instrs);
int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
if (naddrs < 0)
@@ -1916,7 +1951,8 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
0),
};
- struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
+ instrs);
if (chip->options & NAND_ROW_ADDR_3)
instrs[1].ctx.addr.naddrs++;
@@ -3430,21 +3466,42 @@ static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
u32 readlen, int col)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int end_page, end_col;
+
+ chip->cont_read.ongoing = false;
if (!chip->controller->supported_op.cont_read)
return;
- if ((col && col + readlen < (3 * mtd->writesize)) ||
- (!col && readlen < (2 * mtd->writesize))) {
- chip->cont_read.ongoing = false;
+ end_page = DIV_ROUND_UP(col + readlen, mtd->writesize);
+ end_col = (col + readlen) % mtd->writesize;
+
+ if (col)
+ page++;
+
+ if (end_col && end_page)
+ end_page--;
+
+ if (page + 1 > end_page)
return;
- }
- chip->cont_read.ongoing = true;
chip->cont_read.first_page = page;
- if (col)
+ chip->cont_read.last_page = end_page;
+ chip->cont_read.ongoing = true;
+
+ rawnand_cap_cont_reads(chip);
+}
+
+static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page)
+{
+ if (!chip->cont_read.ongoing || page != chip->cont_read.first_page)
+ return;
+
+ chip->cont_read.first_page++;
+ if (chip->cont_read.first_page == chip->cont_read.pause_page)
chip->cont_read.first_page++;
- chip->cont_read.last_page = page + ((readlen >> chip->page_shift) & chip->pagemask);
+ if (chip->cont_read.first_page >= chip->cont_read.last_page)
+ chip->cont_read.ongoing = false;
}
/**
@@ -3621,6 +3678,8 @@ read_retry:
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips,
chip->pagecache.bitflips);
+
+ rawnand_cont_read_skip_first_page(chip, page);
}
readlen -= bytes;
@@ -5125,6 +5184,14 @@ static void rawnand_late_check_supported_ops(struct nand_chip *chip)
/* The supported_op fields should not be set by individual drivers */
WARN_ON_ONCE(chip->controller->supported_op.cont_read);
+ /*
+ * Too many devices do not support sequential cached reads with on-die
+ * ECC correction enabled, so in that case do not enable the
+ * automation.
+ */
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE)
+ return;
+
if (!nand_has_exec_op(chip))
return;
diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
index c506e92a3e45..1c76ee98efb7 100644
--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
+++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
@@ -128,7 +128,7 @@ struct pl35x_nand {
* @conf_regs: SMC configuration registers for command phase
* @io_regs: NAND data registers for data phase
* @controller: Core NAND controller structure
- * @chip: NAND chip information structure
+ * @chips: List of connected NAND chips
* @selected_chip: NAND chip currently selected by the controller
* @assigned_cs: List of assigned CS
* @ecc_buf: Temporary buffer to extract ECC bytes
diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
index 596cf9a78274..7baaef69d70a 100644
--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
+++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
@@ -98,7 +98,7 @@ enum nfc_type {
* @high: ECC count high bit index at register.
* @high_mask: mask bit
*/
-struct ecc_cnt_status {
+struct rk_ecc_cnt_status {
u8 err_flag_bit;
u8 low;
u8 low_mask;
@@ -108,6 +108,7 @@ struct ecc_cnt_status {
};
/**
+ * struct nfc_cfg - Rockchip NAND controller configuration
* @type: NFC version
* @ecc_strengths: ECC strengths
* @ecc_cfgs: ECC config values
@@ -144,8 +145,8 @@ struct nfc_cfg {
u32 int_st_off;
u32 oob0_off;
u32 oob1_off;
- struct ecc_cnt_status ecc0;
- struct ecc_cnt_status ecc1;
+ struct rk_ecc_cnt_status ecc0;
+ struct rk_ecc_cnt_status ecc1;
};
struct rk_nfc_nand_chip {
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index 3d3d5c9814ff..48c1d0eb66ca 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -105,7 +105,6 @@ struct s3c2410_nand_info;
/**
* struct s3c2410_nand_mtd - driver MTD structure
- * @mtd: The MTD instance to pass to the MTD layer.
* @chip: The NAND chip information.
* @set: The platform information supplied for this set of NAND chips.
* @info: Link back to the hardware information.
@@ -145,7 +144,6 @@ enum s3c_nand_clk_state {
* @clk_rate: The clock rate from @clk.
* @clk_state: The current clock state.
* @cpu_type: The exact type of this controller.
- * @freq_transition: CPUFreq notifier block
*/
struct s3c2410_nand_info {
/* mtd info */
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index eddcc0728a67..37f79c019a72 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -276,7 +276,7 @@ static const struct nand_controller_ops txx9ndfmc_controller_ops = {
.attach_chip = txx9ndfmc_attach_chip,
};
-static int __init txx9ndfmc_probe(struct platform_device *dev)
+static int txx9ndfmc_probe(struct platform_device *dev)
{
struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
int hold, spw;
@@ -369,13 +369,11 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
return 0;
}
-static int __exit txx9ndfmc_remove(struct platform_device *dev)
+static void txx9ndfmc_remove(struct platform_device *dev)
{
struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
int ret, i;
- if (!drvdata)
- return 0;
for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
struct mtd_info *mtd = drvdata->mtds[i];
struct nand_chip *chip;
@@ -392,7 +390,6 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
- return 0;
}
#ifdef CONFIG_PM
@@ -407,14 +404,14 @@ static int txx9ndfmc_resume(struct platform_device *dev)
#endif
static struct platform_driver txx9ndfmc_driver = {
- .remove = __exit_p(txx9ndfmc_remove),
+ .probe = txx9ndfmc_probe,
+ .remove_new = txx9ndfmc_remove,
.resume = txx9ndfmc_resume,
.driver = {
.name = "txx9ndfmc",
},
};
-
-module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe);
+module_platform_driver(txx9ndfmc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 849ccfedbc72..e0b6715e5dfe 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -974,7 +974,7 @@ static int spinand_manufacturer_match(struct spinand_device *spinand,
spinand->manufacturer = manufacturer;
return 0;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int spinand_id_detect(struct spinand_device *spinand)
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
index e13b8d2dd50a..45d1153a04a0 100644
--- a/drivers/mtd/spi-nor/atmel.c
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -16,12 +16,12 @@
* is to unlock the whole flash array on startup. Therefore, we have to support
* exactly this operation.
*/
-static int at25fs_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int at25fs_nor_lock(struct spi_nor *nor, loff_t ofs, u64 len)
{
return -EOPNOTSUPP;
}
-static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
{
int ret;
@@ -37,7 +37,7 @@ static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
return ret;
}
-static int at25fs_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int at25fs_nor_is_locked(struct spi_nor *nor, loff_t ofs, u64 len)
{
return -EOPNOTSUPP;
}
@@ -69,7 +69,7 @@ static const struct spi_nor_fixups at25fs_nor_fixups = {
* Return: 0 on success, -error otherwise.
*/
static int atmel_nor_set_global_protection(struct spi_nor *nor, loff_t ofs,
- uint64_t len, bool is_protect)
+ u64 len, bool is_protect)
{
int ret;
u8 sr;
@@ -118,20 +118,18 @@ static int atmel_nor_set_global_protection(struct spi_nor *nor, loff_t ofs,
return spi_nor_write_sr(nor, nor->bouncebuf, 1);
}
-static int atmel_nor_global_protect(struct spi_nor *nor, loff_t ofs,
- uint64_t len)
+static int atmel_nor_global_protect(struct spi_nor *nor, loff_t ofs, u64 len)
{
return atmel_nor_set_global_protection(nor, ofs, len, true);
}
-static int atmel_nor_global_unprotect(struct spi_nor *nor, loff_t ofs,
- uint64_t len)
+static int atmel_nor_global_unprotect(struct spi_nor *nor, loff_t ofs, u64 len)
{
return atmel_nor_set_global_protection(nor, ofs, len, false);
}
static int atmel_nor_is_global_protected(struct spi_nor *nor, loff_t ofs,
- uint64_t len)
+ u64 len)
{
int ret;
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 1c443fe568cf..4129764fad8c 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -1060,24 +1060,32 @@ static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
}
/**
- * spi_nor_erase_chip() - Erase the entire flash memory.
+ * spi_nor_erase_die() - Erase the entire die.
* @nor: pointer to 'struct spi_nor'.
+ * @addr: address of the die.
+ * @die_size: size of the die.
*
* Return: 0 on success, -errno otherwise.
*/
-static int spi_nor_erase_chip(struct spi_nor *nor)
+static int spi_nor_erase_die(struct spi_nor *nor, loff_t addr, size_t die_size)
{
+ bool multi_die = nor->mtd.size != die_size;
int ret;
- dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
+ dev_dbg(nor->dev, " %lldKiB\n", (long long)(die_size >> 10));
if (nor->spimem) {
- struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP;
+ struct spi_mem_op op =
+ SPI_NOR_DIE_ERASE_OP(nor->params->die_erase_opcode,
+ nor->addr_nbytes, addr, multi_die);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
+ if (multi_die)
+ return -EOPNOTSUPP;
+
ret = spi_nor_controller_ops_write_reg(nor,
SPINOR_OP_CHIP_ERASE,
NULL, 0);
@@ -1792,6 +1800,51 @@ destroy_erase_cmd_list:
return ret;
}
+static int spi_nor_erase_dice(struct spi_nor *nor, loff_t addr,
+ size_t len, size_t die_size)
+{
+ unsigned long timeout;
+ int ret;
+
+ /*
+ * Scale the timeout linearly with the size of the flash, with
+ * a minimum calibrated to an old 2MB flash. We could try to
+ * pull these from CFI/SFDP, but these values should be good
+ * enough for now.
+ */
+ timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
+ CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
+ (unsigned long)(nor->mtd.size / SZ_2M));
+
+ do {
+ ret = spi_nor_lock_device(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret) {
+ spi_nor_unlock_device(nor);
+ return ret;
+ }
+
+ ret = spi_nor_erase_die(nor, addr, die_size);
+
+ spi_nor_unlock_device(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
+ if (ret)
+ return ret;
+
+ addr += die_size;
+ len -= die_size;
+
+ } while (len);
+
+ return 0;
+}
+
/*
* Erase an address range on the nor chip. The address range may extend
* one or more erase sectors. Return an error if there is a problem erasing.
@@ -1799,8 +1852,10 @@ destroy_erase_cmd_list:
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- u32 addr, len;
- uint32_t rem;
+ u8 n_dice = nor->params->n_dice;
+ bool multi_die_erase = false;
+ u32 addr, len, rem;
+ size_t die_size;
int ret;
dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
@@ -1815,39 +1870,22 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
addr = instr->addr;
len = instr->len;
+ if (n_dice) {
+ die_size = div_u64(mtd->size, n_dice);
+ if (!(len & (die_size - 1)) && !(addr & (die_size - 1)))
+ multi_die_erase = true;
+ } else {
+ die_size = mtd->size;
+ }
+
ret = spi_nor_prep_and_lock_pe(nor, instr->addr, instr->len);
if (ret)
return ret;
- /* whole-chip erase? */
- if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
- unsigned long timeout;
-
- ret = spi_nor_lock_device(nor);
- if (ret)
- goto erase_err;
-
- ret = spi_nor_write_enable(nor);
- if (ret) {
- spi_nor_unlock_device(nor);
- goto erase_err;
- }
-
- ret = spi_nor_erase_chip(nor);
- spi_nor_unlock_device(nor);
- if (ret)
- goto erase_err;
-
- /*
- * Scale the timeout linearly with the size of the flash, with
- * a minimum calibrated to an old 2MB flash. We could try to
- * pull these from CFI/SFDP, but these values should be good
- * enough for now.
- */
- timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
- CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
- (unsigned long)(mtd->size / SZ_2M));
- ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
+ /* chip (die) erase? */
+ if ((len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) ||
+ multi_die_erase) {
+ ret = spi_nor_erase_dice(nor, addr, len, die_size);
if (ret)
goto erase_err;
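A standalone sketch of the decisions made above: die_size is the flash size divided by the number of dice, a multi-die erase is only attempted when both the address and the length are aligned to the (power-of-two) die size, and the ready-wait timeout scales linearly with the flash size. The sizes, the HZ value and the 40 * HZ constant below are illustrative stand-ins.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_2M	(2u << 20)
#define HZ	100u	/* hypothetical jiffies per second */
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES	(40u * HZ)	/* stand-in for the kernel constant */

int main(void)
{
	uint64_t size = 256ull << 20;	/* hypothetical 256 MiB flash */
	unsigned int n_dice = 2;	/* hypothetical two-die package */
	uint64_t die_size = size / n_dice;
	uint64_t addr = 0, len = die_size;	/* erase exactly one die */

	/* multi-die erase requires both addr and len aligned to the die size */
	bool multi_die_erase = !(len & (die_size - 1)) && !(addr & (die_size - 1));

	/* timeout scales linearly with the whole flash size, 2 MiB minimum */
	unsigned long timeout = CHIP_ERASE_2MB_READY_WAIT_JIFFIES;
	unsigned long scaled = CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
			       (unsigned long)(size / SZ_2M);
	if (scaled > timeout)
		timeout = scaled;

	printf("die_size=%llu MiB multi_die_erase=%d timeout=%lu jiffies\n",
	       (unsigned long long)(die_size >> 20), multi_die_erase, timeout);
	return 0;
}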
@@ -2146,7 +2184,7 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
if (is_power_of_2(page_size)) {
page_offset = addr & (page_size - 1);
} else {
- uint64_t aux = addr;
+ u64 aux = addr;
page_offset = do_div(aux, page_size);
}
@@ -2850,9 +2888,6 @@ static void spi_nor_init_flags(struct spi_nor *nor)
nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
}
- if (flags & NO_CHIP_ERASE)
- nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
-
if (flags & SPI_NOR_RWW && nor->params->n_banks > 1 &&
!nor->controller_ops)
nor->flags |= SNOR_F_RWW;
@@ -2897,17 +2932,22 @@ static int spi_nor_late_init_params(struct spi_nor *nor)
return ret;
}
+ /* Needed by some flashes' late_init hooks. */
+ spi_nor_init_flags(nor);
+
if (nor->info->fixups && nor->info->fixups->late_init) {
ret = nor->info->fixups->late_init(nor);
if (ret)
return ret;
}
+ if (!nor->params->die_erase_opcode)
+ nor->params->die_erase_opcode = SPINOR_OP_CHIP_ERASE;
+
/* Default method kept for backward compatibility. */
if (!params->set_4byte_addr_mode)
params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr;
- spi_nor_init_flags(nor);
spi_nor_init_fixup_flags(nor);
/*
@@ -3145,8 +3185,20 @@ int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
struct spi_nor_flash_parameter *params = nor->params;
int ret;
+ if (enable) {
+ /*
+ * If the RESET# pin isn't hooked up properly, or the system
+ * otherwise doesn't perform a reset command in the boot
+ * sequence, it's impossible to 100% protect against unexpected
+ * reboots (e.g., crashes). Warn the user (or hopefully, system
+ * designer) that this is bad.
+ */
+ WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
+ "enabling reset hack; may not recover from unexpected reboots\n");
+ }
+
ret = params->set_4byte_addr_mode(nor, enable);
- if (ret && ret != -ENOTSUPP)
+ if (ret && ret != -EOPNOTSUPP)
return ret;
if (enable) {
@@ -3193,20 +3245,8 @@ static int spi_nor_init(struct spi_nor *nor)
if (nor->addr_nbytes == 4 &&
nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
- !(nor->flags & SNOR_F_4B_OPCODES)) {
- /*
- * If the RESET# pin isn't hooked up properly, or the system
- * otherwise doesn't perform a reset command in the boot
- * sequence, it's impossible to 100% protect against unexpected
- * reboots (e.g., crashes). Warn the user (or hopefully, system
- * designer) that this is bad.
- */
- WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
- "enabling reset hack; may not recover from unexpected reboots\n");
- err = spi_nor_set_4byte_addr_mode(nor, true);
- if (err)
- return err;
- }
+ !(nor->flags & SNOR_F_4B_OPCODES))
+ return spi_nor_set_4byte_addr_mode(nor, true);
return 0;
}
@@ -3237,7 +3277,8 @@ static void spi_nor_soft_reset(struct spi_nor *nor)
ret = spi_mem_exec_op(nor->spimem, &op);
if (ret) {
- dev_warn(nor->dev, "Software reset failed: %d\n", ret);
+ if (ret != -EOPNOTSUPP)
+ dev_warn(nor->dev, "Software reset failed: %d\n", ret);
return;
}
@@ -3452,9 +3493,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
{
const struct flash_info *info;
struct device *dev = nor->dev;
- struct mtd_info *mtd = &nor->mtd;
int ret;
- int i;
ret = spi_nor_check(nor);
if (ret)
@@ -3518,25 +3557,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
/* No mtd_info fields should be used up to this point. */
spi_nor_set_mtd_info(nor);
- dev_info(dev, "%s (%lld Kbytes)\n", info->name,
- (long long)mtd->size >> 10);
-
- dev_dbg(dev,
- "mtd .name = %s, .size = 0x%llx (%lldMiB), "
- ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
- mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
- mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
-
- if (mtd->numeraseregions)
- for (i = 0; i < mtd->numeraseregions; i++)
- dev_dbg(dev,
- "mtd.eraseregions[%d] = { .offset = 0x%llx, "
- ".erasesize = 0x%.8x (%uKiB), "
- ".numblocks = %d }\n",
- i, (long long)mtd->eraseregions[i].offset,
- mtd->eraseregions[i].erasesize,
- mtd->eraseregions[i].erasesize / 1024,
- mtd->eraseregions[i].numblocks);
+ dev_dbg(dev, "Manufacturer and device ID: %*phN\n",
+ SPI_NOR_MAX_ID_LEN, nor->id);
+
return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 93cd2fc3606d..d36c0e072954 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -85,9 +85,9 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPI_NOR_CHIP_ERASE_OP \
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0), \
- SPI_MEM_OP_NO_ADDR, \
+#define SPI_NOR_DIE_ERASE_OP(opcode, addr_nbytes, addr, dice) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
+ SPI_MEM_OP_ADDR(dice ? addr_nbytes : 0, addr, 0), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
@@ -293,9 +293,9 @@ struct spi_nor_erase_map {
* @is_locked: check if a region of the SPI NOR is completely locked
*/
struct spi_nor_locking_ops {
- int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*lock)(struct spi_nor *nor, loff_t ofs, u64 len);
+ int (*unlock)(struct spi_nor *nor, loff_t ofs, u64 len);
+ int (*is_locked)(struct spi_nor *nor, loff_t ofs, u64 len);
};
/**
@@ -362,6 +362,7 @@ struct spi_nor_otp {
* command in octal DTR mode.
* @n_banks: number of banks.
* @n_dice: number of dice in the flash memory.
+ * @die_erase_opcode: die erase opcode. Defaults to SPINOR_OP_CHIP_ERASE.
* @vreg_offset: volatile register offset for each die.
* @hwcaps: describes the read and page program hardware
* capabilities.
@@ -399,6 +400,7 @@ struct spi_nor_flash_parameter {
u8 rdsr_addr_nbytes;
u8 n_banks;
u8 n_dice;
+ u8 die_erase_opcode;
u32 *vreg_offset;
struct spi_nor_hwcaps hwcaps;
@@ -463,7 +465,7 @@ struct spi_nor_id {
* struct flash_info - SPI NOR flash_info entry.
* @id: pointer to struct spi_nor_id or NULL, which means "no ID" (mostly
* older chips).
- * @name: the name of the flash.
+ * @name: (obsolete) the name of the flash. Do not set it for new additions.
* @size: the size of the flash in bytes.
* @sector_size: (optional) the size listed here is what works with
* SPINOR_OP_SE, which isn't necessarily called a "sector" by
@@ -487,7 +489,6 @@ struct spi_nor_id {
* Usually these will power-up in a write-protected
* state.
* SPI_NOR_NO_ERASE: no erase command needed.
- * NO_CHIP_ERASE: chip does not support chip erase.
* SPI_NOR_NO_FR: can't do fastread.
* SPI_NOR_QUAD_PP: flash supports Quad Input Page Program.
* SPI_NOR_RWW: flash supports reads while write.
@@ -537,10 +538,9 @@ struct flash_info {
#define SPI_NOR_BP3_SR_BIT6 BIT(4)
#define SPI_NOR_SWP_IS_VOLATILE BIT(5)
#define SPI_NOR_NO_ERASE BIT(6)
-#define NO_CHIP_ERASE BIT(7)
-#define SPI_NOR_NO_FR BIT(8)
-#define SPI_NOR_QUAD_PP BIT(9)
-#define SPI_NOR_RWW BIT(10)
+#define SPI_NOR_NO_FR BIT(7)
+#define SPI_NOR_QUAD_PP BIT(8)
+#define SPI_NOR_RWW BIT(9)
u8 no_sfdp_flags;
#define SPI_NOR_SKIP_SFDP BIT(0)
diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
index 6e163cb5b478..2dbda6b6938a 100644
--- a/drivers/mtd/spi-nor/debugfs.c
+++ b/drivers/mtd/spi-nor/debugfs.c
@@ -138,7 +138,7 @@ static int spi_nor_params_show(struct seq_file *s, void *data)
if (!(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
string_get_size(params->size, 1, STRING_UNITS_2, buf, sizeof(buf));
- seq_printf(s, " %02x (%s)\n", SPINOR_OP_CHIP_ERASE, buf);
+ seq_printf(s, " %02x (%s)\n", nor->params->die_erase_opcode, buf);
}
seq_puts(s, "\nsector map\n");
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index 8920547c12bf..3c6499fdb712 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -11,6 +11,7 @@
/* flash_info mfr_flag. Used to read proprietary FSR register. */
#define USE_FSR BIT(0)
+#define SPINOR_OP_MT_DIE_ERASE 0xc4 /* Chip (die) erase opcode */
#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
#define SPINOR_OP_MT_DTR_RD 0xfd /* Fast Read opcode in DTR mode */
@@ -192,6 +193,50 @@ static struct spi_nor_fixups mt25qu512a_fixups = {
.post_bfpt = mt25qu512a_post_bfpt_fixup,
};
+static int st_nor_four_die_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ params->die_erase_opcode = SPINOR_OP_MT_DIE_ERASE;
+ params->n_dice = 4;
+
+ /*
+ * Unfortunately, the die erase opcode has no 4-byte equivalent for
+ * these flashes, and the SFDP 4BAIT table does not cover die erase
+ * either. We are forced to enter 4-byte address mode in order to
+ * benefit from the die erase.
+ */
+ return spi_nor_set_4byte_addr_mode(nor, true);
+}
+
+static int st_nor_two_die_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ params->die_erase_opcode = SPINOR_OP_MT_DIE_ERASE;
+ params->n_dice = 2;
+
+ /*
+ * Unfortunately, the die erase opcode has no 4-byte equivalent for
+ * these flashes, and the SFDP 4BAIT table does not cover die erase
+ * either. We are forced to enter 4-byte address mode in order to
+ * benefit from the die erase.
+ */
+ return spi_nor_set_4byte_addr_mode(nor, true);
+}
+
+static struct spi_nor_fixups n25q00_fixups = {
+ .late_init = st_nor_four_die_late_init,
+};
+
+static struct spi_nor_fixups mt25q01_fixups = {
+ .late_init = st_nor_two_die_late_init,
+};
+
+static struct spi_nor_fixups mt25q02_fixups = {
+ .late_init = st_nor_four_die_late_init,
+};
+
static const struct flash_info st_nor_parts[] = {
{
.name = "m25p05-nonjedec",
@@ -366,16 +411,17 @@ static const struct flash_info st_nor_parts[] = {
.name = "n25q00",
.size = SZ_128M,
.flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
- SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE,
+ SPI_NOR_BP3_SR_BIT6,
.no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
.mfr_flags = USE_FSR,
+ .fixups = &n25q00_fixups,
}, {
.id = SNOR_ID(0x20, 0xba, 0x22),
.name = "mt25ql02g",
.size = SZ_256M,
- .flags = NO_CHIP_ERASE,
.no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
.mfr_flags = USE_FSR,
+ .fixups = &mt25q02_fixups,
}, {
.id = SNOR_ID(0x20, 0xbb, 0x15),
.name = "n25q016a",
@@ -430,19 +476,24 @@ static const struct flash_info st_nor_parts[] = {
.no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
.mfr_flags = USE_FSR,
}, {
+ .id = SNOR_ID(0x20, 0xbb, 0x21, 0x10, 0x44, 0x00),
+ .name = "mt25qu01g",
+ .mfr_flags = USE_FSR,
+ .fixups = &mt25q01_fixups,
+ }, {
.id = SNOR_ID(0x20, 0xbb, 0x21),
.name = "n25q00a",
.size = SZ_128M,
- .flags = NO_CHIP_ERASE,
.no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
.mfr_flags = USE_FSR,
+ .fixups = &n25q00_fixups,
}, {
.id = SNOR_ID(0x20, 0xbb, 0x22),
.name = "mt25qu02g",
.size = SZ_256M,
- .flags = NO_CHIP_ERASE,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.mfr_flags = USE_FSR,
+ .fixups = &mt25q02_fixups,
}
};
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index b3b11dfed789..57713de32832 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -446,6 +446,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
u32 dword;
u16 half;
u8 erase_mask;
+ u8 wait_states, mode_clocks, opcode;
/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
@@ -631,6 +632,32 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);
+ /* Parse 1-1-8 read instruction */
+ opcode = FIELD_GET(BFPT_DWORD17_RD_1_1_8_CMD, bfpt.dwords[SFDP_DWORD(17)]);
+ if (opcode) {
+ mode_clocks = FIELD_GET(BFPT_DWORD17_RD_1_1_8_MODE_CLOCKS,
+ bfpt.dwords[SFDP_DWORD(17)]);
+ wait_states = FIELD_GET(BFPT_DWORD17_RD_1_1_8_WAIT_STATES,
+ bfpt.dwords[SFDP_DWORD(17)]);
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
+ mode_clocks, wait_states, opcode,
+ SNOR_PROTO_1_1_8);
+ }
+
+ /* Parse 1-8-8 read instruction */
+ opcode = FIELD_GET(BFPT_DWORD17_RD_1_8_8_CMD, bfpt.dwords[SFDP_DWORD(17)]);
+ if (opcode) {
+ mode_clocks = FIELD_GET(BFPT_DWORD17_RD_1_8_8_MODE_CLOCKS,
+ bfpt.dwords[SFDP_DWORD(17)]);
+ wait_states = FIELD_GET(BFPT_DWORD17_RD_1_8_8_WAIT_STATES,
+ bfpt.dwords[SFDP_DWORD(17)]);
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_8_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_8_8],
+ mode_clocks, wait_states, opcode,
+ SNOR_PROTO_1_8_8);
+ }
+
/* 8D-8D-8D command extension. */
switch (bfpt.dwords[SFDP_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) {
case BFPT_DWORD18_CMD_EXT_REP:
@@ -968,6 +995,8 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
{ SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
{ SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
{ SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
+ { SNOR_HWCAPS_READ_1_1_8, BIT(20) },
+ { SNOR_HWCAPS_READ_1_8_8, BIT(21) },
};
static const struct sfdp_4bait programs[] = {
{ SNOR_HWCAPS_PP, BIT(6) },
diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h
index 6eb99e1cdd61..da0fe5aa9bb0 100644
--- a/drivers/mtd/spi-nor/sfdp.h
+++ b/drivers/mtd/spi-nor/sfdp.h
@@ -118,6 +118,13 @@ struct sfdp_bfpt {
(BFPT_DWORD16_EN4B_EN4B | BFPT_DWORD16_EX4B_EX4B)
#define BFPT_DWORD16_SWRST_EN_RST BIT(12)
+#define BFPT_DWORD17_RD_1_1_8_CMD GENMASK(31, 24)
+#define BFPT_DWORD17_RD_1_1_8_MODE_CLOCKS GENMASK(23, 21)
+#define BFPT_DWORD17_RD_1_1_8_WAIT_STATES GENMASK(20, 16)
+#define BFPT_DWORD17_RD_1_8_8_CMD GENMASK(15, 8)
+#define BFPT_DWORD17_RD_1_8_8_MODE_CLOCKS GENMASK(7, 5)
+#define BFPT_DWORD17_RD_1_8_8_WAIT_STATES GENMASK(4, 0)
+
#define BFPT_DWORD18_CMD_EXT_MASK GENMASK(30, 29)
#define BFPT_DWORD18_CMD_EXT_REP (0x0UL << 29) /* Repeat */
#define BFPT_DWORD18_CMD_EXT_INV (0x1UL << 29) /* Invert */
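A standalone sketch of how the BFPT_DWORD17 fields above slice a 32-bit DWORD: the 1-1-8 read opcode, mode clocks and wait states live in bits 31:24, 23:21 and 20:16, and the 1-8-8 variants in bits 15:8, 7:5 and 4:0. The DWORD value is made up for illustration; the kernel extracts these with FIELD_GET() over the GENMASK() definitions.

#include <stdint.h>
#include <stdio.h>

/* Extract bits [hi:lo] of a 32-bit value, like FIELD_GET() over a GENMASK(). */
static uint32_t bits(uint32_t v, unsigned int hi, unsigned int lo)
{
	return (v >> lo) & ((1u << (hi - lo + 1)) - 1);
}

int main(void)
{
	/* made-up BFPT DWORD17: 1-1-8 opcode 0x8b, 8 wait states; 1-8-8 opcode 0xcb */
	uint32_t dword17 = (0x8bu << 24) | (0u << 21) | (8u << 16) |
			   (0xcbu << 8)  | (0u << 5)  | 16u;

	printf("1-1-8: cmd=0x%02x mode=%u wait=%u\n",
	       bits(dword17, 31, 24), bits(dword17, 23, 21), bits(dword17, 20, 16));
	printf("1-8-8: cmd=0x%02x mode=%u wait=%u\n",
	       bits(dword17, 15, 8), bits(dword17, 7, 5), bits(dword17, 4, 0));
	return 0;
}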
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 12921344373d..6cc237c24e07 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -17,6 +17,7 @@
#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
#define SPINOR_OP_CLPEF 0x82 /* Clear program/erase failure flags */
+#define SPINOR_OP_CYPRESS_DIE_ERASE 0x61 /* Chip (die) erase */
#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
#define SPINOR_REG_CYPRESS_VREG 0x00800000
@@ -644,6 +645,7 @@ static int s25hx_t_late_init(struct spi_nor *nor)
params->ready = cypress_nor_sr_ready_and_clear;
cypress_nor_ecc_init(nor);
+ params->die_erase_opcode = SPINOR_OP_CYPRESS_DIE_ERASE;
return 0;
}
@@ -933,7 +935,6 @@ static const struct flash_info spansion_nor_parts[] = {
.id = SNOR_ID(0x34, 0x2a, 0x1c, 0x0f, 0x00, 0x90),
.name = "s25hl02gt",
.mfr_flags = USE_CLPEF,
- .flags = NO_CHIP_ERASE,
.fixups = &s25hx_t_fixups
}, {
.id = SNOR_ID(0x34, 0x2b, 0x19, 0x0f, 0x08, 0x90),
@@ -954,7 +955,6 @@ static const struct flash_info spansion_nor_parts[] = {
.id = SNOR_ID(0x34, 0x2b, 0x1c, 0x0f, 0x00, 0x90),
.name = "s25hs02gt",
.mfr_flags = USE_CLPEF,
- .flags = NO_CHIP_ERASE,
.fixups = &s25hx_t_fixups
}, {
.id = SNOR_ID(0x34, 0x5a, 0x1a),
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index 44d2a546bf17..180b7390690c 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -13,12 +13,12 @@
#define SST26VF_CR_BPNV BIT(3)
-static int sst26vf_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int sst26vf_nor_lock(struct spi_nor *nor, loff_t ofs, u64 len)
{
return -EOPNOTSUPP;
}
-static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
{
int ret;
@@ -38,7 +38,7 @@ static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
return spi_nor_global_block_unlock(nor);
}
-static int sst26vf_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int sst26vf_nor_is_locked(struct spi_nor *nor, loff_t ofs, u64 len)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
index 585813310ee1..e48c3cff247a 100644
--- a/drivers/mtd/spi-nor/swp.c
+++ b/drivers/mtd/spi-nor/swp.c
@@ -53,7 +53,7 @@ static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
}
static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
- uint64_t *len)
+ u64 *len)
{
struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
@@ -90,10 +90,10 @@ static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
* (if @locked is false); false otherwise.
*/
static bool spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
- uint64_t len, u8 sr, bool locked)
+ u64 len, u8 sr, bool locked)
{
loff_t lock_offs, lock_offs_max, offs_max;
- uint64_t lock_len;
+ u64 lock_len;
if (!len)
return true;
@@ -111,14 +111,13 @@ static bool spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
return (ofs >= lock_offs_max) || (offs_max <= lock_offs);
}
-static bool spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
- u8 sr)
+static bool spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, u64 len, u8 sr)
{
return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
}
-static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs,
- uint64_t len, u8 sr)
+static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, u64 len,
+ u8 sr)
{
return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
}
@@ -156,7 +155,7 @@ static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs,
*
* Returns negative on errors, 0 on success.
*/
-static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
{
struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
@@ -246,7 +245,7 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
*
* Returns negative on errors, 0 on success.
*/
-static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
{
struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
@@ -331,7 +330,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
* Returns 1 if entire region is locked, 0 if any portion is unlocked, and
* negative on errors.
*/
-static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, u64 len)
{
int ret;
@@ -353,7 +352,7 @@ void spi_nor_init_default_locking_ops(struct spi_nor *nor)
nor->params->locking_ops = &spi_nor_sr_locking_ops;
}
-static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, u64 len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
@@ -368,7 +367,7 @@ static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
-static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, u64 len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
@@ -383,7 +382,7 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
-static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, u64 len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c
index 2dfdc555a69f..96064e4babf0 100644
--- a/drivers/mtd/spi-nor/sysfs.c
+++ b/drivers/mtd/spi-nor/sysfs.c
@@ -78,6 +78,8 @@ static umode_t spi_nor_sysfs_is_visible(struct kobject *kobj,
if (attr == &dev_attr_manufacturer.attr && !nor->manufacturer)
return 0;
+ if (attr == &dev_attr_partname.attr && !nor->info->name)
+ return 0;
if (attr == &dev_attr_jedec_id.attr && !nor->info->id && !nor->id)
return 0;
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 04da685c36be..211f279a33a9 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -18,7 +18,6 @@
struct ssfdcr_record {
struct mtd_blktrans_dev mbd;
- int usecount;
unsigned char heads;
unsigned char sectors;
unsigned short cylinders;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 579eebb6fc56..e1f1e646cf48 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -12093,6 +12093,8 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_cfg_ntp_filters(bp);
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
+ if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
+ netdev_info(bp->dev, "Receive PF driver unload event!\n");
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
bnxt_hwrm_port_qstats(bp, 0);
bnxt_hwrm_port_qstats_ext(bp, 0);
@@ -13093,8 +13095,6 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
}
}
}
- if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
- netdev_info(bp->dev, "Receive PF driver unload event!\n");
}
#else
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 9282403d1bf6..2d7ae71287b1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2132,8 +2132,10 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
/* Note: if we ever change from DMA_TX_APPEND_CRC below we
* will need to restore software padding of "runt" packets
*/
+ len_stat |= DMA_TX_APPEND_CRC;
+
if (!i) {
- len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
+ len_stat |= DMA_SOP;
if (skb->ip_summed == CHECKSUM_PARTIAL)
len_stat |= DMA_TX_DO_CSUM;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b618797a7e8d..f1695c889d3a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1041,7 +1041,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
return;
order = get_order(alloc_size);
- if (order > MAX_ORDER) {
+ if (order > MAX_PAGE_ORDER) {
if (net_ratelimit())
dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
return;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4e18b4cefa97..94ac36b1408b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -48,7 +48,7 @@
* of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus
* some padding.
*
- * But the size of a single DMA region is limited by MAX_ORDER in the
+ * But the size of a single DMA region is limited by MAX_PAGE_ORDER in the
* kernel (about 16MB currently). To support say 4K Jumbo frames, we
* use a set of LTBs (struct ltb_set) per pool.
*
@@ -75,7 +75,7 @@
* pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160
* plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC.
*/
-#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << MAX_ORDER) * PAGE_SIZE))
+#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << MAX_PAGE_ORDER) * PAGE_SIZE))
#define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
#define IBMVNIC_LTB_SET_SIZE (38 << 20)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1ab8dbe2d880..d5519af34657 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -107,12 +107,18 @@ static struct workqueue_struct *i40e_wq;
static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
struct net_device *netdev, int delta)
{
+ struct netdev_hw_addr_list *ha_list;
struct netdev_hw_addr *ha;
if (!f || !netdev)
return;
- netdev_for_each_mc_addr(ha, netdev) {
+ if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr))
+ ha_list = &netdev->uc;
+ else
+ ha_list = &netdev->mc;
+
+ netdev_hw_addr_list_for_each(ha, ha_list) {
if (ether_addr_equal(ha->addr, f->macaddr)) {
ha->refcount += delta;
if (ha->refcount <= 0)
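A standalone sketch of the address classification the fix above relies on: a MAC address is multicast when the least-significant bit of its first octet is set; the driver picks the unicast list for unicast and link-local addresses and the multicast list otherwise. The helper below only mirrors the basic multicast-bit test, and the addresses are examples.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Multicast bit is the LSB of the first octet (as in is_multicast_ether_addr()). */
static bool is_multicast(const uint8_t *addr)
{
	return addr[0] & 0x01;
}

int main(void)
{
	const uint8_t ucast[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 }; /* example unicast */
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb }; /* example IPv4 mcast */

	printf("52:54:00:12:34:56 -> %s list\n", is_multicast(ucast) ? "mc" : "uc");
	printf("01:00:5e:00:00:fb -> %s list\n", is_multicast(mcast) ? "mc" : "uc");
	return 0;
}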
@@ -16512,6 +16518,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
return;
i40e_reset_and_rebuild(pf, false, false);
+#ifdef CONFIG_PCI_IOV
+ i40e_restore_all_vfs_msi_state(pdev);
+#endif /* CONFIG_PCI_IOV */
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 3f99eb198245..de5ec4e6bedf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -154,6 +154,32 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
(u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
+#ifdef CONFIG_PCI_IOV
+void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
+{
+ u16 vf_id;
+ u16 pos;
+
+ /* Continue only if this is a PF */
+ if (!pdev->is_physfn)
+ return;
+
+ if (!pci_num_vf(pdev))
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ struct pci_dev *vf_dev = NULL;
+
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
+ while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
+ if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
+ pci_restore_msi_state(vf_dev);
+ }
+ }
+}
+#endif /* CONFIG_PCI_IOV */
+
/**
* i40e_vc_notify_vf_reset
* @vf: pointer to the VF structure
@@ -3521,16 +3547,16 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
bool found = false;
int bkt;
- if (!tc_filter->action) {
+ if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
dev_info(&pf->pdev->dev,
- "VF %d: Currently ADq doesn't support Drop Action\n",
- vf->vf_id);
+ "VF %d: ADQ doesn't support this action (%d)\n",
+ vf->vf_id, tc_filter->action);
goto err;
}
/* action_meta is TC number here to which the filter is applied */
if (!tc_filter->action_meta ||
- tc_filter->action_meta > I40E_MAX_VF_VSI) {
+ tc_filter->action_meta > vf->num_tc) {
dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
vf->vf_id, tc_filter->action_meta);
goto err;
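
A minimal sketch (not part of the patch) condensing the stricter ADQ cloud-filter checks added above: only redirect-to-TC actions are accepted, and the target TC must be one the VF actually owns. The helper name is hypothetical; the field names, constant and bound (vf->num_tc) come straight from the hunks.

    /* Illustrative only -- condensed form of the validation above. */
    static bool i40e_adq_cloud_filter_ok(const struct virtchnl_filter *f,
                                         u8 num_tc)
    {
            /* Drop and any other action remain unsupported for ADQ. */
            if (f->action != VIRTCHNL_ACTION_TC_REDIRECT)
                    return false;

            /* action_meta is the target TC; it must be within the VF's TCs. */
            return f->action_meta && f->action_meta <= num_tc;
    }
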
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 2ee0f8a23248..5fd607c0de0a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -137,6 +137,9 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
+#ifdef CONFIG_PCI_IOV
+void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev);
+#endif /* CONFIG_PCI_IOV */
int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
struct ifla_vf_stats *vf_stats);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index d7fdb7ba7268..fbd5d92182d3 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1359,8 +1359,9 @@ struct ice_aqc_get_link_status_data {
u8 lp_flowcontrol;
#define ICE_AQ_LINK_LP_PAUSE_ADV BIT(0)
#define ICE_AQ_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved5[5];
#define ICE_AQC_LS_DATA_SIZE_V2 \
- offsetofend(struct ice_aqc_get_link_status_data, lp_flowcontrol)
+ offsetofend(struct ice_aqc_get_link_status_data, reserved5)
} __packed;
/* Set event mask command (direct 0x0613) */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 9a6c25f98632..edac34c796ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -5332,7 +5332,6 @@ ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
u8 *eec_mode)
{
struct ice_aqc_get_cgu_dpll_status *cmd;
- const s64 nsec_per_psec = 1000LL;
struct ice_aq_desc desc;
int status;
@@ -5348,8 +5347,7 @@ ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
*phase_offset = le32_to_cpu(cmd->phase_offset_h);
*phase_offset <<= 32;
*phase_offset += le32_to_cpu(cmd->phase_offset_l);
- *phase_offset = div64_s64(sign_extend64(*phase_offset, 47),
- nsec_per_psec);
+ *phase_offset = sign_extend64(*phase_offset, 47);
*eec_mode = cmd->eec_mode;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index fb9c93f37e84..adfdea1e2805 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2146,7 +2146,7 @@ static int ice_configure_phy(struct ice_vsi *vsi)
/* Ensure we have media as we cannot configure a medialess port */
if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
- return -EPERM;
+ return -ENOMEDIUM;
ice_print_topo_conflict(vsi);
@@ -9187,8 +9187,14 @@ int ice_stop(struct net_device *netdev)
int link_err = ice_force_phys_link_state(vsi, false);
if (link_err) {
- netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
- vsi->vsi_num, link_err);
+ if (link_err == -ENOMEDIUM)
+ netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
+ vsi->vsi_num);
+ else
+ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+ vsi->vsi_num, link_err);
+
+ ice_vsi_close(vsi);
return -EIO;
}
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 81288a17da2a..20c4b3a64710 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -1044,7 +1044,6 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
}
idpf_rx_sync_for_cpu(rx_buf, fields.size);
- skb = rx_q->skb;
if (skb)
idpf_rx_add_frag(rx_buf, skb, fields.size);
else
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 1f728a9004d9..9e942e5baf39 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -396,7 +396,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
if (!rxq)
return;
- if (!bufq && idpf_is_queue_model_split(q_model) && rxq->skb) {
+ if (rxq->skb) {
dev_kfree_skb_any(rxq->skb);
rxq->skb = NULL;
}
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 07e72c72d156..8dc837889723 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -1104,9 +1104,9 @@ struct virtchnl2_rss_key {
__le32 vport_id;
__le16 key_len;
u8 pad;
- __DECLARE_FLEX_ARRAY(u8, key_flex);
-};
-VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rss_key);
+ u8 key_flex[];
+} __packed;
+VIRTCHNL2_CHECK_STRUCT_LEN(7, virtchnl2_rss_key);
/**
* struct virtchnl2_queue_chunk - chunk of contiguous queues
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index f48f82d5e274..85cc16396506 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -568,6 +568,7 @@ struct igc_nfc_filter {
u16 etype;
__be16 vlan_etype;
u16 vlan_tci;
+ u16 vlan_tci_mask;
u8 src_addr[ETH_ALEN];
u8 dst_addr[ETH_ALEN];
u8 user_data[8];
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 785eaa8e0ba8..859b2636f3d9 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -958,6 +958,7 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev,
}
#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+#define VLAN_TCI_FULL_MASK ((__force __be16)~0)
static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
@@ -980,10 +981,16 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
}
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
+ fsp->flow_type |= FLOW_EXT;
+ fsp->h_ext.vlan_etype = rule->filter.vlan_etype;
+ fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK;
+ }
+
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
fsp->flow_type |= FLOW_EXT;
fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci);
- fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
+ fsp->m_ext.vlan_tci = htons(rule->filter.vlan_tci_mask);
}
if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
@@ -1218,6 +1225,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci);
+ rule->filter.vlan_tci_mask = ntohs(fsp->m_ext.vlan_tci);
rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
}
@@ -1255,11 +1263,19 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data));
}
- /* When multiple filter options or user data or vlan etype is set, use a
- * flex filter.
+ /* The i225/i226 supports several filter types. Flex filters can match
+ * up to the first 128 bytes of a packet. Use them:
+ * a) for specific user data,
+ * b) for the VLAN EtherType,
+ * c) for a full TCI match, or
+ * d) when multiple filter criteria are set.
+ *
+ * Otherwise, use the simple MAC, VLAN PRIO or EtherType filters.
*/
if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) ||
(rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) ||
+ ((rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) &&
+ rule->filter.vlan_tci_mask == ntohs(VLAN_TCI_FULL_MASK)) ||
(rule->filter.match_flags & (rule->filter.match_flags - 1)))
rule->flex = true;
else
@@ -1329,6 +1345,26 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter,
return -EINVAL;
}
+ /* There are two ways to match the VLAN TCI:
+ * 1. Match on PCP field and use vlan prio filter for it
+ * 2. Match on complete TCI field and use flex filter for it
+ */
+ if ((fsp->flow_type & FLOW_EXT) &&
+ fsp->m_ext.vlan_tci &&
+ fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK) &&
+ fsp->m_ext.vlan_tci != VLAN_TCI_FULL_MASK) {
+ netdev_dbg(netdev, "VLAN mask not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* VLAN EtherType can only be matched by full mask. */
+ if ((fsp->flow_type & FLOW_EXT) &&
+ fsp->m_ext.vlan_etype &&
+ fsp->m_ext.vlan_etype != ETHER_TYPE_FULL_MASK) {
+ netdev_dbg(netdev, "VLAN EtherType mask not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (fsp->location >= IGC_MAX_RXNFC_RULES) {
netdev_dbg(netdev, "Invalid location\n");
return -EINVAL;
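
A minimal sketch of the VLAN TCI mask policy the checks above implement: a PCP-only mask keeps the existing VLAN priority filter, a full mask forces a flex filter, and any other mask is rejected with -EOPNOTSUPP. The helper is hypothetical; the macros are the ones used in the hunks.

    /* Illustrative only. */
    static bool igc_vlan_tci_mask_ok(__be16 mask)
    {
            return mask == htons(VLAN_PRIO_MASK) || /* PCP match -> prio filter */
                   mask == VLAN_TCI_FULL_MASK;      /* full match -> flex filter */
    }
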
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index a9c08321aca9..22cefb1eeedf 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -227,7 +227,7 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
wr32(IGC_TQAVCC(i), tqavcc);
wr32(IGC_TQAVHC(i),
- 0x80000000 + ring->hicredit * 0x7735);
+ 0x80000000 + ring->hicredit * 0x7736);
} else {
/* Disable any CBS for the queue */
txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index ab3e39eef2eb..8c0732c9a7ee 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -528,7 +528,7 @@ struct npc_lt_def {
u8 ltype_mask;
u8 ltype_match;
u8 lid;
-};
+} __packed;
struct npc_lt_def_ipsec {
u8 ltype_mask;
@@ -536,7 +536,7 @@ struct npc_lt_def_ipsec {
u8 lid;
u8 spi_offset;
u8 spi_nz;
-};
+} __packed;
struct npc_lt_def_apad {
u8 ltype_mask;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index cce2806aaa50..8802961b8889 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -905,6 +905,7 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
+int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable);
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
u16 pfc_en);
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 15a319684ed3..38acdc7a73bb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -465,6 +465,23 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
}
+int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
+}
+
int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
{
struct mac_ops *mac_ops;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 4227ebb4a758..58744313f0eb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4143,90 +4143,18 @@ static void nix_find_link_frs(struct rvu *rvu,
req->minlen = minlen;
}
-static int
-nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
- u16 pcifunc, u64 tx_credits)
-{
- struct rvu_hwinfo *hw = rvu->hw;
- int pf = rvu_get_pf(pcifunc);
- u8 cgx_id = 0, lmac_id = 0;
- unsigned long poll_tmo;
- bool restore_tx_en = 0;
- struct nix_hw *nix_hw;
- u64 cfg, sw_xoff = 0;
- u32 schq = 0;
- u32 credits;
- int rc;
-
- nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
- return NIX_AF_ERR_INVALID_NIXBLK;
-
- if (tx_credits == nix_hw->tx_credits[link])
- return 0;
-
- /* Enable cgx tx if disabled for credits to be back */
- if (is_pf_cgxmapped(rvu, pf)) {
- rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true);
- }
-
- mutex_lock(&rvu->rsrc_lock);
- /* Disable new traffic to link */
- if (hw->cap.nix_shaping) {
- schq = nix_get_tx_link(rvu, pcifunc);
- sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
- rvu_write64(rvu, blkaddr,
- NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
- }
-
- rc = NIX_AF_ERR_LINK_CREDITS;
- poll_tmo = jiffies + usecs_to_jiffies(200000);
- /* Wait for credits to return */
- do {
- if (time_after(jiffies, poll_tmo))
- goto exit;
- usleep_range(100, 200);
-
- cfg = rvu_read64(rvu, blkaddr,
- NIX_AF_TX_LINKX_NORM_CREDIT(link));
- credits = (cfg >> 12) & 0xFFFFFULL;
- } while (credits != nix_hw->tx_credits[link]);
-
- cfg &= ~(0xFFFFFULL << 12);
- cfg |= (tx_credits << 12);
- rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
- rc = 0;
-
- nix_hw->tx_credits[link] = tx_credits;
-
-exit:
- /* Enable traffic back */
- if (hw->cap.nix_shaping && !sw_xoff)
- rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);
-
- /* Restore state of cgx tx */
- if (restore_tx_en)
- rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
-
- mutex_unlock(&rvu->rsrc_lock);
- return rc;
-}
-
int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int pf = rvu_get_pf(pcifunc);
- int blkaddr, schq, link = -1;
- struct nix_txsch *txsch;
- u64 cfg, lmac_fifo_len;
+ int blkaddr, link = -1;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
u8 cgx = 0, lmac = 0;
u16 max_mtu;
+ u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -4247,25 +4175,6 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
return NIX_AF_ERR_FRS_INVALID;
- /* Check if requester wants to update SMQ's */
- if (!req->update_smq)
- goto rx_frscfg;
-
- /* Update min/maxlen in each of the SMQ attached to this PF/VF */
- txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
- mutex_lock(&rvu->rsrc_lock);
- for (schq = 0; schq < txsch->schq.max; schq++) {
- if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
- continue;
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
- cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
- if (req->update_minlen)
- cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
- }
- mutex_unlock(&rvu->rsrc_lock);
-
-rx_frscfg:
/* Check if config is for SDP link */
if (req->sdp_link) {
if (!hw->sdp_links)
@@ -4288,7 +4197,6 @@ rx_frscfg:
if (link < 0)
return NIX_AF_ERR_RX_LINK_INVALID;
-
linkcfg:
nix_find_link_frs(rvu, req, pcifunc);
@@ -4298,19 +4206,7 @@ linkcfg:
cfg = (cfg & ~0xFFFFULL) | req->minlen;
rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
- if (req->sdp_link || pf == 0)
- return 0;
-
- /* Update transmit credits for CGX links */
- lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac);
- if (!lmac_fifo_len) {
- dev_err(rvu->dev,
- "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
- __func__, cgx, lmac);
- return 0;
- }
- return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
- (lmac_fifo_len - req->maxlen) / 16);
+ return 0;
}
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
@@ -4841,7 +4737,13 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
pfvf = rvu_get_pfvf(rvu, pcifunc);
clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
- return rvu_cgx_start_stop_io(rvu, pcifunc, false);
+ err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
+ if (err)
+ return err;
+
+ rvu_cgx_tx_enable(rvu, pcifunc, true);
+
+ return 0;
}
#define RX_SA_BASE GENMASK_ULL(52, 7)
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
index 0d5a41a2ae01..227d01cace3f 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
@@ -267,6 +267,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
priv->stats.rx_truncate_errors++;
}
+ /* Read the receive consumer index before replenishing so that this
+ * routine returns an accurate value even if a packet is received into
+ * the just-replenished buffer before this routine exits.
+ */
+ rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
+ rx_ci_rem = rx_ci % priv->rx_q_entries;
+
/* Let hardware know we've replenished one buffer */
rx_pi++;
@@ -279,8 +286,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
rx_pi_rem = rx_pi % priv->rx_q_entries;
if (rx_pi_rem == 0)
priv->valid_polarity ^= 1;
- rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
- rx_ci_rem = rx_ci % priv->rx_q_entries;
if (skb)
netif_receive_skb(skb);
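
A minimal sketch of why the consumer index is now read before the replenish: if the "more packets pending" decision compares the producer and consumer remainders, sampling CI after the producer index has advanced could count a packet that arrived in the buffer this very call just replenished. The helper below is a hypothetical restatement of that comparison, not code from the driver.

    /* Illustrative only -- assumes the pending check is a PI/CI comparison. */
    static bool rx_more_work(u64 rx_pi, u64 rx_ci, unsigned int q_entries)
    {
            return (rx_pi % q_entries) != (rx_ci % q_entries);
    }
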
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 88e26c120b48..54f2eac11a63 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -156,7 +156,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned int op,
txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
- if (kss->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) {
+ if (kss->spidev->master->flags & SPI_CONTROLLER_HALF_DUPLEX) {
msg = &kss->spi_msg2;
xfer = kss->spi_xfer2;
@@ -180,7 +180,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned int op,
ret = spi_sync(kss->spidev, msg);
if (ret < 0)
netdev_err(ks->netdev, "read: spi_sync() failed\n");
- else if (kss->spidev->master->flags & SPI_MASTER_HALF_DUPLEX)
+ else if (kss->spidev->master->flags & SPI_CONTROLLER_HALF_DUPLEX)
memcpy(rxb, trx, rxl);
else
memcpy(rxb, trx + 2, rxl);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 0d57ffcedf0c..fc78bc959ded 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2591,6 +2591,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
netdev_err(qdev->ndev, "lBufQ failed\n");
+ kfree(qdev->lrg_buf);
return -ENOMEM;
}
qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
@@ -2615,6 +2616,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
qdev->lrg_buf_q_alloc_size,
qdev->lrg_buf_q_alloc_virt_addr,
qdev->lrg_buf_q_alloc_phy_addr);
+ kfree(qdev->lrg_buf);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index bb787a52bc75..81fd31f6fac4 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1211,7 +1211,7 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
- rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10);
+ rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
}
static void rtl8168_driver_start(struct rtl8169_private *tp)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 664eda4b5a11..8649b3e90edb 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -66,16 +66,27 @@ int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
return -ETIMEDOUT;
}
-static int ravb_config(struct net_device *ndev)
+static int ravb_set_opmode(struct net_device *ndev, u32 opmode)
{
+ u32 csr_ops = 1U << (opmode & CCC_OPC);
+ u32 ccc_mask = CCC_OPC;
int error;
- /* Set config mode */
- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
- /* Check if the operating mode is changed to the config mode */
- error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
- if (error)
- netdev_err(ndev, "failed to switch device to config mode\n");
+ /* If gPTP active in config mode is supported, it needs to be configured
+ * along with CSEL and operating mode in the same access. This is a
+ * hardware limitation.
+ */
+ if (opmode & CCC_GAC)
+ ccc_mask |= CCC_GAC | CCC_CSEL;
+
+ /* Set operating mode */
+ ravb_modify(ndev, CCC, ccc_mask, opmode);
+ /* Check if the operating mode is changed to the requested one */
+ error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops);
+ if (error) {
+ netdev_err(ndev, "failed to switch device to requested mode (%u)\n",
+ opmode & CCC_OPC);
+ }
return error;
}
@@ -673,7 +684,7 @@ static int ravb_dmac_init(struct net_device *ndev)
int error;
/* Set CONFIG mode */
- error = ravb_config(ndev);
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
if (error)
return error;
@@ -682,9 +693,7 @@ static int ravb_dmac_init(struct net_device *ndev)
return error;
/* Setting the control will start the AVB-DMAC process. */
- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
-
- return 0;
+ return ravb_set_opmode(ndev, CCC_OPC_OPERATION);
}
static void ravb_get_tx_tstamp(struct net_device *ndev)
@@ -1046,7 +1055,7 @@ static int ravb_stop_dma(struct net_device *ndev)
return error;
/* Stop AVB-DMAC process */
- return ravb_config(ndev);
+ return ravb_set_opmode(ndev, CCC_OPC_CONFIG);
}
/* E-MAC interrupt handler */
@@ -2560,21 +2569,25 @@ static int ravb_set_gti(struct net_device *ndev)
return 0;
}
-static void ravb_set_config_mode(struct net_device *ndev)
+static int ravb_set_config_mode(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ int error;
if (info->gptp) {
- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ if (error)
+ return error;
/* Set CSEL value */
ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
} else if (info->ccc_gac) {
- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
- CCC_GAC | CCC_CSEL_HPB);
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
} else {
- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
}
+
+ return error;
}
/* Set tx and rx clock internal delay modes */
@@ -2794,7 +2807,9 @@ static int ravb_probe(struct platform_device *pdev)
ndev->ethtool_ops = &ravb_ethtool_ops;
/* Set AVB config mode */
- ravb_set_config_mode(ndev);
+ error = ravb_set_config_mode(ndev);
+ if (error)
+ goto out_disable_gptp_clk;
if (info->gptp || info->ccc_gac) {
/* Set GTI value */
@@ -2917,8 +2932,7 @@ static void ravb_remove(struct platform_device *pdev)
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
- /* Set reset mode */
- ravb_write(ndev, CCC_OPC_RESET, CCC);
+ ravb_set_opmode(ndev, CCC_OPC_RESET);
clk_disable_unprepare(priv->gptp_clk);
clk_disable_unprepare(priv->refclk);
@@ -3000,8 +3014,11 @@ static int __maybe_unused ravb_resume(struct device *dev)
int ret = 0;
/* If WoL is enabled set reset mode to rearm the WoL logic */
- if (priv->wol_enabled)
- ravb_write(ndev, CCC_OPC_RESET, CCC);
+ if (priv->wol_enabled) {
+ ret = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (ret)
+ return ret;
+ }
/* All register have been reset to default values.
* Restore all registers which where setup at probe time and
@@ -3009,7 +3026,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
*/
/* Set AVB config mode */
- ravb_set_config_mode(ndev);
+ ret = ravb_set_config_mode(ndev);
+ if (ret)
+ return ret;
if (info->gptp || info->ccc_gac) {
/* Set GTI value */
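
A short usage summary of the new ravb_set_opmode() helper, collected from the hunks above. The CSR_OPS bit polled for is derived as 1U << (opmode & CCC_OPC), so each caller passes the target CCC.OPC value (plus CCC_GAC | CCC_CSEL_HPB for the gPTP-in-config case) and the helper waits for the matching state:

    /* Illustrative call sites, all taken from the hunks above. */
    ravb_set_opmode(ndev, CCC_OPC_CONFIG);                          /* config mode */
    ravb_set_opmode(ndev, CCC_OPC_OPERATION);                       /* start AVB-DMAC */
    ravb_set_opmode(ndev, CCC_OPC_RESET);                           /* reset mode */
    ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB); /* config + gPTP */
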
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index d2f35ee15eff..fac227d372db 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -823,8 +823,10 @@ int efx_probe_filters(struct efx_nic *efx)
}
if (!success) {
- efx_for_each_channel(channel, efx)
+ efx_for_each_channel(channel, efx) {
kfree(channel->rps_flow_id);
+ channel->rps_flow_id = NULL;
+ }
efx->type->filter_table_remove(efx);
rc = -ENOMEM;
goto out_unlock;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 0d98defb011e..0ec7412febc7 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -346,12 +346,6 @@ static inline void *port_priv(struct gelic_port *port)
return port->priv;
}
-#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC
-void udbg_shutdown_ps3gelic(void);
-#else
-static inline void udbg_shutdown_ps3gelic(void) {}
-#endif
-
int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
/* shared netdev ops */
void gelic_card_up(struct gelic_card *card);
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 3777c7e2e6fc..e47bb125048d 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -161,7 +161,9 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
u8 buf[ETH_ALEN];
struct ax88172a_private *priv;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d16f592c2061..51b1868d2f22 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -334,7 +334,6 @@ struct virtio_net_common_hdr {
};
};
-static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
static bool is_xdp_frame(void *ptr)
@@ -408,6 +407,17 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p;
}
+static void virtnet_rq_free_buf(struct virtnet_info *vi,
+ struct receive_queue *rq, void *buf)
+{
+ if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(rq, buf);
+ else
+ put_page(virt_to_head_page(buf));
+}
+
static void enable_delayed_refill(struct virtnet_info *vi)
{
spin_lock_bh(&vi->refill_lock);
@@ -634,17 +644,6 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
return buf;
}
-static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
-{
- void *buf;
-
- buf = virtqueue_detach_unused_buf(rq->vq);
- if (buf && rq->do_dma)
- virtnet_rq_unmap(rq, buf, 0);
-
- return buf;
-}
-
static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{
struct virtnet_rq_dma *dma;
@@ -744,6 +743,20 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
}
}
+static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
+{
+ struct virtnet_info *vi = vq->vdev->priv;
+ struct receive_queue *rq;
+ int i = vq2rxq(vq);
+
+ rq = &vi->rq[i];
+
+ if (rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
+
+ virtnet_rq_free_buf(vi, rq, buf);
+}
+
static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
unsigned int len;
@@ -1764,7 +1777,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
DEV_STATS_INC(dev, rx_length_errors);
- virtnet_rq_free_unused_buf(rq->vq, buf);
+ virtnet_rq_free_buf(vi, rq, buf);
return;
}
@@ -2392,7 +2405,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
if (running)
napi_disable(&rq->napi);
- err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
+ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
if (err)
netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
@@ -4031,19 +4044,6 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
xdp_return_frame(ptr_to_xdp(buf));
}
-static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
-{
- struct virtnet_info *vi = vq->vdev->priv;
- int i = vq2rxq(vq);
-
- if (vi->mergeable_rx_bufs)
- put_page(virt_to_head_page(buf));
- else if (vi->big_packets)
- give_pages(&vi->rq[i], buf);
- else
- put_page(virt_to_head_page(buf));
-}
-
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -4057,10 +4057,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
}
for (i = 0; i < vi->max_queue_pairs; i++) {
- struct receive_queue *rq = &vi->rq[i];
+ struct virtqueue *vq = vi->rq[i].vq;
- while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
- virtnet_rq_free_unused_buf(rq->vq, buf);
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_rq_unmap_free_buf(vq, buf);
cond_resched();
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 56def20374f3..7805a42948af 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -770,7 +770,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
}
}
-void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq);
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
@@ -817,7 +817,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}
-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index bc6a9f861711..07931c2db494 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1783,7 +1783,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
return inta;
}
-void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
@@ -1807,7 +1807,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
isr_stats->rfkill++;
if (prev != report)
- iwl_trans_pcie_rf_kill(trans, report);
+ iwl_trans_pcie_rf_kill(trans, report, from_irq);
mutex_unlock(&trans_pcie->mutex);
if (hw_rfkill) {
@@ -1947,7 +1947,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* HW RF KILL switch toggled */
if (inta & CSR_INT_BIT_RF_KILL) {
- iwl_pcie_handle_rfkill_irq(trans);
+ iwl_pcie_handle_rfkill_irq(trans, true);
handled |= CSR_INT_BIT_RF_KILL;
}
@@ -2370,7 +2370,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/* HW RF KILL switch toggled */
if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
- iwl_pcie_handle_rfkill_irq(trans);
+ iwl_pcie_handle_rfkill_irq(trans, true);
if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
IWL_ERR(trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 92253260f568..d10208075ae5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1082,7 +1082,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
if (prev != report)
- iwl_trans_pcie_rf_kill(trans, report);
+ iwl_trans_pcie_rf_kill(trans, report, false);
return hw_rfkill;
}
@@ -1237,7 +1237,7 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}
-static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1264,7 +1264,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
IWL_DEBUG_INFO(trans,
"DEVICE_ENABLED bit was set and is now cleared\n");
- iwl_pcie_synchronize_irqs(trans);
+ if (!from_irq)
+ iwl_pcie_synchronize_irqs(trans);
iwl_pcie_rx_napi_sync(trans);
iwl_pcie_tx_stop(trans);
iwl_pcie_rx_stop(trans);
@@ -1454,7 +1455,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
}
if (hw_rfkill != was_in_rfkill)
- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
@@ -1469,12 +1470,12 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
mutex_lock(&trans_pcie->mutex);
trans_pcie->opmode_down = true;
was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
- _iwl_trans_pcie_stop_device(trans);
+ _iwl_trans_pcie_stop_device(trans, false);
iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
mutex_unlock(&trans_pcie->mutex);
}
-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
{
struct iwl_trans_pcie __maybe_unused *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1487,7 +1488,7 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
if (trans->trans_cfg->gen2)
_iwl_trans_pcie_gen2_stop_device(trans);
else
- _iwl_trans_pcie_stop_device(trans);
+ _iwl_trans_pcie_stop_device(trans, from_irq);
}
}
@@ -2887,7 +2888,7 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
IWL_WARN(trans, "changing debug rfkill %d->%d\n",
trans_pcie->debug_rfkill, new_value);
trans_pcie->debug_rfkill = new_value;
- iwl_pcie_handle_rfkill_irq(trans);
+ iwl_pcie_handle_rfkill_irq(trans, false);
return count;
}
diff --git a/drivers/nubus/bus.c b/drivers/nubus/bus.c
index 72921e4f35f6..12df4d88970c 100644
--- a/drivers/nubus/bus.c
+++ b/drivers/nubus/bus.c
@@ -32,12 +32,11 @@ static void nubus_device_remove(struct device *dev)
ndrv->remove(to_nubus_board(dev));
}
-struct bus_type nubus_bus_type = {
+static const struct bus_type nubus_bus_type = {
.name = "nubus",
.probe = nubus_device_probe,
.remove = nubus_device_remove,
};
-EXPORT_SYMBOL(nubus_bus_type);
int nubus_driver_register(struct nubus_driver *ndrv)
{
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 84f345c69ea5..c4e0432ae42a 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -201,7 +201,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed);
* @opp: opp for which level value has to be returned for
*
* Return: level read from device tree corresponding to the opp, else
- * return 0.
+ * return U32_MAX.
*/
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
{
@@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
* @index: index of the required opp
*
* Return: performance state read from device tree corresponding to the
- * required opp, else return 0.
+ * required opp, else return U32_MAX.
*/
unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
unsigned int index)
@@ -808,6 +808,16 @@ struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
struct dev_pm_opp *opp;
opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
+ if (IS_ERR(opp))
+ return opp;
+
+ /* False match */
+ if (temp == OPP_LEVEL_UNSET) {
+ dev_err(dev, "%s: OPP levels aren't available\n", __func__);
+ dev_pm_opp_put(opp);
+ return ERR_PTR(-ENODEV);
+ }
+
*level = temp;
return opp;
}
@@ -832,9 +842,14 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
* use.
*/
struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
- unsigned long *level)
+ unsigned int *level)
{
- return _find_key_floor(dev, level, 0, true, _read_level, NULL);
+ unsigned long temp = *level;
+ struct dev_pm_opp *opp;
+
+ opp = _find_key_floor(dev, &temp, 0, true, _read_level, NULL);
+ *level = temp;
+ return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_floor);
@@ -948,7 +963,7 @@ _opp_config_clk_single(struct device *dev, struct opp_table *opp_table,
dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
ret);
} else {
- opp_table->rate_clk_single = freq;
+ opp_table->current_rate_single_clk = freq;
}
return ret;
@@ -1046,40 +1061,23 @@ static int _set_opp_bw(const struct opp_table *opp_table,
return 0;
}
-static int _set_performance_state(struct device *dev, struct device *pd_dev,
- struct dev_pm_opp *opp, int i)
+/* This is only called for PM domain for now */
+static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, bool up)
{
- unsigned int pstate = likely(opp) ? opp->required_opps[i]->level: 0;
- int ret;
+ struct device **devs = opp_table->required_devs;
+ struct dev_pm_opp *required_opp;
+ int index, target, delta, ret;
- if (!pd_dev)
+ if (!devs)
return 0;
- ret = dev_pm_domain_set_performance_state(pd_dev, pstate);
- if (ret) {
- dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
- dev_name(pd_dev), pstate, ret);
- }
-
- return ret;
-}
-
-static int _opp_set_required_opps_generic(struct device *dev,
- struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down)
-{
- dev_err(dev, "setting required-opps isn't supported for non-genpd devices\n");
- return -ENOENT;
-}
-
-static int _opp_set_required_opps_genpd(struct device *dev,
- struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down)
-{
- struct device **genpd_virt_devs =
- opp_table->genpd_virt_devs ? opp_table->genpd_virt_devs : &dev;
- int index, target, delta, ret;
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(opp_table))
+ return -EBUSY;
/* Scaling up? Set required OPPs in normal order, else reverse */
- if (!scaling_down) {
+ if (up) {
index = 0;
target = opp_table->required_opp_count;
delta = 1;
@@ -1090,9 +1088,13 @@ static int _opp_set_required_opps_genpd(struct device *dev,
}
while (index != target) {
- ret = _set_performance_state(dev, genpd_virt_devs[index], opp, index);
- if (ret)
- return ret;
+ if (devs[index]) {
+ required_opp = opp ? opp->required_opps[index] : NULL;
+
+ ret = dev_pm_opp_set_opp(devs[index], required_opp);
+ if (ret)
+ return ret;
+ }
index += delta;
}
@@ -1100,34 +1102,6 @@ static int _opp_set_required_opps_genpd(struct device *dev,
return 0;
}
-/* This is only called for PM domain for now */
-static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
- struct dev_pm_opp *opp, bool up)
-{
- /* required-opps not fully initialized yet */
- if (lazy_linking_pending(opp_table))
- return -EBUSY;
-
- if (opp_table->set_required_opps)
- return opp_table->set_required_opps(dev, opp_table, opp, up);
-
- return 0;
-}
-
-/* Update set_required_opps handler */
-void _update_set_required_opps(struct opp_table *opp_table)
-{
- /* Already set */
- if (opp_table->set_required_opps)
- return;
-
- /* All required OPPs will belong to genpd or none */
- if (opp_table->required_opp_tables[0]->is_genpd)
- opp_table->set_required_opps = _opp_set_required_opps_genpd;
- else
- opp_table->set_required_opps = _opp_set_required_opps_generic;
-}
-
static int _set_opp_level(struct device *dev, struct opp_table *opp_table,
struct dev_pm_opp *opp)
{
@@ -1135,7 +1109,7 @@ static int _set_opp_level(struct device *dev, struct opp_table *opp_table,
int ret = 0;
if (opp) {
- if (!opp->level)
+ if (opp->level == OPP_LEVEL_UNSET)
return 0;
level = opp->level;
@@ -1378,12 +1352,12 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
* value of the frequency. In such a case, do not abort but
* configure the hardware to the desired frequency forcefully.
*/
- forced = opp_table->rate_clk_single != target_freq;
+ forced = opp_table->current_rate_single_clk != freq;
}
- ret = _set_opp(dev, opp_table, opp, &target_freq, forced);
+ ret = _set_opp(dev, opp_table, opp, &freq, forced);
- if (target_freq)
+ if (freq)
dev_pm_opp_put(opp);
put_opp_table:
@@ -1867,6 +1841,8 @@ struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table)
INIT_LIST_HEAD(&opp->node);
+ opp->level = OPP_LEVEL_UNSET;
+
return opp;
}
@@ -2388,19 +2364,13 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
{
int index;
- if (!opp_table->genpd_virt_devs)
- return;
-
for (index = 0; index < opp_table->required_opp_count; index++) {
- if (!opp_table->genpd_virt_devs[index])
+ if (!opp_table->required_devs[index])
continue;
- dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false);
- opp_table->genpd_virt_devs[index] = NULL;
+ dev_pm_domain_detach(opp_table->required_devs[index], false);
+ opp_table->required_devs[index] = NULL;
}
-
- kfree(opp_table->genpd_virt_devs);
- opp_table->genpd_virt_devs = NULL;
}
/*
@@ -2427,14 +2397,20 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
int index = 0, ret = -EINVAL;
const char * const *name = names;
- if (opp_table->genpd_virt_devs)
- return 0;
+ if (!opp_table->required_devs) {
+ dev_err(dev, "Required OPPs not available, can't attach genpd\n");
+ return -EINVAL;
+ }
- opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count,
- sizeof(*opp_table->genpd_virt_devs),
- GFP_KERNEL);
- if (!opp_table->genpd_virt_devs)
- return -ENOMEM;
+ /* Genpd core takes care of propagation to parent genpd */
+ if (opp_table->is_genpd) {
+ dev_err(dev, "%s: Operation not supported for genpds\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ /* Checking only the first entry is enough? */
+ if (opp_table->required_devs[0])
+ return 0;
while (*name) {
if (index >= opp_table->required_opp_count) {
@@ -2450,13 +2426,25 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
goto err;
}
- opp_table->genpd_virt_devs[index] = virt_dev;
+ /*
+ * Add the virtual genpd device as a user of the OPP table, so
+ * we can call dev_pm_opp_set_opp() on it directly.
+ *
+ * This will be automatically removed when the OPP table is
+ * removed; there is no need to handle that here.
+ */
+ if (!_add_opp_dev(virt_dev, opp_table->required_opp_tables[index])) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ opp_table->required_devs[index] = virt_dev;
index++;
name++;
}
if (virt_devs)
- *virt_devs = opp_table->genpd_virt_devs;
+ *virt_devs = opp_table->required_devs;
return 0;
@@ -2466,10 +2454,50 @@ err:
}
+static int _opp_set_required_devs(struct opp_table *opp_table,
+ struct device *dev,
+ struct device **required_devs)
+{
+ int i;
+
+ if (!opp_table->required_devs) {
+ dev_err(dev, "Required OPPs not available, can't set required devs\n");
+ return -EINVAL;
+ }
+
+ /* Has another device that shares the OPP table already set the required devs? */
+ if (opp_table->required_devs[0])
+ return 0;
+
+ for (i = 0; i < opp_table->required_opp_count; i++) {
+ /* Genpd core takes care of propagation to parent genpd */
+ if (required_devs[i] && opp_table->is_genpd &&
+ opp_table->required_opp_tables[i]->is_genpd) {
+ dev_err(dev, "%s: Operation not supported for genpds\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ opp_table->required_devs[i] = required_devs[i];
+ }
+
+ return 0;
+}
+
+static void _opp_put_required_devs(struct opp_table *opp_table)
+{
+ int i;
+
+ for (i = 0; i < opp_table->required_opp_count; i++)
+ opp_table->required_devs[i] = NULL;
+}
+
static void _opp_clear_config(struct opp_config_data *data)
{
- if (data->flags & OPP_CONFIG_GENPD)
+ if (data->flags & OPP_CONFIG_REQUIRED_DEVS)
+ _opp_put_required_devs(data->opp_table);
+ else if (data->flags & OPP_CONFIG_GENPD)
_opp_detach_genpd(data->opp_table);
+
if (data->flags & OPP_CONFIG_REGULATOR)
_opp_put_regulators(data->opp_table);
if (data->flags & OPP_CONFIG_SUPPORTED_HW)
@@ -2583,12 +2611,22 @@ int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
/* Attach genpds */
if (config->genpd_names) {
+ if (config->required_devs)
+ goto err;
+
ret = _opp_attach_genpd(opp_table, dev, config->genpd_names,
config->virt_devs);
if (ret)
goto err;
data->flags |= OPP_CONFIG_GENPD;
+ } else if (config->required_devs) {
+ ret = _opp_set_required_devs(opp_table, dev,
+ config->required_devs);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_REQUIRED_DEVS;
}
ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
@@ -2975,6 +3013,47 @@ put_table:
EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
/**
+ * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
+ * @dev: device for which we do this operation
+ *
+ * Sync voltage state of the OPP table regulators.
+ *
+ * Return: 0 on success or a negative error value.
+ */
+int dev_pm_opp_sync_regulators(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct regulator *reg;
+ int i, ret = 0;
+
+ /* Device may not have OPP table */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return 0;
+
+ /* Regulator may not be required for the device */
+ if (unlikely(!opp_table->regulators))
+ goto put_table;
+
+ /* Nothing to sync if voltage wasn't changed */
+ if (!opp_table->enabled)
+ goto put_table;
+
+ for (i = 0; i < opp_table->regulator_count; i++) {
+ reg = opp_table->regulators[i];
+ ret = regulator_sync_voltage(reg);
+ if (ret)
+ break;
+ }
+put_table:
+ /* Drop reference taken by _find_opp_table() */
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);
+
+/**
* dev_pm_opp_enable() - Enable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to enable
@@ -3097,44 +3176,3 @@ void dev_pm_opp_remove_table(struct device *dev)
dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
-
-/**
- * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
- * @dev: device for which we do this operation
- *
- * Sync voltage state of the OPP table regulators.
- *
- * Return: 0 on success or a negative error value.
- */
-int dev_pm_opp_sync_regulators(struct device *dev)
-{
- struct opp_table *opp_table;
- struct regulator *reg;
- int i, ret = 0;
-
- /* Device may not have OPP table */
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return 0;
-
- /* Regulator may not be required for the device */
- if (unlikely(!opp_table->regulators))
- goto put_table;
-
- /* Nothing to sync if voltage wasn't changed */
- if (!opp_table->enabled)
- goto put_table;
-
- for (i = 0; i < opp_table->regulator_count; i++) {
- reg = opp_table->regulators[i];
- ret = regulator_sync_voltage(reg);
- if (ret)
- break;
- }
-put_table:
- /* Drop reference taken by _find_opp_table() */
- dev_pm_opp_put_opp_table(opp_table);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);
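
A minimal sketch isolating the ordering rule kept by _set_required_opps() above: required OPPs are walked in normal order when scaling up and in reverse when scaling down, so dependencies are applied in one order going up and released in the opposite order going down. The function is hypothetical and only shows the walk.

    /* Illustrative only. */
    static void walk_required_opps(int count, bool up)
    {
            int index, target, delta;

            if (up) {                       /* scaling up: normal order */
                    index = 0;
                    target = count;
                    delta = 1;
            } else {                        /* scaling down: reverse order */
                    index = count - 1;
                    target = -1;
                    delta = -1;
            }

            while (index != target) {
                    /* dev_pm_opp_set_opp(required_devs[index], ...) goes here */
                    index += delta;
            }
    }
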
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 81fa27599d58..f9f0b22bccbb 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -165,7 +165,7 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
struct opp_table **required_opp_tables;
struct device_node *required_np, *np;
bool lazy = false;
- int count, i;
+ int count, i, size;
/* Traversing the first OPP node is all we need */
np = of_get_next_available_child(opp_np, NULL);
@@ -179,12 +179,13 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
if (count <= 0)
goto put_np;
- required_opp_tables = kcalloc(count, sizeof(*required_opp_tables),
- GFP_KERNEL);
+ size = sizeof(*required_opp_tables) + sizeof(*opp_table->required_devs);
+ required_opp_tables = kcalloc(count, size, GFP_KERNEL);
if (!required_opp_tables)
goto put_np;
opp_table->required_opp_tables = required_opp_tables;
+ opp_table->required_devs = (void *)(required_opp_tables + count);
opp_table->required_opp_count = count;
for (i = 0; i < count; i++) {
@@ -208,8 +209,6 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
mutex_lock(&opp_table_lock);
list_add(&opp_table->lazy, &lazy_opp_tables);
mutex_unlock(&opp_table_lock);
- } else {
- _update_set_required_opps(opp_table);
}
goto put_np;
@@ -296,7 +295,7 @@ void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp)
of_node_put(opp->np);
}
-static int _link_required_opps(struct dev_pm_opp *opp,
+static int _link_required_opps(struct dev_pm_opp *opp, struct opp_table *opp_table,
struct opp_table *required_table, int index)
{
struct device_node *np;
@@ -314,6 +313,39 @@ static int _link_required_opps(struct dev_pm_opp *opp,
return -ENODEV;
}
+ /*
+ * There are two genpd (as required-opp) cases that we need to handle,
+ * devices with a single genpd and ones with multiple genpds.
+ *
+ * The single genpd case requires special handling as we need to use the
+ * same `dev` structure (instead of a virtual one provided by genpd
+ * core) for setting the performance state.
+ *
+ * It doesn't make sense for a device's DT entry to have both
+ * "opp-level" and single "required-opps" entry pointing to a genpd's
+ * OPP, as that would make the OPP core call
+ * dev_pm_domain_set_performance_state() for two different values for
+ * the same device structure. Let's treat the single-genpd configuration as a
+ * case where the OPP's level is directly available without required-opp
+ * link in the DT.
+ *
+ * Just update the `level` with the right value, which
+ * dev_pm_opp_set_opp() will take care of in the normal path itself.
+ *
+ * There is another case though, where a genpd's OPP table has
+ * required-opps set to a parent genpd. The OPP core expects the user to
+ * set the respective required `struct device` pointer via
+ * dev_pm_opp_set_config().
+ */
+ if (required_table->is_genpd && opp_table->required_opp_count == 1 &&
+ !opp_table->required_devs[0]) {
+ /* Genpd core takes care of propagation to parent genpd */
+ if (!opp_table->is_genpd) {
+ if (!WARN_ON(opp->level != OPP_LEVEL_UNSET))
+ opp->level = opp->required_opps[0]->level;
+ }
+ }
+
return 0;
}
@@ -338,7 +370,7 @@ static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
if (IS_ERR_OR_NULL(required_table))
continue;
- ret = _link_required_opps(opp, required_table, i);
+ ret = _link_required_opps(opp, opp_table, required_table, i);
if (ret)
goto free_required_opps;
}
@@ -359,7 +391,7 @@ static int lazy_link_required_opps(struct opp_table *opp_table,
int ret;
list_for_each_entry(opp, &opp_table->opp_list, node) {
- ret = _link_required_opps(opp, new_table, index);
+ ret = _link_required_opps(opp, opp_table, new_table, index);
if (ret)
return ret;
}
@@ -422,7 +454,6 @@ static void lazy_link_required_opp_table(struct opp_table *new_table)
/* All required opp-tables found, remove from lazy list */
if (!lazy) {
- _update_set_required_opps(opp_table);
list_del_init(&opp_table->lazy);
list_for_each_entry(opp, &opp_table->opp_list, node)
@@ -1393,8 +1424,14 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
opp = _find_opp_of_np(opp_table, required_np);
if (opp) {
- pstate = opp->level;
+ if (opp->level == OPP_LEVEL_UNSET) {
+ pr_err("%s: OPP levels aren't available for %pOF\n",
+ __func__, np);
+ } else {
+ pstate = opp->level;
+ }
dev_pm_opp_put(opp);
+
}
dev_pm_opp_put_opp_table(opp_table);
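
Note that required_devs above is not a separate allocation: _opp_table_alloc_required_tables() sizes one kcalloc() for both arrays and points required_devs just past the table pointers. A small layout comment, assuming count == 2:

    /*
     * Illustrative layout of the combined allocation (count == 2):
     *
     *   [ tbl[0] | tbl[1] | dev[0] | dev[1] ]
     *   ^ required_opp_tables   ^ required_devs == (void *)(required_opp_tables + count)
     */
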
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 08366f90f16b..cff1fabd1ae3 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -35,6 +35,7 @@ extern struct list_head opp_tables;
#define OPP_CONFIG_PROP_NAME BIT(3)
#define OPP_CONFIG_SUPPORTED_HW BIT(4)
#define OPP_CONFIG_GENPD BIT(5)
+#define OPP_CONFIG_REQUIRED_DEVS BIT(6)
/**
* struct opp_config_data - data for set config operations
@@ -49,6 +50,18 @@ struct opp_config_data {
unsigned int flags;
};
+/**
+ * struct dev_pm_opp_icc_bw - Interconnect bandwidth values
+ * @avg: Average bandwidth corresponding to this OPP (in icc units)
+ * @peak: Peak bandwidth corresponding to this OPP (in icc units)
+ *
+ * This structure stores the bandwidth values for a single interconnect path.
+ */
+struct dev_pm_opp_icc_bw {
+ u32 avg;
+ u32 peak;
+};
+
/*
* Internal data structure organization with the OPP layer library is as
* follows:
@@ -157,12 +170,12 @@ enum opp_table_access {
* @clock_latency_ns_max: Max clock latency in nanoseconds.
* @parsed_static_opps: Count of devices for which OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
- * @rate_clk_single: Currently configured frequency for single clk.
+ * @current_rate_single_clk: Currently configured frequency for single clk.
* @current_opp: Currently configured OPP for the table.
* @suspend_opp: Pointer to OPP to be used during device suspend.
- * @genpd_virt_devs: List of virtual devices for multiple genpd support.
* @required_opp_tables: List of device OPP tables that are required by OPPs in
* this table.
+ * @required_devs: List of devices for required OPP tables.
* @required_opp_count: Number of required devices.
* @supported_hw: Array of version number to support.
* @supported_hw_count: Number of elements in supported_hw array.
@@ -180,7 +193,6 @@ enum opp_table_access {
* @path_count: Number of interconnect paths
* @enabled: Set to true if the device's resources are enabled/configured.
* @is_genpd: Marks if the OPP table belongs to a genpd.
- * @set_required_opps: Helper responsible to set required OPPs.
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
*
@@ -207,12 +219,12 @@ struct opp_table {
unsigned int parsed_static_opps;
enum opp_table_access shared_opp;
- unsigned long rate_clk_single;
+ unsigned long current_rate_single_clk;
struct dev_pm_opp *current_opp;
struct dev_pm_opp *suspend_opp;
- struct device **genpd_virt_devs;
struct opp_table **required_opp_tables;
+ struct device **required_devs;
unsigned int required_opp_count;
unsigned int *supported_hw;
@@ -229,8 +241,6 @@ struct opp_table {
unsigned int path_count;
bool enabled;
bool is_genpd;
- int (*set_required_opps)(struct device *dev,
- struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down);
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index 8f3f13fbbb25..e3b97cd1fbbf 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -373,23 +374,15 @@ static int ti_opp_supply_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device *cpu_dev = get_cpu_device(0);
- const struct of_device_id *match;
const struct ti_opp_supply_of_data *of_data;
int ret = 0;
- match = of_match_device(ti_opp_supply_of_match, dev);
- if (!match) {
- /* We do not expect this to happen */
- dev_err(dev, "%s: Unable to match device\n", __func__);
- return -ENODEV;
- }
- if (!match->data) {
+ of_data = device_get_match_data(dev);
+ if (!of_data) {
/* Again, unlikely.. but mistakes do happen */
dev_err(dev, "%s: Bad data in match\n", __func__);
return -EINVAL;
}
- of_data = match->data;
-
dev_set_drvdata(dev, (void *)of_data);
/* If we need optimized voltage */
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 6554a2e89d36..6449056b57dd 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -598,3 +598,15 @@ int pci_write_config_dword(const struct pci_dev *dev, int where,
return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_dword);
+
+void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
+ u32 clear, u32 set)
+{
+ u32 val;
+
+ pci_read_config_dword(dev, pos, &val);
+ val &= ~clear;
+ val |= set;
+ pci_write_config_dword(dev, pos, val);
+}
+EXPORT_SYMBOL(pci_clear_and_set_config_dword);
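
The new pci_clear_and_set_config_dword() helper is a plain read-modify-write on a config-space dword: bits in 'clear' are masked off, bits in 'set' are ORed in, and the result is written back. A hedged usage sketch mirroring the ASPM callers converted later in this patch (the L1SS offset is passed in by the caller):

#include <linux/pci.h>

/* Clear the L1.2 enable bits in PCI_L1SS_CTL1 and leave the rest untouched. */
static void example_disable_l1_2(struct pci_dev *pdev, u16 l1ss)
{
	pci_clear_and_set_config_dword(pdev, l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_L1_2_MASK, 0);
}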
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 30c7dfeccb16..1eaffff40b8d 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -650,13 +650,6 @@ static void hv_arch_irq_unmask(struct irq_data *data)
PCI_FUNC(pdev->devfn);
params->int_target.vector = hv_msi_get_int_vector(data);
- /*
- * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
- * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
- * spurious interrupt storm. Not doing so does not seem to have a
- * negative effect (yet?).
- */
-
if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
/*
* PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 55bc3576a985..bdbf8a94b4d0 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1335,6 +1335,9 @@ static int pci_set_full_power_state(struct pci_dev *dev)
pci_restore_bars(dev);
}
+ if (dev->bus->self)
+ pcie_aspm_pm_state_change(dev->bus->self);
+
return 0;
}
@@ -1429,6 +1432,9 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
pci_power_name(dev->current_state),
pci_power_name(state));
+ if (dev->bus->self)
+ pcie_aspm_pm_state_change(dev->bus->self);
+
return 0;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 5ecbcf041179..f43873049d52 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -569,10 +569,12 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
+void pcie_aspm_pm_state_change(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
+static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
#endif
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 5dab531c8654..5a0066ecc3c5 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -426,17 +426,6 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
}
}
-static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
- u32 clear, u32 set)
-{
- u32 val;
-
- pci_read_config_dword(pdev, pos, &val);
- val &= ~clear;
- val |= set;
- pci_write_config_dword(pdev, pos, val);
-}
-
/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l12_info(struct pcie_link_state *link,
u32 parent_l1ss_cap, u32 child_l1ss_cap)
@@ -501,10 +490,12 @@ static void aspm_calc_l12_info(struct pcie_link_state *link,
cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
if (pl1_2_enables || cl1_2_enables) {
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1_2_MASK, 0);
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_config_dword(child,
+ child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_config_dword(parent,
+ parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
}
/* Program T_POWER_ON times in both ports */
@@ -512,22 +503,26 @@ static void aspm_calc_l12_info(struct pcie_link_state *link,
pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
/* Program Common_Mode_Restore_Time in upstream device */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
/* Program LTR_L1.2_THRESHOLD time in both ports */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
+ ctl1);
+ pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
+ ctl1);
if (pl1_2_enables || cl1_2_enables) {
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
- pl1_2_enables);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
- cl1_2_enables);
+ pci_clear_and_set_config_dword(parent,
+ parent->l1ss + PCI_L1SS_CTL1, 0,
+ pl1_2_enables);
+ pci_clear_and_set_config_dword(child,
+ child->l1ss + PCI_L1SS_CTL1, 0,
+ cl1_2_enables);
}
}
@@ -687,10 +682,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
*/
/* Disable all L1 substates */
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1SS_MASK, 0);
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1SS_MASK, 0);
+ pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1SS_MASK, 0);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1SS_MASK, 0);
/*
* If needed, disable L1, and it gets enabled later
* in pcie_config_aspm_link().
@@ -713,10 +708,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
val |= PCI_L1SS_CTL1_PCIPM_L1_2;
/* Enable what we need to enable */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1SS_MASK, val);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1SS_MASK, val);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1SS_MASK, val);
+ pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1SS_MASK, val);
}
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
@@ -1008,6 +1003,25 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
up_read(&pci_bus_sem);
}
+/* @pdev: the root port or switch downstream port */
+void pcie_aspm_pm_state_change(struct pci_dev *pdev)
+{
+ struct pcie_link_state *link = pdev->link_state;
+
+ if (aspm_disabled || !link)
+ return;
+ /*
+	 * A device changed PM state; recheck whether the ASPM exit latency
+	 * still meets every function's requirements.
+ */
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ pcie_update_aspm_capable(link->root);
+ pcie_config_aspm_path(link);
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
+}
+
void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
struct pcie_link_state *link = pdev->link_state;
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 273d67ecf6d2..ec6e0d9194a1 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -217,6 +217,13 @@ config MARVELL_CN10K_DDR_PMU
Enable perf support for Marvell DDR Performance monitoring
event on CN10K platform.
+config DWC_PCIE_PMU
+ tristate "Synopsys DesignWare PCIe PMU"
+ depends on PCI
+ help
+	  Enable perf support for the Synopsys DesignWare PCIe PMU performance
+	  monitoring events on platforms including the Alibaba Yitian 710.
+
source "drivers/perf/arm_cspmu/Kconfig"
source "drivers/perf/amlogic/Kconfig"
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 16b3ec4db916..a06338e3401c 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o
obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o
obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o
obj-$(CONFIG_ALIBABA_UNCORE_DRW_PMU) += alibaba_uncore_drw_pmu.o
+obj-$(CONFIG_DWC_PCIE_PMU) += dwc_pcie_pmu.o
obj-$(CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += arm_cspmu/
obj-$(CONFIG_MESON_DDR_PMU) += amlogic/
obj-$(CONFIG_CXL_PMU) += cxl_pmu.o
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index cd2de44b61b9..f322e5ca1114 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -524,8 +524,10 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event,
{
unsigned long config_base = 0;
- if (!attr->exclude_guest)
- return -EINVAL;
+ if (!attr->exclude_guest) {
+ pr_debug("ARM performance counters do not support mode exclusion\n");
+ return -EOPNOTSUPP;
+ }
if (!attr->exclude_kernel)
config_base |= M1_PMU_CFG_COUNT_KERNEL;
if (!attr->exclude_user)
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 847b0dc41293..c584165b13ba 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -811,7 +811,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
#define CMN_EVENT_HNF_OCC(_model, _name, _event) \
CMN_EVENT_HN_OCC(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNF_CLS(_model, _name, _event) \
- CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNS, _event)
+ CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNF_SNT(_model, _name, _event) \
CMN_EVENT_HN_SNT(_model, hnf_##_name, CMN_TYPE_HNF, _event)
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index 2cc35dded007..50b89b989ce7 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -1108,7 +1108,6 @@ static int arm_cspmu_request_irq(struct arm_cspmu *cspmu)
static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
{
- u64 acpi_uid;
struct device *cpu_dev;
struct acpi_device *acpi_dev;
@@ -1118,8 +1117,7 @@ static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
acpi_dev = ACPI_COMPANION(cpu_dev);
while (acpi_dev) {
- if (acpi_dev_hid_uid_match(acpi_dev, ACPI_PROCESSOR_CONTAINER_HID, NULL) &&
- !acpi_dev_uid_to_integer(acpi_dev, &acpi_uid) && acpi_uid == container_uid)
+ if (acpi_dev_hid_uid_match(acpi_dev, ACPI_PROCESSOR_CONTAINER_HID, container_uid))
return 0;
acpi_dev = acpi_dev_parent(acpi_dev);
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 8223c49bd082..7ec4498e312f 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -371,7 +371,7 @@ static inline u32 dsu_pmu_get_reset_overflow(void)
return __dsu_pmu_get_reset_overflow();
}
-/**
+/*
* dsu_pmu_set_event_period: Set the period for the counter.
*
* All DSU PMU event counters, except the cycle counter are 32bit
@@ -602,7 +602,7 @@ static struct dsu_pmu *dsu_pmu_alloc(struct platform_device *pdev)
return dsu_pmu;
}
-/**
+/*
* dsu_pmu_dt_get_cpus: Get the list of CPUs in the cluster
* from device tree.
*/
@@ -632,7 +632,7 @@ static int dsu_pmu_dt_get_cpus(struct device *dev, cpumask_t *mask)
return 0;
}
-/**
+/*
* dsu_pmu_acpi_get_cpus: Get the list of CPUs in the cluster
* from ACPI.
*/
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index d712a19e47ac..8458fe2cebb4 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -445,7 +445,7 @@ __hw_perf_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- int mapping;
+ int mapping, ret;
hwc->flags = 0;
mapping = armpmu->map_event(event);
@@ -470,11 +470,10 @@ __hw_perf_event_init(struct perf_event *event)
/*
* Check whether we need to exclude the counter from certain modes.
*/
- if (armpmu->set_event_filter &&
- armpmu->set_event_filter(hwc, &event->attr)) {
- pr_debug("ARM performance counters do not support "
- "mode exclusion\n");
- return -EOPNOTSUPP;
+ if (armpmu->set_event_filter) {
+ ret = armpmu->set_event_filter(hwc, &event->attr);
+ if (ret)
+ return ret;
}
/*
@@ -893,7 +892,6 @@ struct arm_pmu *armpmu_alloc(void)
struct pmu_hw_events *events;
events = per_cpu_ptr(pmu->hw_events, cpu);
- raw_spin_lock_init(&events->pmu_lock);
events->percpu_pmu = pmu;
}
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 6ca7be05229c..23fa6c5da82c 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -15,6 +15,7 @@
#include <clocksource/arm_arch_timer.h>
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
@@ -169,7 +170,11 @@ armv8pmu_events_sysfs_show(struct device *dev,
PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config)
static struct attribute *armv8_pmuv3_event_attrs[] = {
- ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
+ /*
+ * Don't expose the sw_incr event in /sys. It's not usable as writes to
+ * PMSWINC_EL0 will trap as PMUSERENR.{SW,EN}=={0,0} and event rotation
+ * means we don't have a fixed event<->counter relationship regardless.
+ */
ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
@@ -294,26 +299,66 @@ static const struct attribute_group armv8_pmuv3_events_attr_group = {
.is_visible = armv8pmu_event_attr_is_visible,
};
-PMU_FORMAT_ATTR(event, "config:0-15");
-PMU_FORMAT_ATTR(long, "config1:0");
-PMU_FORMAT_ATTR(rdpmc, "config1:1");
+/* User ABI */
+#define ATTR_CFG_FLD_event_CFG config
+#define ATTR_CFG_FLD_event_LO 0
+#define ATTR_CFG_FLD_event_HI 15
+#define ATTR_CFG_FLD_long_CFG config1
+#define ATTR_CFG_FLD_long_LO 0
+#define ATTR_CFG_FLD_long_HI 0
+#define ATTR_CFG_FLD_rdpmc_CFG config1
+#define ATTR_CFG_FLD_rdpmc_LO 1
+#define ATTR_CFG_FLD_rdpmc_HI 1
+#define ATTR_CFG_FLD_threshold_count_CFG config1 /* PMEVTYPER.TC[0] */
+#define ATTR_CFG_FLD_threshold_count_LO 2
+#define ATTR_CFG_FLD_threshold_count_HI 2
+#define ATTR_CFG_FLD_threshold_compare_CFG config1 /* PMEVTYPER.TC[2:1] */
+#define ATTR_CFG_FLD_threshold_compare_LO 3
+#define ATTR_CFG_FLD_threshold_compare_HI 4
+#define ATTR_CFG_FLD_threshold_CFG config1 /* PMEVTYPER.TH */
+#define ATTR_CFG_FLD_threshold_LO 5
+#define ATTR_CFG_FLD_threshold_HI 16
+
+GEN_PMU_FORMAT_ATTR(event);
+GEN_PMU_FORMAT_ATTR(long);
+GEN_PMU_FORMAT_ATTR(rdpmc);
+GEN_PMU_FORMAT_ATTR(threshold_count);
+GEN_PMU_FORMAT_ATTR(threshold_compare);
+GEN_PMU_FORMAT_ATTR(threshold);
static int sysctl_perf_user_access __read_mostly;
-static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
+static bool armv8pmu_event_is_64bit(struct perf_event *event)
+{
+ return ATTR_CFG_GET_FLD(&event->attr, long);
+}
+
+static bool armv8pmu_event_want_user_access(struct perf_event *event)
{
- return event->attr.config1 & 0x1;
+ return ATTR_CFG_GET_FLD(&event->attr, rdpmc);
}
-static inline bool armv8pmu_event_want_user_access(struct perf_event *event)
+static u8 armv8pmu_event_threshold_control(struct perf_event_attr *attr)
{
- return event->attr.config1 & 0x2;
+ u8 th_compare = ATTR_CFG_GET_FLD(attr, threshold_compare);
+ u8 th_count = ATTR_CFG_GET_FLD(attr, threshold_count);
+
+ /*
+ * The count bit is always the bottom bit of the full control field, and
+ * the comparison is the upper two bits, but it's not explicitly
+ * labelled in the Arm ARM. For the Perf interface we split it into two
+ * fields, so reconstruct it here.
+ */
+ return (th_compare << 1) | th_count;
}
static struct attribute *armv8_pmuv3_format_attrs[] = {
&format_attr_event.attr,
&format_attr_long.attr,
&format_attr_rdpmc.attr,
+ &format_attr_threshold.attr,
+ &format_attr_threshold_compare.attr,
+ &format_attr_threshold_count.attr,
NULL,
};
@@ -327,7 +372,7 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
{
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
- u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
+ u32 slots = FIELD_GET(ARMV8_PMU_SLOTS, cpu_pmu->reg_pmmir);
return sysfs_emit(page, "0x%08x\n", slots);
}
@@ -339,8 +384,7 @@ static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr,
{
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
- u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT)
- & ARMV8_PMU_BUS_SLOTS_MASK;
+ u32 bus_slots = FIELD_GET(ARMV8_PMU_BUS_SLOTS, cpu_pmu->reg_pmmir);
return sysfs_emit(page, "0x%08x\n", bus_slots);
}
@@ -352,8 +396,7 @@ static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
{
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
- u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT)
- & ARMV8_PMU_BUS_WIDTH_MASK;
+ u32 bus_width = FIELD_GET(ARMV8_PMU_BUS_WIDTH, cpu_pmu->reg_pmmir);
u32 val = 0;
/* Encoded as Log2(number of bytes), plus one */
@@ -365,10 +408,38 @@ static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(bus_width);
+static u32 threshold_max(struct arm_pmu *cpu_pmu)
+{
+ /*
+ * PMMIR.THWIDTH is readable and non-zero on aarch32, but it would be
+ * impossible to write the threshold in the upper 32 bits of PMEVTYPER.
+ */
+ if (IS_ENABLED(CONFIG_ARM))
+ return 0;
+
+ /*
+ * The largest value that can be written to PMEVTYPER<n>_EL0.TH is
+ * (2 ^ PMMIR.THWIDTH) - 1.
+ */
+ return (1 << FIELD_GET(ARMV8_PMU_THWIDTH, cpu_pmu->reg_pmmir)) - 1;
+}
+
+static ssize_t threshold_max_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+
+ return sysfs_emit(page, "0x%08x\n", threshold_max(cpu_pmu));
+}
+
+static DEVICE_ATTR_RO(threshold_max);
+
static struct attribute *armv8_pmuv3_caps_attrs[] = {
&dev_attr_slots.attr,
&dev_attr_bus_slots.attr,
&dev_attr_bus_width.attr,
+ &dev_attr_threshold_max.attr,
NULL,
};
@@ -397,7 +468,7 @@ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
return (IS_ENABLED(CONFIG_ARM64) && is_pmuv3p5(cpu_pmu->pmuver));
}
-static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
+static bool armv8pmu_event_has_user_read(struct perf_event *event)
{
return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT;
}
@@ -407,7 +478,7 @@ static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
* except when we have allocated the 64bit cycle counter (for CPU
* cycles event) or when user space counter access is enabled.
*/
-static inline bool armv8pmu_event_is_chained(struct perf_event *event)
+static bool armv8pmu_event_is_chained(struct perf_event *event)
{
int idx = event->hw.idx;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
@@ -428,36 +499,36 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
#define ARMV8_IDX_TO_COUNTER(x) \
(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
-static inline u64 armv8pmu_pmcr_read(void)
+static u64 armv8pmu_pmcr_read(void)
{
return read_pmcr();
}
-static inline void armv8pmu_pmcr_write(u64 val)
+static void armv8pmu_pmcr_write(u64 val)
{
val &= ARMV8_PMU_PMCR_MASK;
isb();
write_pmcr(val);
}
-static inline int armv8pmu_has_overflowed(u32 pmovsr)
+static int armv8pmu_has_overflowed(u32 pmovsr)
{
return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}
-static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
+static int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
-static inline u64 armv8pmu_read_evcntr(int idx)
+static u64 armv8pmu_read_evcntr(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
return read_pmevcntrn(counter);
}
-static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
+static u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
int idx = event->hw.idx;
u64 val = armv8pmu_read_evcntr(idx);
@@ -519,14 +590,14 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
return armv8pmu_unbias_long_counter(event, value);
}
-static inline void armv8pmu_write_evcntr(int idx, u64 value)
+static void armv8pmu_write_evcntr(int idx, u64 value)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
write_pmevcntrn(counter, value);
}
-static inline void armv8pmu_write_hw_counter(struct perf_event *event,
+static void armv8pmu_write_hw_counter(struct perf_event *event,
u64 value)
{
int idx = event->hw.idx;
@@ -552,15 +623,22 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
armv8pmu_write_hw_counter(event, value);
}
-static inline void armv8pmu_write_evtype(int idx, u32 val)
+static void armv8pmu_write_evtype(int idx, unsigned long val)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+ unsigned long mask = ARMV8_PMU_EVTYPE_EVENT |
+ ARMV8_PMU_INCLUDE_EL2 |
+ ARMV8_PMU_EXCLUDE_EL0 |
+ ARMV8_PMU_EXCLUDE_EL1;
- val &= ARMV8_PMU_EVTYPE_MASK;
+ if (IS_ENABLED(CONFIG_ARM64))
+ mask |= ARMV8_PMU_EVTYPE_TC | ARMV8_PMU_EVTYPE_TH;
+
+ val &= mask;
write_pmevtypern(counter, val);
}
-static inline void armv8pmu_write_event_type(struct perf_event *event)
+static void armv8pmu_write_event_type(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -594,7 +672,7 @@ static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
return mask;
}
-static inline void armv8pmu_enable_counter(u32 mask)
+static void armv8pmu_enable_counter(u32 mask)
{
/*
* Make sure event configuration register writes are visible before we
@@ -604,7 +682,7 @@ static inline void armv8pmu_enable_counter(u32 mask)
write_pmcntenset(mask);
}
-static inline void armv8pmu_enable_event_counter(struct perf_event *event)
+static void armv8pmu_enable_event_counter(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
u32 mask = armv8pmu_event_cnten_mask(event);
@@ -616,7 +694,7 @@ static inline void armv8pmu_enable_event_counter(struct perf_event *event)
armv8pmu_enable_counter(mask);
}
-static inline void armv8pmu_disable_counter(u32 mask)
+static void armv8pmu_disable_counter(u32 mask)
{
write_pmcntenclr(mask);
/*
@@ -626,7 +704,7 @@ static inline void armv8pmu_disable_counter(u32 mask)
isb();
}
-static inline void armv8pmu_disable_event_counter(struct perf_event *event)
+static void armv8pmu_disable_event_counter(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
u32 mask = armv8pmu_event_cnten_mask(event);
@@ -638,18 +716,18 @@ static inline void armv8pmu_disable_event_counter(struct perf_event *event)
armv8pmu_disable_counter(mask);
}
-static inline void armv8pmu_enable_intens(u32 mask)
+static void armv8pmu_enable_intens(u32 mask)
{
write_pmintenset(mask);
}
-static inline void armv8pmu_enable_event_irq(struct perf_event *event)
+static void armv8pmu_enable_event_irq(struct perf_event *event)
{
u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
armv8pmu_enable_intens(BIT(counter));
}
-static inline void armv8pmu_disable_intens(u32 mask)
+static void armv8pmu_disable_intens(u32 mask)
{
write_pmintenclr(mask);
isb();
@@ -658,13 +736,13 @@ static inline void armv8pmu_disable_intens(u32 mask)
isb();
}
-static inline void armv8pmu_disable_event_irq(struct perf_event *event)
+static void armv8pmu_disable_event_irq(struct perf_event *event)
{
u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
armv8pmu_disable_intens(BIT(counter));
}
-static inline u32 armv8pmu_getreset_flags(void)
+static u32 armv8pmu_getreset_flags(void)
{
u32 value;
@@ -672,7 +750,7 @@ static inline u32 armv8pmu_getreset_flags(void)
value = read_pmovsclr();
/* Write to clear flags */
- value &= ARMV8_PMU_OVSR_MASK;
+ value &= ARMV8_PMU_OVERFLOWED_MASK;
write_pmovsclr(value);
return value;
@@ -914,9 +992,15 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
struct perf_event_attr *attr)
{
unsigned long config_base = 0;
-
- if (attr->exclude_idle)
- return -EPERM;
+ struct perf_event *perf_event = container_of(attr, struct perf_event,
+ attr);
+ struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
+ u32 th;
+
+ if (attr->exclude_idle) {
+ pr_debug("ARM performance counters do not support mode exclusion\n");
+ return -EOPNOTSUPP;
+ }
/*
* If we're running in hyp mode, then we *are* the hypervisor.
@@ -946,6 +1030,22 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
config_base |= ARMV8_PMU_EXCLUDE_EL0;
/*
+ * If FEAT_PMUv3_TH isn't implemented, then THWIDTH (threshold_max) will
+ * be 0 and will also trigger this check, preventing it from being used.
+ */
+ th = ATTR_CFG_GET_FLD(attr, threshold);
+ if (th > threshold_max(cpu_pmu)) {
+ pr_debug("PMU event threshold exceeds max value\n");
+ return -EINVAL;
+ }
+
+ if (IS_ENABLED(CONFIG_ARM64) && th) {
+ config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TH, th);
+ config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TC,
+ armv8pmu_event_threshold_control(attr));
+ }
+
+ /*
* Install the filter into config_base as this is used to
* construct the event type.
*/
@@ -1107,8 +1207,7 @@ static void __armv8pmu_probe_pmu(void *info)
probe->present = true;
/* Read the nb of CNTx counters supported from PMNC */
- cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
- & ARMV8_PMU_PMCR_N_MASK;
+ cpu_pmu->num_events = FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read());
/* Add the CPU cycles counter */
cpu_pmu->num_events += 1;
@@ -1221,6 +1320,12 @@ static int name##_pmu_init(struct arm_pmu *cpu_pmu) \
return armv8_pmu_init(cpu_pmu, #name, armv8_pmuv3_map_event); \
}
+#define PMUV3_INIT_MAP_EVENT(name, map_event) \
+static int name##_pmu_init(struct arm_pmu *cpu_pmu) \
+{ \
+ return armv8_pmu_init(cpu_pmu, #name, map_event); \
+}
+
PMUV3_INIT_SIMPLE(armv8_pmuv3)
PMUV3_INIT_SIMPLE(armv8_cortex_a34)
@@ -1247,51 +1352,24 @@ PMUV3_INIT_SIMPLE(armv8_neoverse_v1)
PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
PMUV3_INIT_SIMPLE(armv8_nvidia_denver)
-static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a35", armv8_a53_map_event);
-}
-
-static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a53", armv8_a53_map_event);
-}
-
-static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a57", armv8_a57_map_event);
-}
-
-static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a72", armv8_a57_map_event);
-}
-
-static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a73", armv8_a73_map_event);
-}
-
-static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init(cpu_pmu, "armv8_cavium_thunder", armv8_thunder_map_event);
-}
-
-static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init(cpu_pmu, "armv8_brcm_vulcan", armv8_vulcan_map_event);
-}
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a35, armv8_a53_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a53, armv8_a53_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a57, armv8_a57_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a72, armv8_a57_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a73, armv8_a73_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cavium_thunder, armv8_thunder_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_brcm_vulcan, armv8_vulcan_map_event)
static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_pmu_init},
{.compatible = "arm,cortex-a34-pmu", .data = armv8_cortex_a34_pmu_init},
- {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
- {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
+ {.compatible = "arm,cortex-a35-pmu", .data = armv8_cortex_a35_pmu_init},
+ {.compatible = "arm,cortex-a53-pmu", .data = armv8_cortex_a53_pmu_init},
{.compatible = "arm,cortex-a55-pmu", .data = armv8_cortex_a55_pmu_init},
- {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
+ {.compatible = "arm,cortex-a57-pmu", .data = armv8_cortex_a57_pmu_init},
{.compatible = "arm,cortex-a65-pmu", .data = armv8_cortex_a65_pmu_init},
- {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
- {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
+ {.compatible = "arm,cortex-a72-pmu", .data = armv8_cortex_a72_pmu_init},
+ {.compatible = "arm,cortex-a73-pmu", .data = armv8_cortex_a73_pmu_init},
{.compatible = "arm,cortex-a75-pmu", .data = armv8_cortex_a75_pmu_init},
{.compatible = "arm,cortex-a76-pmu", .data = armv8_cortex_a76_pmu_init},
{.compatible = "arm,cortex-a77-pmu", .data = armv8_cortex_a77_pmu_init},
@@ -1309,8 +1387,8 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init},
{.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init},
{.compatible = "arm,neoverse-v1-pmu", .data = armv8_neoverse_v1_pmu_init},
- {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
- {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
+ {.compatible = "cavium,thunder-pmu", .data = armv8_cavium_thunder_pmu_init},
+ {.compatible = "brcm,vulcan-pmu", .data = armv8_brcm_vulcan_pmu_init},
{.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init},
{.compatible = "nvidia,denver-pmu", .data = armv8_nvidia_denver_pmu_init},
{},
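
With the new format fields, config1 now carries the threshold controls alongside the existing long/rdpmc bits: threshold_count in bit 2, threshold_compare in bits 3-4 and threshold in bits 5-16, and the driver folds threshold_compare/threshold_count back into PMEVTYPER.TC. A hedged userspace sketch of opening such an event; the PMU type must be read from sysfs, and event 0x11 (CPU_CYCLES) plus the compare value are purely illustrative:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_thresholded_event(int pmu_type, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = pmu_type;			/* dynamic type from sysfs */
	attr.config = 0x11;			/* event, config:0-15 */
	attr.config1 = (3ULL << 5) |		/* threshold, config1:5-16 */
		       (1ULL << 3) |		/* threshold_compare, config1:3-4 */
		       (0ULL << 2);		/* threshold_count, config1:2 */

	return syscall(SYS_perf_event_open, &attr, -1, cpu, -1, 0);
}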
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index d2b0cbf0e0c4..b622d75d8c9e 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -206,28 +206,6 @@ static const struct attribute_group arm_spe_pmu_cap_group = {
#define ATTR_CFG_FLD_inv_event_filter_LO 0
#define ATTR_CFG_FLD_inv_event_filter_HI 63
-/* Why does everything I do descend into this? */
-#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
- (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
-
-#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
- __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
-
-#define GEN_PMU_FORMAT_ATTR(name) \
- PMU_FORMAT_ATTR(name, \
- _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
- ATTR_CFG_FLD_##name##_LO, \
- ATTR_CFG_FLD_##name##_HI))
-
-#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
- ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
-
-#define ATTR_CFG_GET_FLD(attr, name) \
- _ATTR_CFG_GET_FLD(attr, \
- ATTR_CFG_FLD_##name##_CFG, \
- ATTR_CFG_FLD_##name##_LO, \
- ATTR_CFG_FLD_##name##_HI)
-
GEN_PMU_FORMAT_ATTR(ts_enable);
GEN_PMU_FORMAT_ATTR(pa_enable);
GEN_PMU_FORMAT_ATTR(pct_enable);
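
The GEN_PMU_FORMAT_ATTR/ATTR_CFG_GET_FLD helpers deleted here are not lost; arm_pmuv3.c now uses the same names, so they have presumably been hoisted to a shared header earlier in this series. For reference, with the threshold field defined as config1 bits 5-16, GEN_PMU_FORMAT_ATTR(threshold) expands to PMU_FORMAT_ATTR(threshold, "config1:5-16"), and the extraction helper is equivalent to the open-coded sketch below:

#include <linux/bits.h>
#include <linux/perf_event.h>
#include <linux/types.h>

/* Open-coded equivalent of ATTR_CFG_GET_FLD(attr, threshold). */
static u64 example_get_threshold(const struct perf_event_attr *attr)
{
	/* ATTR_CFG_FLD_threshold_{CFG,LO,HI} == config1, 5, 16 */
	return (attr->config1 >> 5) & GENMASK(16 - 5, 0);
}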
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
new file mode 100644
index 000000000000..957058ad0099
--- /dev/null
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe PMU driver
+ *
+ * Copyright (C) 2021-2023 Alibaba Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/perf_event.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define DWC_PCIE_VSEC_RAS_DES_ID 0x02
+#define DWC_PCIE_EVENT_CNT_CTL 0x8
+
+/*
+ * Event Counter Data Select includes two parts:
+ * - 27-24: Group number (4-bit: 0..0x7)
+ * - 23-16: Event number (8-bit: 0..0x13) within the Group
+ *
+ * Put them together as in the TRM.
+ */
+#define DWC_PCIE_CNT_EVENT_SEL GENMASK(27, 16)
+#define DWC_PCIE_CNT_LANE_SEL GENMASK(11, 8)
+#define DWC_PCIE_CNT_STATUS BIT(7)
+#define DWC_PCIE_CNT_ENABLE GENMASK(4, 2)
+#define DWC_PCIE_PER_EVENT_OFF 0x1
+#define DWC_PCIE_PER_EVENT_ON 0x3
+#define DWC_PCIE_EVENT_CLEAR GENMASK(1, 0)
+#define DWC_PCIE_EVENT_PER_CLEAR 0x1
+
+#define DWC_PCIE_EVENT_CNT_DATA 0xC
+
+#define DWC_PCIE_TIME_BASED_ANAL_CTL 0x10
+#define DWC_PCIE_TIME_BASED_REPORT_SEL GENMASK(31, 24)
+#define DWC_PCIE_TIME_BASED_DURATION_SEL GENMASK(15, 8)
+#define DWC_PCIE_DURATION_MANUAL_CTL 0x0
+#define DWC_PCIE_DURATION_1MS 0x1
+#define DWC_PCIE_DURATION_10MS 0x2
+#define DWC_PCIE_DURATION_100MS 0x3
+#define DWC_PCIE_DURATION_1S 0x4
+#define DWC_PCIE_DURATION_2S 0x5
+#define DWC_PCIE_DURATION_4S 0x6
+#define DWC_PCIE_DURATION_4US 0xFF
+#define DWC_PCIE_TIME_BASED_TIMER_START BIT(0)
+#define DWC_PCIE_TIME_BASED_CNT_ENABLE 0x1
+
+#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW 0x14
+#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH 0x18
+
+/* Event attributes */
+#define DWC_PCIE_CONFIG_EVENTID GENMASK(15, 0)
+#define DWC_PCIE_CONFIG_TYPE GENMASK(19, 16)
+#define DWC_PCIE_CONFIG_LANE GENMASK(27, 20)
+
+#define DWC_PCIE_EVENT_ID(event) FIELD_GET(DWC_PCIE_CONFIG_EVENTID, (event)->attr.config)
+#define DWC_PCIE_EVENT_TYPE(event) FIELD_GET(DWC_PCIE_CONFIG_TYPE, (event)->attr.config)
+#define DWC_PCIE_EVENT_LANE(event) FIELD_GET(DWC_PCIE_CONFIG_LANE, (event)->attr.config)
+
+enum dwc_pcie_event_type {
+ DWC_PCIE_TIME_BASE_EVENT,
+ DWC_PCIE_LANE_EVENT,
+ DWC_PCIE_EVENT_TYPE_MAX,
+};
+
+#define DWC_PCIE_LANE_EVENT_MAX_PERIOD GENMASK_ULL(31, 0)
+#define DWC_PCIE_MAX_PERIOD GENMASK_ULL(63, 0)
+
+struct dwc_pcie_pmu {
+ struct pmu pmu;
+ struct pci_dev *pdev; /* Root Port device */
+ u16 ras_des_offset;
+ u32 nr_lanes;
+
+ struct list_head pmu_node;
+ struct hlist_node cpuhp_node;
+ struct perf_event *event[DWC_PCIE_EVENT_TYPE_MAX];
+ int on_cpu;
+};
+
+#define to_dwc_pcie_pmu(p) (container_of(p, struct dwc_pcie_pmu, pmu))
+
+static int dwc_pcie_pmu_hp_state;
+static struct list_head dwc_pcie_dev_info_head =
+ LIST_HEAD_INIT(dwc_pcie_dev_info_head);
+static bool notify;
+
+struct dwc_pcie_dev_info {
+ struct platform_device *plat_dev;
+ struct pci_dev *pdev;
+ struct list_head dev_node;
+};
+
+struct dwc_pcie_vendor_id {
+ int vendor_id;
+};
+
+static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = {
+ {.vendor_id = PCI_VENDOR_ID_ALIBABA },
+ {} /* terminator */
+};
+
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *dwc_pcie_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static struct attribute_group dwc_pcie_cpumask_attr_group = {
+ .attrs = dwc_pcie_pmu_cpumask_attrs,
+};
+
+struct dwc_pcie_format_attr {
+ struct device_attribute attr;
+ u64 field;
+ int config;
+};
+
+PMU_FORMAT_ATTR(eventid, "config:0-15");
+PMU_FORMAT_ATTR(type, "config:16-19");
+PMU_FORMAT_ATTR(lane, "config:20-27");
+
+static struct attribute *dwc_pcie_format_attrs[] = {
+ &format_attr_type.attr,
+ &format_attr_eventid.attr,
+ &format_attr_lane.attr,
+ NULL,
+};
+
+static struct attribute_group dwc_pcie_format_attrs_group = {
+ .name = "format",
+ .attrs = dwc_pcie_format_attrs,
+};
+
+struct dwc_pcie_event_attr {
+ struct device_attribute attr;
+ enum dwc_pcie_event_type type;
+ u16 eventid;
+ u8 lane;
+};
+
+static ssize_t dwc_pcie_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dwc_pcie_event_attr *eattr;
+
+ eattr = container_of(attr, typeof(*eattr), attr);
+
+ if (eattr->type == DWC_PCIE_LANE_EVENT)
+ return sysfs_emit(buf, "eventid=0x%x,type=0x%x,lane=?\n",
+ eattr->eventid, eattr->type);
+ else if (eattr->type == DWC_PCIE_TIME_BASE_EVENT)
+ return sysfs_emit(buf, "eventid=0x%x,type=0x%x\n",
+ eattr->eventid, eattr->type);
+
+ return 0;
+}
+
+#define DWC_PCIE_EVENT_ATTR(_name, _type, _eventid, _lane) \
+ (&((struct dwc_pcie_event_attr[]) {{ \
+ .attr = __ATTR(_name, 0444, dwc_pcie_event_show, NULL), \
+ .type = _type, \
+ .eventid = _eventid, \
+ .lane = _lane, \
+ }})[0].attr.attr)
+
+#define DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(_name, _eventid) \
+ DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_TIME_BASE_EVENT, _eventid, 0)
+#define DWC_PCIE_PMU_LANE_EVENT_ATTR(_name, _eventid) \
+ DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_LANE_EVENT, _eventid, 0)
+
+static struct attribute *dwc_pcie_pmu_time_event_attrs[] = {
+ /* Group #0 */
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(one_cycle, 0x00),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_L0S, 0x01),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(RX_L0S, 0x02),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L0, 0x03),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1, 0x04),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x08),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09),
+
+ /* Group #1 */
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_PCIe_TLP_Data_Payload, 0x20),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_PCIe_TLP_Data_Payload, 0x21),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_CCIX_TLP_Data_Payload, 0x22),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_CCIX_TLP_Data_Payload, 0x23),
+
+ /*
+ * Leave it to the user to specify the lane ID to avoid generating
+ * a list of hundreds of events.
+ */
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ack_dllp, 0x600),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_update_fc_dllp, 0x601),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ack_dllp, 0x602),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_update_fc_dllp, 0x603),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nulified_tlp, 0x604),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nulified_tlp, 0x605),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tl, 0x606),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_write, 0x700),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_read, 0x701),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_write, 0x702),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_read, 0x703),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_write, 0x704),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_read, 0x705),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_without_data, 0x706),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_with_data, 0x707),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_message_tlp, 0x708),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_atomic, 0x709),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_tlp_with_prefix, 0x70A),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_write, 0x70B),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_read, 0x70C),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_write, 0x70F),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_read, 0x710),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_without_data, 0x711),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_with_data, 0x712),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_message_tlp, 0x713),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_atomic, 0x714),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_tlp_with_prefix, 0x715),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ccix_tlp, 0x716),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ccix_tlp, 0x717),
+ NULL
+};
+
+static const struct attribute_group dwc_pcie_event_attrs_group = {
+ .name = "events",
+ .attrs = dwc_pcie_pmu_time_event_attrs,
+};
+
+static const struct attribute_group *dwc_pcie_attr_groups[] = {
+ &dwc_pcie_event_attrs_group,
+ &dwc_pcie_format_attrs_group,
+ &dwc_pcie_cpumask_attr_group,
+ NULL
+};
+
+static void dwc_pcie_pmu_lane_event_enable(struct dwc_pcie_pmu *pcie_pmu,
+ bool enable)
+{
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+
+ if (enable)
+ pci_clear_and_set_config_dword(pdev,
+ ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON);
+ else
+ pci_clear_and_set_config_dword(pdev,
+ ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF);
+}
+
+static void dwc_pcie_pmu_time_based_event_enable(struct dwc_pcie_pmu *pcie_pmu,
+ bool enable)
+{
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+
+ pci_clear_and_set_config_dword(pdev,
+ ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL,
+ DWC_PCIE_TIME_BASED_TIMER_START, enable);
+}
+
+static u64 dwc_pcie_pmu_read_lane_event_counter(struct perf_event *event)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+ u32 val;
+
+ pci_read_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_DATA, &val);
+
+ return val;
+}
+
+static u64 dwc_pcie_pmu_read_time_based_counter(struct perf_event *event)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+ u32 lo, hi, ss;
+ u64 val;
+
+ /*
+ * The 64-bit value of the data counter is spread across two
+ * registers that are not synchronized. In order to read them
+ * atomically, ensure that the high 32 bits match before and after
+ * reading the low 32 bits.
+ */
+ pci_read_config_dword(pdev,
+ ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, &hi);
+ do {
+ /* snapshot the high 32 bits */
+ ss = hi;
+
+ pci_read_config_dword(
+ pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW,
+ &lo);
+ pci_read_config_dword(
+ pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH,
+ &hi);
+ } while (hi != ss);
+
+ val = ((u64)hi << 32) | lo;
+ /*
+ * The Group#1 event measures the amount of data processed in 16-byte
+ * units. Simplify the end-user interface by multiplying the counter
+ * at the point of read.
+ */
+ if (event_id >= 0x20 && event_id <= 0x23)
+ val *= 16;
+
+ return val;
+}
+
+static void dwc_pcie_pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+ u64 delta, prev, now = 0;
+
+ do {
+ prev = local64_read(&hwc->prev_count);
+
+ if (type == DWC_PCIE_LANE_EVENT)
+ now = dwc_pcie_pmu_read_lane_event_counter(event);
+ else if (type == DWC_PCIE_TIME_BASE_EVENT)
+ now = dwc_pcie_pmu_read_time_based_counter(event);
+
+ } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
+
+ delta = (now - prev) & DWC_PCIE_MAX_PERIOD;
+ /* 32-bit counter for Lane Event Counting */
+ if (type == DWC_PCIE_LANE_EVENT)
+ delta &= DWC_PCIE_LANE_EVENT_MAX_PERIOD;
+
+ local64_add(delta, &event->count);
+}
+
+static int dwc_pcie_pmu_event_init(struct perf_event *event)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+ struct perf_event *sibling;
+ u32 lane;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* We don't support sampling */
+ if (is_sampling_event(event))
+ return -EINVAL;
+
+ /* We cannot support task bound events */
+ if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ if (event->group_leader != event &&
+ !is_software_event(event->group_leader))
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (sibling->pmu != event->pmu && !is_software_event(sibling))
+ return -EINVAL;
+ }
+
+ if (type < 0 || type >= DWC_PCIE_EVENT_TYPE_MAX)
+ return -EINVAL;
+
+ if (type == DWC_PCIE_LANE_EVENT) {
+ lane = DWC_PCIE_EVENT_LANE(event);
+ if (lane < 0 || lane >= pcie_pmu->nr_lanes)
+ return -EINVAL;
+ }
+
+ event->cpu = pcie_pmu->on_cpu;
+
+ return 0;
+}
+
+static void dwc_pcie_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+
+ hwc->state = 0;
+ local64_set(&hwc->prev_count, 0);
+
+ if (type == DWC_PCIE_LANE_EVENT)
+ dwc_pcie_pmu_lane_event_enable(pcie_pmu, true);
+ else if (type == DWC_PCIE_TIME_BASE_EVENT)
+ dwc_pcie_pmu_time_based_event_enable(pcie_pmu, true);
+}
+
+static void dwc_pcie_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
+ if (type == DWC_PCIE_LANE_EVENT)
+ dwc_pcie_pmu_lane_event_enable(pcie_pmu, false);
+ else if (type == DWC_PCIE_TIME_BASE_EVENT)
+ dwc_pcie_pmu_time_based_event_enable(pcie_pmu, false);
+
+ dwc_pcie_pmu_event_update(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ struct hw_perf_event *hwc = &event->hw;
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ int lane = DWC_PCIE_EVENT_LANE(event);
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+ u32 ctrl;
+
+	/* Only one counter per event type; fail if it is already in use */
+ if (pcie_pmu->event[type])
+ return -ENOSPC;
+
+ pcie_pmu->event[type] = event;
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (type == DWC_PCIE_LANE_EVENT) {
+		/* EVENT_COUNTER_DATA_REG needs to be cleared manually */
+ ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
+ FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
+ FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF) |
+ FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR);
+ pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ ctrl);
+ } else if (type == DWC_PCIE_TIME_BASE_EVENT) {
+ /*
+		 * TIME_BASED_ANAL_DATA_REG is a 64-bit register, so it can
+		 * safely be used with any manually controlled duration. It is
+		 * cleared when the next measurement starts.
+ */
+ ctrl = FIELD_PREP(DWC_PCIE_TIME_BASED_REPORT_SEL, event_id) |
+ FIELD_PREP(DWC_PCIE_TIME_BASED_DURATION_SEL,
+ DWC_PCIE_DURATION_MANUAL_CTL) |
+ DWC_PCIE_TIME_BASED_CNT_ENABLE;
+ pci_write_config_dword(
+ pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, ctrl);
+ }
+
+ if (flags & PERF_EF_START)
+ dwc_pcie_pmu_event_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void dwc_pcie_pmu_event_del(struct perf_event *event, int flags)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+
+ dwc_pcie_pmu_event_stop(event, flags | PERF_EF_UPDATE);
+ perf_event_update_userpage(event);
+ pcie_pmu->event[type] = NULL;
+}
+
+static void dwc_pcie_pmu_remove_cpuhp_instance(void *hotplug_node)
+{
+ cpuhp_state_remove_instance_nocalls(dwc_pcie_pmu_hp_state, hotplug_node);
+}
+
+/*
+ * Find the RAS DES capability device info bound to a PCI device.
+ * @pdev: The PCI device.
+ */
+static struct dwc_pcie_dev_info *dwc_pcie_find_dev_info(struct pci_dev *pdev)
+{
+ struct dwc_pcie_dev_info *dev_info;
+
+ list_for_each_entry(dev_info, &dwc_pcie_dev_info_head, dev_node)
+ if (dev_info->pdev == pdev)
+ return dev_info;
+
+ return NULL;
+}
+
+static void dwc_pcie_unregister_pmu(void *data)
+{
+ struct dwc_pcie_pmu *pcie_pmu = data;
+
+ perf_pmu_unregister(&pcie_pmu->pmu);
+}
+
+static bool dwc_pcie_match_des_cap(struct pci_dev *pdev)
+{
+ const struct dwc_pcie_vendor_id *vid;
+ u16 vsec = 0;
+ u32 val;
+
+ if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT))
+ return false;
+
+ for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) {
+ vsec = pci_find_vsec_capability(pdev, vid->vendor_id,
+ DWC_PCIE_VSEC_RAS_DES_ID);
+ if (vsec)
+ break;
+ }
+ if (!vsec)
+ return false;
+
+ pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
+ if (PCI_VNDR_HEADER_REV(val) != 0x04)
+ return false;
+
+ pci_dbg(pdev,
+ "Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
+ return true;
+}
+
+static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info)
+{
+ platform_device_unregister(dev_info->plat_dev);
+ list_del(&dev_info->dev_node);
+ kfree(dev_info);
+}
+
+static int dwc_pcie_register_dev(struct pci_dev *pdev)
+{
+ struct platform_device *plat_dev;
+ struct dwc_pcie_dev_info *dev_info;
+ u32 bdf;
+
+ bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", bdf,
+ pdev, sizeof(*pdev));
+
+ if (IS_ERR(plat_dev))
+ return PTR_ERR(plat_dev);
+
+ dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
+ if (!dev_info)
+ return -ENOMEM;
+
+	/* Cache the platform device to handle PCI device hotplug */
+ dev_info->plat_dev = plat_dev;
+ dev_info->pdev = pdev;
+ list_add(&dev_info->dev_node, &dwc_pcie_dev_info_head);
+
+ return 0;
+}
+
+static int dwc_pcie_pmu_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct dwc_pcie_dev_info *dev_info;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ if (!dwc_pcie_match_des_cap(pdev))
+ return NOTIFY_DONE;
+ if (dwc_pcie_register_dev(pdev))
+ return NOTIFY_BAD;
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ dev_info = dwc_pcie_find_dev_info(pdev);
+ if (!dev_info)
+ return NOTIFY_DONE;
+ dwc_pcie_unregister_dev(dev_info);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block dwc_pcie_pmu_nb = {
+ .notifier_call = dwc_pcie_pmu_notifier,
+};
+
+static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
+{
+ struct pci_dev *pdev = plat_dev->dev.platform_data;
+ struct dwc_pcie_pmu *pcie_pmu;
+ char *name;
+ u32 bdf, val;
+ u16 vsec;
+ int ret;
+
+ vsec = pci_find_vsec_capability(pdev, pdev->vendor,
+ DWC_PCIE_VSEC_RAS_DES_ID);
+ pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
+ bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", bdf);
+ if (!name)
+ return -ENOMEM;
+
+ pcie_pmu = devm_kzalloc(&plat_dev->dev, sizeof(*pcie_pmu), GFP_KERNEL);
+ if (!pcie_pmu)
+ return -ENOMEM;
+
+ pcie_pmu->pdev = pdev;
+ pcie_pmu->ras_des_offset = vsec;
+ pcie_pmu->nr_lanes = pcie_get_width_cap(pdev);
+ pcie_pmu->on_cpu = -1;
+ pcie_pmu->pmu = (struct pmu){
+ .name = name,
+ .parent = &pdev->dev,
+ .module = THIS_MODULE,
+ .attr_groups = dwc_pcie_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = dwc_pcie_pmu_event_init,
+ .add = dwc_pcie_pmu_event_add,
+ .del = dwc_pcie_pmu_event_del,
+ .start = dwc_pcie_pmu_event_start,
+ .stop = dwc_pcie_pmu_event_stop,
+ .read = dwc_pcie_pmu_event_update,
+ };
+
+ /* Add this instance to the list used by the offline callback */
+ ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state,
+ &pcie_pmu->cpuhp_node);
+ if (ret) {
+ pci_err(pdev, "Error %d registering hotplug @%x\n", ret, bdf);
+ return ret;
+ }
+
+	/* Unwind when the platform driver is removed */
+ ret = devm_add_action_or_reset(&plat_dev->dev,
+ dwc_pcie_pmu_remove_cpuhp_instance,
+ &pcie_pmu->cpuhp_node);
+ if (ret)
+ return ret;
+
+ ret = perf_pmu_register(&pcie_pmu->pmu, name, -1);
+ if (ret) {
+ pci_err(pdev, "Error %d registering PMU @%x\n", ret, bdf);
+ return ret;
+ }
+ ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu,
+ pcie_pmu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int dwc_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct dwc_pcie_pmu *pcie_pmu;
+
+ pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
+ if (pcie_pmu->on_cpu == -1)
+ pcie_pmu->on_cpu = cpumask_local_spread(
+ 0, dev_to_node(&pcie_pmu->pdev->dev));
+
+ return 0;
+}
+
+static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct dwc_pcie_pmu *pcie_pmu;
+ struct pci_dev *pdev;
+ int node;
+ cpumask_t mask;
+ unsigned int target;
+
+ pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
+ /* Nothing to do if this CPU doesn't own the PMU */
+ if (cpu != pcie_pmu->on_cpu)
+ return 0;
+
+ pcie_pmu->on_cpu = -1;
+ pdev = pcie_pmu->pdev;
+ node = dev_to_node(&pdev->dev);
+ if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) &&
+ cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
+ target = cpumask_any(&mask);
+ else
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target >= nr_cpu_ids) {
+ pci_err(pdev, "There is no CPU to set\n");
+ return 0;
+ }
+
+	/* This PMU does NOT support interrupts; just migrate the context. */
+ perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
+ pcie_pmu->on_cpu = target;
+
+ return 0;
+}
+
+static struct platform_driver dwc_pcie_pmu_driver = {
+ .probe = dwc_pcie_pmu_probe,
+ .driver = {.name = "dwc_pcie_pmu",},
+};
+
+static int __init dwc_pcie_pmu_init(void)
+{
+ struct pci_dev *pdev = NULL;
+ bool found = false;
+ int ret;
+
+ for_each_pci_dev(pdev) {
+ if (!dwc_pcie_match_des_cap(pdev))
+ continue;
+
+ ret = dwc_pcie_register_dev(pdev);
+ if (ret) {
+ pci_dev_put(pdev);
+ return ret;
+ }
+
+ found = true;
+ }
+ if (!found)
+ return -ENODEV;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/dwc_pcie_pmu:online",
+ dwc_pcie_pmu_online_cpu,
+ dwc_pcie_pmu_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ dwc_pcie_pmu_hp_state = ret;
+
+ ret = platform_driver_register(&dwc_pcie_pmu_driver);
+ if (ret)
+ goto platform_driver_register_err;
+
+ ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
+ if (ret)
+ goto platform_driver_register_err;
+ notify = true;
+
+ return 0;
+
+platform_driver_register_err:
+ cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
+
+ return ret;
+}
+
+static void __exit dwc_pcie_pmu_exit(void)
+{
+ struct dwc_pcie_dev_info *dev_info, *tmp;
+
+ if (notify)
+ bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
+ list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node)
+ dwc_pcie_unregister_dev(dev_info);
+ platform_driver_unregister(&dwc_pcie_pmu_driver);
+ cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
+}
+
+module_init(dwc_pcie_pmu_init);
+module_exit(dwc_pcie_pmu_exit);
+
+MODULE_DESCRIPTION("PMU driver for DesignWare Cores PCI Express Controller");
+MODULE_AUTHOR("Shuai Xue <xueshuai@linux.alibaba.com>");
+MODULE_LICENSE("GPL v2");
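
The PMU exposes three format fields: eventid (config:0-15), type (config:16-19) and lane (config:20-27); lane events additionally need the lane number since the driver deliberately does not enumerate per-lane event strings. A hedged sketch of composing a raw config value with the same bit layout (tx_ack_dllp, event 0x600, is a lane event, i.e. type 1):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Mirrors DWC_PCIE_CONFIG_{EVENTID,TYPE,LANE} from the driver above. */
static u64 example_dwc_pcie_config(u16 eventid, u8 type, u8 lane)
{
	return FIELD_PREP(GENMASK_ULL(15, 0), eventid) |
	       FIELD_PREP(GENMASK_ULL(19, 16), type) |
	       FIELD_PREP(GENMASK_ULL(27, 20), lane);
}

Named events go through the usual perf sysfs interface; Documentation/admin-guide/perf/dwc_pcie_pmu.rst, added by this series, describes the user-visible side.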
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 92611c98120f..7dbfaee372c7 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -19,6 +19,8 @@
#define COUNTER_READ 0x20
#define COUNTER_DPCR1 0x30
+#define COUNTER_MUX_CNTL 0x50
+#define COUNTER_MASK_COMP 0x54
#define CNTL_OVER 0x1
#define CNTL_CLEAR 0x2
@@ -32,6 +34,13 @@
#define CNTL_CSV_SHIFT 24
#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT)
+#define READ_PORT_SHIFT 0
+#define READ_PORT_MASK (0x7 << READ_PORT_SHIFT)
+#define READ_CHANNEL_REVERT 0x00000008 /* bit 3 for read channel select */
+#define WRITE_PORT_SHIFT 8
+#define WRITE_PORT_MASK (0x7 << WRITE_PORT_SHIFT)
+#define WRITE_CHANNEL_REVERT 0x00000800 /* bit 11 for write channel select */
+
#define EVENT_CYCLES_ID 0
#define EVENT_CYCLES_COUNTER 0
#define NUM_COUNTERS 4
@@ -50,6 +59,7 @@ static DEFINE_IDA(ddr_ida);
/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED 0x3 /* support enhanced AXI ID filter */
+#define DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER 0x4 /* support AXI ID PORT CHANNEL filter */
struct fsl_ddr_devtype_data {
unsigned int quirks; /* quirks needed for different DDR Perf core */
@@ -82,6 +92,11 @@ static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
.identifier = "i.MX8MP",
};
+static const struct fsl_ddr_devtype_data imx8dxl_devtype_data = {
+ .quirks = DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER,
+ .identifier = "i.MX8DXL",
+};
+
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
@@ -89,6 +104,7 @@ static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
{ .compatible = "fsl,imx8mm-ddr-pmu", .data = &imx8mm_devtype_data},
{ .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data},
{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
+ { .compatible = "fsl,imx8dxl-ddr-pmu", .data = &imx8dxl_devtype_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
@@ -144,6 +160,7 @@ static const struct attribute_group ddr_perf_identifier_attr_group = {
enum ddr_perf_filter_capabilities {
PERF_CAP_AXI_ID_FILTER = 0,
PERF_CAP_AXI_ID_FILTER_ENHANCED,
+ PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER,
PERF_CAP_AXI_ID_FEAT_MAX,
};
@@ -157,6 +174,8 @@ static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
case PERF_CAP_AXI_ID_FILTER_ENHANCED:
quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
+ case PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER:
+ return !!(quirks & DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER);
default:
WARN(1, "unknown filter cap %d\n", cap);
}
@@ -187,6 +206,7 @@ static ssize_t ddr_perf_filter_cap_show(struct device *dev,
static struct attribute *ddr_perf_filter_cap_attr[] = {
PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
+ PERF_FILTER_EXT_ATTR_ENTRY(super_filter, PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER),
NULL,
};
@@ -272,11 +292,15 @@ static const struct attribute_group ddr_perf_events_attr_group = {
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");
+PMU_FORMAT_ATTR(axi_port, "config2:0-2");
+PMU_FORMAT_ATTR(axi_channel, "config2:3-3");
static struct attribute *ddr_perf_format_attrs[] = {
&format_attr_event.attr,
&format_attr_axi_id.attr,
&format_attr_axi_mask.attr,
+ &format_attr_axi_port.attr,
+ &format_attr_axi_channel.attr,
NULL,
};
@@ -530,6 +554,7 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
int counter;
int cfg = event->attr.config;
int cfg1 = event->attr.config1;
+ int cfg2 = event->attr.config2;
if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
int i;
@@ -553,6 +578,26 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
return -EOPNOTSUPP;
}
+ if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER) {
+ if (ddr_perf_is_filtered(event)) {
+ /* revert axi id masking(axi_mask) value */
+ cfg1 ^= AXI_MASKING_REVERT;
+ writel(cfg1, pmu->base + COUNTER_MASK_COMP + ((counter - 1) << 4));
+
+ if (cfg == 0x41) {
+ /* revert axi read channel(axi_channel) value */
+ cfg2 ^= READ_CHANNEL_REVERT;
+ cfg2 |= FIELD_PREP(READ_PORT_MASK, cfg2);
+ } else {
+ /* revert axi write channel(axi_channel) value */
+ cfg2 ^= WRITE_CHANNEL_REVERT;
+ cfg2 |= FIELD_PREP(WRITE_PORT_MASK, cfg2);
+ }
+
+ writel(cfg2, pmu->base + COUNTER_MUX_CNTL + ((counter - 1) << 4));
+ }
+ }
+
pmu->events[counter] = event;
hwc->idx = counter;
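
As a rough illustration of the new config2 format fields added above (event config:0-7, axi_id config1:0-15, axi_mask config1:16-31, axi_port config2:0-2, axi_channel config2:3), a user-space consumer might program the port/channel filter as in the sketch below. The event id 0x41 (axid-read) and the pmu_type argument are assumptions; the type is normally read from /sys/bus/event_source/devices/imx8_ddr0/type.

/* Sketch only, not part of the patch; event id and PMU type are assumptions. */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_filtered_ddr_read(int pmu_type)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = pmu_type;                  /* dynamic PMU type from sysfs */
	attr.config = 0x41;                    /* axid-read event id (assumed) */
	attr.config1 = 0xffffULL << 16;        /* axi_id = 0, axi_mask = 0xffff */
	attr.config2 = (2 << 0) | (1 << 3);    /* axi_port = 2, axi_channel = 1 */

	/* uncore PMU: system-wide event bound to one CPU, not a task */
	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}
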
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 5cf770a1bc31..9685645bfe04 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -617,7 +617,7 @@ static int ddr_perf_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmu);
- pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
+ pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL);
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", pmu->id);
if (!name) {
ret = -ENOMEM;
@@ -674,7 +674,7 @@ cpuhp_instance_err:
cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
format_string_err:
- ida_simple_remove(&ddr_ida, pmu->id);
+ ida_free(&ddr_ida, pmu->id);
dev_warn(&pdev->dev, "i.MX9 DDR Perf PMU failed (%d), disabled\n", ret);
return ret;
}
@@ -688,7 +688,7 @@ static int ddr_perf_remove(struct platform_device *pdev)
perf_pmu_unregister(&pmu->pmu);
- ida_simple_remove(&ddr_ida, pmu->id);
+ ida_free(&ddr_ida, pmu->id);
return 0;
}
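
The two hunks above switch the i.MX9 driver from the deprecated ida_simple_*() helpers to ida_alloc()/ida_free(); a minimal sketch of the equivalent pattern, assuming an unconstrained ID range, looks like:

/* Sketch only; ida_alloc(ida, gfp) matches the removed ida_simple_get(ida, 0, 0, gfp). */
#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);

static int example_alloc_id(void)
{
	int id = ida_alloc(&example_ida, GFP_KERNEL);

	if (id < 0)
		return id;      /* -ENOMEM or -ENOSPC */

	/* ... use the ID, e.g. to build a device name ... */

	ida_free(&example_ida, id);
	return 0;
}
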
diff --git a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
index 63da05e5831c..636fb79647c8 100644
--- a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
@@ -383,8 +383,8 @@ static struct attribute *hisi_uc_pmu_events_attr[] = {
HISI_PMU_EVENT_ATTR(cpu_rd, 0x10),
HISI_PMU_EVENT_ATTR(cpu_rd64, 0x17),
HISI_PMU_EVENT_ATTR(cpu_rs64, 0x19),
- HISI_PMU_EVENT_ATTR(cpu_mru, 0x1a),
- HISI_PMU_EVENT_ATTR(cycles, 0x9c),
+ HISI_PMU_EVENT_ATTR(cpu_mru, 0x1c),
+ HISI_PMU_EVENT_ATTR(cycles, 0x95),
HISI_PMU_EVENT_ATTR(spipe_hit, 0xb3),
HISI_PMU_EVENT_ATTR(hpipe_hit, 0xdb),
HISI_PMU_EVENT_ATTR(cring_rxdat_cnt, 0xfa),
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 091fdc154d79..6bf6f0e7b597 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -454,7 +454,7 @@ static int cros_ec_create_panicinfo(struct cros_ec_debugfs *debug_info)
debug_info->panicinfo_blob.data = data;
debug_info->panicinfo_blob.size = ret;
- debugfs_create_blob("panicinfo", S_IFREG | 0444, debug_info->dir,
+ debugfs_create_blob("panicinfo", 0444, debug_info->dir,
&debug_info->panicinfo_blob);
return 0;
diff --git a/drivers/platform/mips/rs780e-acpi.c b/drivers/platform/mips/rs780e-acpi.c
index bb0e8ae0eefd..5b8f9cc32589 100644
--- a/drivers/platform/mips/rs780e-acpi.c
+++ b/drivers/platform/mips/rs780e-acpi.c
@@ -32,29 +32,25 @@ static u8 pmio_read_index(u16 index, u8 reg)
return inb(index + 1);
}
-void pm_iowrite(u8 reg, u8 value)
+static void pm_iowrite(u8 reg, u8 value)
{
pmio_write_index(PM_INDEX, reg, value);
}
-EXPORT_SYMBOL(pm_iowrite);
-u8 pm_ioread(u8 reg)
+static u8 pm_ioread(u8 reg)
{
return pmio_read_index(PM_INDEX, reg);
}
-EXPORT_SYMBOL(pm_ioread);
-void pm2_iowrite(u8 reg, u8 value)
+static void pm2_iowrite(u8 reg, u8 value)
{
pmio_write_index(PM2_INDEX, reg, value);
}
-EXPORT_SYMBOL(pm2_iowrite);
-u8 pm2_ioread(u8 reg)
+static u8 pm2_ioread(u8 reg)
{
return pmio_read_index(PM2_INDEX, reg);
}
-EXPORT_SYMBOL(pm2_ioread);
static void acpi_hw_clear_status(void)
{
diff --git a/drivers/platform/surface/aggregator/Kconfig b/drivers/platform/surface/aggregator/Kconfig
index 88afc38ffdc5..957c216c180c 100644
--- a/drivers/platform/surface/aggregator/Kconfig
+++ b/drivers/platform/surface/aggregator/Kconfig
@@ -5,7 +5,7 @@ menuconfig SURFACE_AGGREGATOR
tristate "Microsoft Surface System Aggregator Module Subsystem and Drivers"
depends on SERIAL_DEV_BUS
depends on ACPI && !RISCV
- select CRC_CCITT
+ select CRC_ITU_T
help
The Surface System Aggregator Module (Surface SAM or SSAM) is an
embedded controller (EC) found on 5th- and later-generation Microsoft
diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
index e4dee920da18..20f3870915d2 100644
--- a/drivers/platform/surface/surface_acpi_notify.c
+++ b/drivers/platform/surface/surface_acpi_notify.c
@@ -736,34 +736,6 @@ do { \
#define san_consumer_warn(dev, handle, fmt, ...) \
san_consumer_printk(warn, dev, handle, fmt, ##__VA_ARGS__)
-static bool is_san_consumer(struct platform_device *pdev, acpi_handle handle)
-{
- struct acpi_handle_list dep_devices;
- acpi_handle supplier = ACPI_HANDLE(&pdev->dev);
- acpi_status status;
- bool ret = false;
- int i;
-
- if (!acpi_has_method(handle, "_DEP"))
- return false;
-
- status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
- if (ACPI_FAILURE(status)) {
- san_consumer_dbg(&pdev->dev, handle, "failed to evaluate _DEP\n");
- return false;
- }
-
- for (i = 0; i < dep_devices.count; i++) {
- if (dep_devices.handles[i] == supplier) {
- ret = true;
- break;
- }
- }
-
- acpi_handle_list_free(&dep_devices);
- return ret;
-}
-
static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl,
void *context, void **rv)
{
@@ -772,7 +744,7 @@ static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl,
struct acpi_device *adev;
struct device_link *link;
- if (!is_san_consumer(pdev, handle))
+ if (!acpi_device_dep(handle, ACPI_HANDLE(&pdev->dev)))
return AE_OK;
/* Ignore ACPI devices that are not present. */
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index fcf1ce8bbdc5..1cf2471d54dd 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -26,21 +26,6 @@ static const struct x86_cpu_id p2sb_cpu_ids[] = {
{}
};
-/*
- * Cache BAR0 of P2SB device functions 0 to 7.
- * TODO: The constant 8 is the number of functions that PCI specification
- * defines. Same definitions exist tree-wide. Unify this definition and
- * the other definitions then move to include/uapi/linux/pci.h.
- */
-#define NR_P2SB_RES_CACHE 8
-
-struct p2sb_res_cache {
- u32 bus_dev_id;
- struct resource res;
-};
-
-static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE];
-
static int p2sb_get_devfn(unsigned int *devfn)
{
unsigned int fn = P2SB_DEVFN_DEFAULT;
@@ -54,16 +39,8 @@ static int p2sb_get_devfn(unsigned int *devfn)
return 0;
}
-static bool p2sb_valid_resource(struct resource *res)
-{
- if (res->flags)
- return true;
-
- return false;
-}
-
/* Copy resource from the first BAR of the device in question */
-static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
{
struct resource *bar0 = &pdev->resource[0];
@@ -79,64 +56,47 @@ static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
mem->end = bar0->end;
mem->flags = bar0->flags;
mem->desc = bar0->desc;
+
+ return 0;
}
-static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
+static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
{
- struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)];
struct pci_dev *pdev;
+ int ret;
pdev = pci_scan_single_device(bus, devfn);
if (!pdev)
- return;
+ return -ENODEV;
- p2sb_read_bar0(pdev, &cache->res);
- cache->bus_dev_id = bus->dev.id;
+ ret = p2sb_read_bar0(pdev, mem);
pci_stop_and_remove_bus_device(pdev);
- return;
-}
-
-static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
-{
- unsigned int slot, fn;
-
- if (PCI_FUNC(devfn) == 0) {
- /*
- * When function number of the P2SB device is zero, scan it and
- * other function numbers, and if devices are available, cache
- * their BAR0s.
- */
- slot = PCI_SLOT(devfn);
- for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++)
- p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn));
- } else {
- /* Scan the P2SB device and cache its BAR0 */
- p2sb_scan_and_cache_devfn(bus, devfn);
- }
-
- if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
- return -ENOENT;
-
- return 0;
-}
-
-static struct pci_bus *p2sb_get_bus(struct pci_bus *bus)
-{
- static struct pci_bus *p2sb_bus;
-
- bus = bus ?: p2sb_bus;
- if (bus)
- return bus;
-
- /* Assume P2SB is on the bus 0 in domain 0 */
- p2sb_bus = pci_find_bus(0, 0);
- return p2sb_bus;
+ return ret;
}
-static int p2sb_cache_resources(void)
+/**
+ * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
+ * @bus: PCI bus to communicate with
+ * @devfn: PCI slot and function to communicate with
+ * @mem: memory resource to be filled in
+ *
+ * The BIOS prevents the P2SB device from being enumerated by the PCI
+ * subsystem, so we need to unhide and hide it back to lookup the BAR.
+ *
+ * If @bus is NULL, the bus 0 in domain 0 will be used.
+ * If @devfn is 0, it will be replaced by devfn of the P2SB device.
+ *
+ * Caller must provide a valid pointer to @mem.
+ *
+ * Locking is handled by pci_rescan_remove_lock mutex.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
+ */
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
{
- struct pci_bus *bus;
+ struct pci_dev *pdev_p2sb;
unsigned int devfn_p2sb;
u32 value = P2SBC_HIDE;
int ret;
@@ -146,9 +106,8 @@ static int p2sb_cache_resources(void)
if (ret)
return ret;
- bus = p2sb_get_bus(NULL);
- if (!bus)
- return -ENODEV;
+ /* if @bus is NULL, use bus 0 in domain 0 */
+ bus = bus ?: pci_find_bus(0, 0);
/*
* Prevent concurrent PCI bus scan from seeing the P2SB device and
@@ -156,16 +115,17 @@ static int p2sb_cache_resources(void)
*/
pci_lock_rescan_remove();
- /*
- * The BIOS prevents the P2SB device from being enumerated by the PCI
- * subsystem, so we need to unhide and hide it back to lookup the BAR.
- * Unhide the P2SB device here, if needed.
- */
+ /* Unhide the P2SB device, if needed */
pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
if (value & P2SBC_HIDE)
pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
- ret = p2sb_scan_and_cache(bus, devfn_p2sb);
+ pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb);
+ if (devfn)
+ ret = p2sb_scan_and_read(bus, devfn, mem);
+ else
+ ret = p2sb_read_bar0(pdev_p2sb, mem);
+ pci_stop_and_remove_bus_device(pdev_p2sb);
/* Hide the P2SB device, if it was hidden */
if (value & P2SBC_HIDE)
@@ -173,62 +133,12 @@ static int p2sb_cache_resources(void)
pci_unlock_rescan_remove();
- return ret;
-}
-
-/**
- * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
- * @bus: PCI bus to communicate with
- * @devfn: PCI slot and function to communicate with
- * @mem: memory resource to be filled in
- *
- * If @bus is NULL, the bus 0 in domain 0 will be used.
- * If @devfn is 0, it will be replaced by devfn of the P2SB device.
- *
- * Caller must provide a valid pointer to @mem.
- *
- * Return:
- * 0 on success or appropriate errno value on error.
- */
-int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
-{
- struct p2sb_res_cache *cache;
- int ret;
-
- bus = p2sb_get_bus(bus);
- if (!bus)
- return -ENODEV;
-
- if (!devfn) {
- ret = p2sb_get_devfn(&devfn);
- if (ret)
- return ret;
- }
+ if (ret)
+ return ret;
- cache = &p2sb_resources[PCI_FUNC(devfn)];
- if (cache->bus_dev_id != bus->dev.id)
+ if (mem->flags == 0)
return -ENODEV;
- if (!p2sb_valid_resource(&cache->res))
- return -ENOENT;
-
- memcpy(mem, &cache->res, sizeof(*mem));
return 0;
}
EXPORT_SYMBOL_GPL(p2sb_bar);
-
-static int __init p2sb_fs_init(void)
-{
- p2sb_cache_resources();
- return 0;
-}
-
-/*
- * pci_rescan_remove_lock to avoid access to unhidden P2SB devices can
- * not be locked in sysfs pci bus rescan path because of deadlock. To
- * avoid the deadlock, access to P2SB devices with the lock at an early
- * step in kernel initialization and cache required resources. This
- * should happen after subsys_initcall which initializes PCI subsystem
- * and before device_initcall which requires P2SB resources.
- */
-fs_initcall(p2sb_fs_init);
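
Following the kernel-doc above, a caller of the reworked p2sb_bar() might look roughly like the sketch below; remapping the full BAR with ioremap() is illustrative only and not taken from the patch.

/* Sketch only, not part of the patch. */
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_data/x86/p2sb.h>

static void __iomem *example_map_p2sb_bar(void)
{
	struct resource mem;
	int ret;

	/* NULL bus and devfn 0 select bus 0/domain 0 and the default P2SB devfn */
	ret = p2sb_bar(NULL, 0, &mem);
	if (ret)
		return NULL;

	return ioremap(mem.start, resource_size(&mem));
}
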
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 46c534f6b1c9..0a5d0d8befa8 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -256,7 +256,7 @@ static const struct dev_pm_ops pnp_bus_dev_pm_ops = {
.restore = pnp_bus_resume,
};
-struct bus_type pnp_bus_type = {
+const struct bus_type pnp_bus_type = {
.name = "pnp",
.match = pnp_bus_match,
.probe = pnp_device_probe,
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 4f05f610391b..c02ce0834c2c 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -151,13 +151,13 @@ static int vendor_resource_matches(struct pnp_dev *dev,
static void pnpacpi_parse_allocated_vendor(struct pnp_dev *dev,
struct acpi_resource_vendor_typed *vendor)
{
- if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid, 16)) {
- u64 start, length;
+ struct { u64 start, length; } range;
- memcpy(&start, vendor->byte_data, sizeof(start));
- memcpy(&length, vendor->byte_data + 8, sizeof(length));
-
- pnp_add_mem_resource(dev, start, start + length - 1, 0);
+ if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid,
+ sizeof(range))) {
+ memcpy(&range, vendor->byte_data, sizeof(range));
+ pnp_add_mem_resource(dev, range.start, range.start +
+ range.length - 1, 0);
}
}
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index ddc6f2163c8e..1f31dce5835a 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -60,7 +60,7 @@ do { \
set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
} while(0)
-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
+static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(DESC_DATA32_BIOS,
(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
/*
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 4021d3d325f9..e7defce8cf48 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -4492,7 +4492,7 @@ ptp_ocp_remove(struct pci_dev *pdev)
cancel_delayed_work_sync(&bp->sync_work);
for (i = 0; i < OCP_SMA_NUM; i++) {
if (bp->sma[i].dpll_pin) {
- dpll_pin_unregister(bp->dpll, bp->sma[i].dpll_pin, &dpll_pins_ops, bp);
+ dpll_pin_unregister(bp->dpll, bp->sma[i].dpll_pin, &dpll_pins_ops, &bp->sma[i]);
dpll_pin_put(bp->sma[i].dpll_pin);
}
}
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 83323c3d10af..4b84270a8906 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -51,8 +51,9 @@ static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
* @len: Length (in bytes) of the maintenance transaction
* @data: Value to be read into
*
- * Generates a local SREP space read. Returns %0 on
- * success or %-EINVAL on failure.
+ * Generates a local SREP space read.
+ *
+ * Returns: %0 on success or %-EINVAL on failure.
*/
static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
int len, u32 *data)
@@ -75,8 +76,9 @@ static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
* @len: Length (in bytes) of the maintenance transaction
* @data: Value to be written
*
- * Generates a local write into SREP configuration space. Returns %0 on
- * success or %-EINVAL on failure.
+ * Generates a local write into SREP configuration space.
+ *
+ * Returns: %0 on success or %-EINVAL on failure.
*/
static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
int len, u32 data)
@@ -104,7 +106,7 @@ static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
* @do_wr: Operation flag (1 == MAINT_WR)
*
* Generates a RapidIO maintenance transaction (Read or Write).
- * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
+ * Returns: %0 on success and %-EINVAL or %-EFAULT on failure.
*/
static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
u16 destid, u8 hopcount, u32 offset, int len,
@@ -205,10 +207,10 @@ err_out:
* @hopcount: Number of hops to target device
* @offset: Offset into configuration space
* @len: Length (in bytes) of the maintenance transaction
- * @val: Location to be read into
+ * @data: Location to be read into
*
* Generates a RapidIO maintenance read transaction.
- * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
+ * Returns: %0 on success and %-EINVAL or %-EFAULT on failure.
*/
static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
u8 hopcount, u32 offset, int len, u32 *data)
@@ -228,10 +230,10 @@ static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
* @hopcount: Number of hops to target device
* @offset: Offset into configuration space
* @len: Length (in bytes) of the maintenance transaction
- * @val: Value to be written
+ * @data: Value to be written
*
* Generates a RapidIO maintenance write transaction.
- * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
+ * Returns: %0 on success and %-EINVAL or %-EFAULT on failure.
*/
static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
u8 hopcount, u32 offset, int len, u32 data)
@@ -250,6 +252,8 @@ static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
* Handles inbound port-write interrupts. Copies PW message from an internal
* buffer into PW message FIFO and schedules deferred routine to process
* queued messages.
+ *
+ * Returns: %0
*/
static int
tsi721_pw_handler(struct tsi721_device *priv)
@@ -307,6 +311,8 @@ static void tsi721_pw_dpc(struct work_struct *work)
* tsi721_pw_enable - enable/disable port-write interface init
* @mport: Master port implementing the port write unit
* @enable: 1=enable; 0=disable port-write message handling
+ *
+ * Returns: %0
*/
static int tsi721_pw_enable(struct rio_mport *mport, int enable)
{
@@ -336,7 +342,9 @@ static int tsi721_pw_enable(struct rio_mport *mport, int enable)
* @destid: Destination ID of target device
* @data: 16-bit info field of RapidIO doorbell
*
- * Sends a RapidIO doorbell message. Always returns %0.
+ * Sends a RapidIO doorbell message.
+ *
+ * Returns: %0
*/
static int tsi721_dsend(struct rio_mport *mport, int index,
u16 destid, u16 data)
@@ -361,6 +369,8 @@ static int tsi721_dsend(struct rio_mport *mport, int index,
* Handles inbound doorbell interrupts. Copies doorbell entry from an internal
* buffer into DB message FIFO and schedules deferred routine to process
* queued DBs.
+ *
+ * Returns: %0
*/
static int
tsi721_dbell_handler(struct tsi721_device *priv)
@@ -453,6 +463,8 @@ static void tsi721_db_dpc(struct work_struct *work)
*
* Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported
* interrupt events and calls an event-specific handler(s).
+ *
+ * Returns: %IRQ_HANDLED or %IRQ_NONE
*/
static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
{
@@ -607,6 +619,8 @@ static void tsi721_interrupts_init(struct tsi721_device *priv)
* @ptr: Pointer to interrupt-specific data (tsi721_device structure)
*
* Handles outbound messaging interrupts signaled using MSI-X.
+ *
+ * Returns: %IRQ_HANDLED
*/
static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
{
@@ -624,6 +638,8 @@ static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
* @ptr: Pointer to interrupt-specific data (tsi721_device structure)
*
* Handles inbound messaging interrupts signaled using MSI-X.
+ *
+ * Returns: %IRQ_HANDLED
*/
static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
{
@@ -641,6 +657,8 @@ static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
* @ptr: Pointer to interrupt-specific data (tsi721_device structure)
*
* Handles Tsi721 interrupts from SRIO MAC.
+ *
+ * Returns: %IRQ_HANDLED
*/
static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
{
@@ -663,6 +681,8 @@ static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
* Handles Tsi721 interrupts from SR2PC Channel.
* NOTE: At this moment services only one SR2PC channel associated with inbound
* doorbells.
+ *
+ * Returns: %IRQ_HANDLED
*/
static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
{
@@ -689,6 +709,8 @@ static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
* Registers MSI-X interrupt service routines for interrupts that are active
* immediately after mport initialization. Messaging interrupt service routines
* should be registered during corresponding open requests.
+ *
+ * Returns: %0 on success or -errno value on failure.
*/
static int tsi721_request_msix(struct tsi721_device *priv)
{
@@ -717,6 +739,8 @@ static int tsi721_request_msix(struct tsi721_device *priv)
*
* Configures MSI-X support for Tsi721. Supports only an exact number
* of requested vectors.
+ *
+ * Returns: %0 on success or -errno value on failure.
*/
static int tsi721_enable_msix(struct tsi721_device *priv)
{
@@ -1334,7 +1358,7 @@ static void tsi721_close_sr2pc_mapping(struct tsi721_device *priv)
* @priv: pointer to tsi721 private data
*
* Initializes inbound port write handler.
- * Returns %0 on success or %-ENOMEM on failure.
+ * Returns: %0 on success or %-ENOMEM on failure.
*/
static int tsi721_port_write_init(struct tsi721_device *priv)
{
@@ -1412,7 +1436,8 @@ static void tsi721_doorbell_free(struct tsi721_device *priv)
*
* Initialize BDMA channel allocated for RapidIO maintenance read/write
* request generation
- * Returns %0 on success or %-ENOMEM on failure.
+ *
+ * Returns: %0 on success or %-ENOMEM on failure.
*/
static int tsi721_bdma_maint_init(struct tsi721_device *priv)
{
@@ -1662,6 +1687,8 @@ tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch,
* @mbox: Outbound mailbox
* @buffer: Message to add to outbound queue
* @len: Length of message
+ *
+ * Returns: %0 on success or -errno value on failure.
*/
static int
tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
@@ -1869,6 +1896,8 @@ no_sts_update:
* @dev_id: Device specific pointer to pass on event
* @mbox: Mailbox to open
* @entries: Number of entries in the outbound mailbox ring
+ *
+ * Returns: %0 on success or -errno value on failure.
*/
static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
int mbox, int entries)
@@ -2156,6 +2185,8 @@ static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
* @dev_id: Device specific pointer to pass on event
* @mbox: Mailbox to open
* @entries: Number of entries in the inbound mailbox ring
+ *
+ * Returns: %0 on success or -errno value on failure.
*/
static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
int mbox, int entries)
@@ -2409,6 +2440,8 @@ static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
* @mport: Master port implementing the Inbound Messaging Engine
* @mbox: Inbound mailbox number
* @buf: Buffer to add to inbound queue
+ *
+ * Returns: %0 on success or -errno value on failure.
*/
static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
@@ -2439,7 +2472,7 @@ out:
* @mport: Master port implementing the Inbound Messaging Engine
* @mbox: Inbound mailbox number
*
- * Returns pointer to the message on success or NULL on failure.
+ * Returns: pointer to the message on success or %NULL on failure.
*/
static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox)
{
@@ -2507,6 +2540,8 @@ out:
* @priv: pointer to tsi721 private data
*
* Configures Tsi721 messaging engine.
+ *
+ * Returns: %0
*/
static int tsi721_messages_init(struct tsi721_device *priv)
{
@@ -2539,9 +2574,9 @@ static int tsi721_messages_init(struct tsi721_device *priv)
/**
* tsi721_query_mport - Fetch inbound message from the Tsi721 MSG Queue
* @mport: Master port implementing the Inbound Messaging Engine
- * @mbox: Inbound mailbox number
+ * @attr: mport device attributes
*
- * Returns pointer to the message on success or NULL on failure.
+ * Returns: pointer to the message on success or %NULL on failure.
*/
static int tsi721_query_mport(struct rio_mport *mport,
struct rio_mport_attr *attr)
@@ -2653,6 +2688,8 @@ static void tsi721_mport_release(struct device *dev)
* @priv: pointer to tsi721 private data
*
* Configures Tsi721 as RapidIO master port.
+ *
+ * Returns: %0 on success or -errno value on failure.
*/
static int tsi721_setup_mport(struct tsi721_device *priv)
{
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index d375c02059f3..f77f75172bdc 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -283,11 +283,13 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
#ifdef CONFIG_PCI_MSI
/**
- * tsi721_omsg_msix - MSI-X interrupt handler for BDMA channels
+ * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
* @irq: Linux interrupt number
* @ptr: Pointer to interrupt-specific data (BDMA channel structure)
*
* Handles BDMA channel interrupts signaled using MSI-X.
+ *
+ * Returns: %IRQ_HANDLED
*/
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index f3ec24691378..550145f82726 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -56,6 +56,16 @@ config REGULATOR_USERSPACE_CONSUMER
If unsure, say no.
+config REGULATOR_NETLINK_EVENTS
+ bool "Enable support for receiving regulator events via netlink"
+ depends on NET
+ help
+ Enabling this option allows the kernel to broadcast regulator events using
+ the netlink mechanism. User-space applications can subscribe to these events
+ for real-time updates on various regulator events.
+
+ If unsure, say no.
+
config REGULATOR_88PG86X
tristate "Marvell 88PG86X voltage regulators"
depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b2b059b5ee56..46fb569e6be8 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o helpers.o devres.o irq_helpers.o
+obj-$(CONFIG_REGULATOR_NETLINK_EVENTS) += event.o
obj-$(CONFIG_OF) += of_regulator.o
obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index b465c0010665..4b54068d4f59 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -339,14 +339,12 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
return ret;
}
-static int arizona_ldo1_remove(struct platform_device *pdev)
+static void arizona_ldo1_remove(struct platform_device *pdev)
{
struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev);
if (ldo1->ena_gpiod)
gpiod_put(ldo1->ena_gpiod);
-
- return 0;
}
static int madera_ldo1_probe(struct platform_device *pdev)
@@ -377,7 +375,7 @@ static int madera_ldo1_probe(struct platform_device *pdev)
static struct platform_driver arizona_ldo1_driver = {
.probe = arizona_ldo1_probe,
- .remove = arizona_ldo1_remove,
+ .remove_new = arizona_ldo1_remove,
.driver = {
.name = "arizona-ldo1",
.probe_type = PROBE_FORCE_SYNCHRONOUS,
@@ -386,7 +384,7 @@ static struct platform_driver arizona_ldo1_driver = {
static struct platform_driver madera_ldo1_driver = {
.probe = madera_ldo1_probe,
- .remove = arizona_ldo1_remove,
+ .remove_new = arizona_ldo1_remove,
.driver = {
.name = "madera-ldo1",
.probe_type = PROBE_FORCE_SYNCHRONOUS,
diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
index d469481d8442..c7ceba56e7dc 100644
--- a/drivers/regulator/bd9571mwv-regulator.c
+++ b/drivers/regulator/bd9571mwv-regulator.c
@@ -260,10 +260,9 @@ static const struct dev_pm_ops bd9571mwv_pm = {
SET_SYSTEM_SLEEP_PM_OPS(bd9571mwv_suspend, bd9571mwv_resume)
};
-static int bd9571mwv_regulator_remove(struct platform_device *pdev)
+static void bd9571mwv_regulator_remove(struct platform_device *pdev)
{
device_remove_file(&pdev->dev, &dev_attr_backup_mode);
- return 0;
}
#define DEV_PM_OPS &bd9571mwv_pm
#else
@@ -357,7 +356,7 @@ static struct platform_driver bd9571mwv_regulator_driver = {
.pm = DEV_PM_OPS,
},
.probe = bd9571mwv_regulator_probe,
- .remove = bd9571mwv_regulator_remove,
+ .remove_new = bd9571mwv_regulator_remove,
.id_table = bd9571mwv_regulator_id_table,
};
module_platform_driver(bd9571mwv_regulator_driver);
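
The conversions above (and the similar ones in db8500-prcmu and later regulator drivers) move platform drivers to the void-returning .remove_new callback; a minimal sketch of the resulting shape, for a hypothetical driver, is:

/* Sketch only; "example-driver" is a placeholder name. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* release resources; errors can no longer be (wrongly) returned */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example-driver",
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
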
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 3137e40fcd3e..a968dabb48f5 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/consumer.h>
@@ -32,6 +33,7 @@
#include "dummy.h"
#include "internal.h"
+#include "regnl.h"
static DEFINE_WW_CLASS(regulator_ww_class);
static DEFINE_MUTEX(regulator_nesting_mutex);
@@ -2918,7 +2920,8 @@ static int _regulator_enable(struct regulator *regulator)
/* Fallthrough on positive return values - already enabled */
}
- rdev->use_count++;
+ if (regulator->enable_count == 1)
+ rdev->use_count++;
return 0;
@@ -2993,37 +2996,40 @@ static int _regulator_disable(struct regulator *regulator)
lockdep_assert_held_once(&rdev->mutex.base);
- if (WARN(rdev->use_count <= 0,
+ if (WARN(regulator->enable_count == 0,
"unbalanced disables for %s\n", rdev_get_name(rdev)))
return -EIO;
- /* are we the last user and permitted to disable ? */
- if (rdev->use_count == 1 &&
- (rdev->constraints && !rdev->constraints->always_on)) {
-
- /* we are last user */
- if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) {
- ret = _notifier_call_chain(rdev,
- REGULATOR_EVENT_PRE_DISABLE,
- NULL);
- if (ret & NOTIFY_STOP_MASK)
- return -EINVAL;
-
- ret = _regulator_do_disable(rdev);
- if (ret < 0) {
- rdev_err(rdev, "failed to disable: %pe\n", ERR_PTR(ret));
- _notifier_call_chain(rdev,
- REGULATOR_EVENT_ABORT_DISABLE,
+ if (regulator->enable_count == 1) {
+ /* disabling last enable_count from this regulator */
+ /* are we the last user and permitted to disable ? */
+ if (rdev->use_count == 1 &&
+ (rdev->constraints && !rdev->constraints->always_on)) {
+
+ /* we are last user */
+ if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) {
+ ret = _notifier_call_chain(rdev,
+ REGULATOR_EVENT_PRE_DISABLE,
+ NULL);
+ if (ret & NOTIFY_STOP_MASK)
+ return -EINVAL;
+
+ ret = _regulator_do_disable(rdev);
+ if (ret < 0) {
+ rdev_err(rdev, "failed to disable: %pe\n", ERR_PTR(ret));
+ _notifier_call_chain(rdev,
+ REGULATOR_EVENT_ABORT_DISABLE,
+ NULL);
+ return ret;
+ }
+ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
NULL);
- return ret;
}
- _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
- NULL);
- }
- rdev->use_count = 0;
- } else if (rdev->use_count > 1) {
- rdev->use_count--;
+ rdev->use_count = 0;
+ } else if (rdev->use_count > 1) {
+ rdev->use_count--;
+ }
}
if (ret == 0)
@@ -4849,7 +4855,23 @@ static int _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
{
/* call rdev chain first */
- return blocking_notifier_call_chain(&rdev->notifier, event, data);
+ int ret = blocking_notifier_call_chain(&rdev->notifier, event, data);
+
+ if (IS_REACHABLE(CONFIG_REGULATOR_NETLINK_EVENTS)) {
+ struct device *parent = rdev->dev.parent;
+ const char *rname = rdev_get_name(rdev);
+ char name[32];
+
+ /* Avoid duplicate debugfs directory names */
+ if (parent && rname == rdev->desc->name) {
+ snprintf(name, sizeof(name), "%s-%s", dev_name(parent),
+ rname);
+ rname = name;
+ }
+ reg_generate_netlink_event(rname, event);
+ }
+
+ return ret;
}
int _regulator_bulk_get(struct device *dev, int num_consumers,
@@ -5062,6 +5084,41 @@ void regulator_bulk_free(int num_consumers,
EXPORT_SYMBOL_GPL(regulator_bulk_free);
/**
+ * regulator_handle_critical - Handle events for system-critical regulators.
+ * @rdev: The regulator device.
+ * @event: The event being handled.
+ *
+ * This function handles critical events such as under-voltage, over-current,
+ * and unknown errors for regulators deemed system-critical. On detecting such
+ * events, it triggers a hardware protection shutdown with a defined timeout.
+ */
+static void regulator_handle_critical(struct regulator_dev *rdev,
+ unsigned long event)
+{
+ const char *reason = NULL;
+
+ if (!rdev->constraints->system_critical)
+ return;
+
+ switch (event) {
+ case REGULATOR_EVENT_UNDER_VOLTAGE:
+ reason = "System critical regulator: voltage drop detected";
+ break;
+ case REGULATOR_EVENT_OVER_CURRENT:
+ reason = "System critical regulator: over-current detected";
+ break;
+ case REGULATOR_EVENT_FAIL:
+ reason = "System critical regulator: unknown error";
+ }
+
+ if (!reason)
+ return;
+
+ hw_protection_shutdown(reason,
+ rdev->constraints->uv_less_critical_window_ms);
+}
+
+/**
* regulator_notifier_call_chain - call regulator event notifier
* @rdev: regulator source
* @event: notifier block
@@ -5073,6 +5130,8 @@ EXPORT_SYMBOL_GPL(regulator_bulk_free);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
{
+ regulator_handle_critical(rdev, event);
+
_notifier_call_chain(rdev, event, data);
return NOTIFY_DONE;
@@ -6234,6 +6293,14 @@ unlock:
return 0;
}
+static bool regulator_ignore_unused;
+static int __init regulator_ignore_unused_setup(char *__unused)
+{
+ regulator_ignore_unused = true;
+ return 1;
+}
+__setup("regulator_ignore_unused", regulator_ignore_unused_setup);
+
static void regulator_init_complete_work_function(struct work_struct *work)
{
/*
@@ -6246,6 +6313,15 @@ static void regulator_init_complete_work_function(struct work_struct *work)
class_for_each_device(&regulator_class, NULL, NULL,
regulator_register_resolve_supply);
+ /*
+ * For debugging purposes, it may be useful to prevent unused
+ * regulators from being disabled.
+ */
+ if (regulator_ignore_unused) {
+ pr_warn("regulator: Not disabling unused regulators\n");
+ return;
+ }
+
/* If we have a full configuration then disable any regulators
* we have permission to change the status for and which are
* not in use or always_on. This is effectively the default
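
A consumer-side sketch of the reworked reference counting in _regulator_enable()/_regulator_disable() above: enable and disable calls must still balance per consumer, but rdev->use_count now only moves on a consumer's first enable and last disable. The "vdd-example" supply name below is a placeholder.

/* Sketch only, not part of the patch. */
#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int example_double_enable(struct device *dev)
{
	struct regulator *reg;
	int ret;

	reg = devm_regulator_get(dev, "vdd-example");
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	ret = regulator_enable(reg);    /* enable_count 0 -> 1, use_count++ */
	if (ret)
		return ret;

	ret = regulator_enable(reg);    /* enable_count 1 -> 2, use_count unchanged */
	if (ret) {
		regulator_disable(reg);
		return ret;
	}

	regulator_disable(reg);         /* enable_count 2 -> 1 */
	regulator_disable(reg);         /* enable_count 1 -> 0, use_count-- */
	return 0;
}
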
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 34c5e485d0af..1e2d54da1b9a 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -469,11 +469,9 @@ static int db8500_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int db8500_regulator_remove(struct platform_device *pdev)
+static void db8500_regulator_remove(struct platform_device *pdev)
{
ux500_regulator_debug_exit();
-
- return 0;
}
static struct platform_driver db8500_regulator_driver = {
@@ -482,7 +480,7 @@ static struct platform_driver db8500_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = db8500_regulator_probe,
- .remove = db8500_regulator_remove,
+ .remove_new = db8500_regulator_remove,
};
static int __init db8500_regulator_init(void)
diff --git a/drivers/regulator/event.c b/drivers/regulator/event.c
new file mode 100644
index 000000000000..ea3bd49544e8
--- /dev/null
+++ b/drivers/regulator/event.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Regulator event over netlink
+ *
+ * Author: Naresh Solanki <Naresh.Solanki@9elements.com>
+ */
+
+#include <regulator/regulator.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <linux/atomic.h>
+
+#include "regnl.h"
+
+static atomic_t reg_event_seqnum = ATOMIC_INIT(0);
+
+static const struct genl_multicast_group reg_event_mcgrps[] = {
+ { .name = REG_GENL_MCAST_GROUP_NAME, },
+};
+
+static struct genl_family reg_event_genl_family __ro_after_init = {
+ .module = THIS_MODULE,
+ .name = REG_GENL_FAMILY_NAME,
+ .version = REG_GENL_VERSION,
+ .maxattr = REG_GENL_ATTR_MAX,
+ .mcgrps = reg_event_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(reg_event_mcgrps),
+};
+
+int reg_generate_netlink_event(const char *reg_name, u64 event)
+{
+ struct sk_buff *skb;
+ struct nlattr *attr;
+ struct reg_genl_event *edata;
+ void *msg_header;
+ int size;
+
+ /* allocate memory */
+ size = nla_total_size(sizeof(struct reg_genl_event)) +
+ nla_total_size(0);
+
+ skb = genlmsg_new(size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ /* add the genetlink message header */
+ msg_header = genlmsg_put(skb, 0, atomic_inc_return(&reg_event_seqnum),
+ &reg_event_genl_family, 0, REG_GENL_CMD_EVENT);
+ if (!msg_header) {
+ nlmsg_free(skb);
+ return -ENOMEM;
+ }
+
+ /* fill the data */
+ attr = nla_reserve(skb, REG_GENL_ATTR_EVENT, sizeof(struct reg_genl_event));
+ if (!attr) {
+ nlmsg_free(skb);
+ return -EINVAL;
+ }
+
+ edata = nla_data(attr);
+ memset(edata, 0, sizeof(struct reg_genl_event));
+
+ strscpy(edata->reg_name, reg_name, sizeof(edata->reg_name));
+ edata->event = event;
+
+ /* send multicast genetlink message */
+ genlmsg_end(skb, msg_header);
+ size = genlmsg_multicast(&reg_event_genl_family, skb, 0, 0, GFP_ATOMIC);
+
+ return size;
+}
+
+static int __init reg_event_genetlink_init(void)
+{
+ return genl_register_family(&reg_event_genl_family);
+}
+
+static int __init reg_event_init(void)
+{
+ int error;
+
+	/* create genetlink family for regulator events */
+ error = reg_event_genetlink_init();
+ if (error)
+ pr_warn("Failed to create genetlink family for reg event\n");
+
+ return 0;
+}
+
+fs_initcall(reg_event_init);
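
A rough user-space sketch of listening for the events broadcast by reg_generate_netlink_event() above, using libnl-genl; the family/group macros and struct reg_genl_event are assumed to come from the new uapi header that event.c includes.

/* Sketch only; header availability and macro names are assumptions. */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <regulator/regulator.h>

static int reg_event_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[REG_GENL_ATTR_MAX + 1];
	struct reg_genl_event *ev;

	if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, REG_GENL_ATTR_MAX, NULL))
		return NL_SKIP;
	if (!attrs[REG_GENL_ATTR_EVENT])
		return NL_SKIP;

	ev = nla_data(attrs[REG_GENL_ATTR_EVENT]);
	printf("regulator %s: event 0x%llx\n", ev->reg_name,
	       (unsigned long long)ev->event);
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sock = nl_socket_alloc();
	int grp;

	if (!sock || genl_connect(sock))
		return 1;

	grp = genl_ctrl_resolve_grp(sock, REG_GENL_FAMILY_NAME,
				    REG_GENL_MCAST_GROUP_NAME);
	if (grp < 0)
		return 1;

	nl_socket_disable_seq_check(sock);
	nl_socket_modify_cb(sock, NL_CB_VALID, NL_CB_CUSTOM, reg_event_cb, NULL);
	nl_socket_add_membership(sock, grp);

	while (1)
		nl_recvmsgs_default(sock);
}
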
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 1b65e5e4e40f..03afc160fc72 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -131,6 +131,8 @@ static int of_get_regulation_constraints(struct device *dev,
constraints->valid_ops_mask |= REGULATOR_CHANGE_STATUS;
constraints->pull_down = of_property_read_bool(np, "regulator-pull-down");
+ constraints->system_critical = of_property_read_bool(np,
+ "system-critical-regulator");
if (of_property_read_bool(np, "regulator-allow-bypass"))
constraints->valid_ops_mask |= REGULATOR_CHANGE_BYPASS;
@@ -173,6 +175,13 @@ static int of_get_regulation_constraints(struct device *dev,
if (!ret)
constraints->enable_time = pval;
+ ret = of_property_read_u32(np, "regulator-uv-survival-time-ms", &pval);
+ if (!ret)
+ constraints->uv_less_critical_window_ms = pval;
+ else
+ constraints->uv_less_critical_window_ms =
+ REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS;
+
constraints->soft_start = of_property_read_bool(np,
"regulator-soft-start");
ret = of_property_read_u32(np, "regulator-active-discharge", &pval);
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index e0dc033aae0f..60656a815b9e 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -1594,7 +1594,7 @@ static const struct of_device_id of_palmas_match_tbl[] = {
static int palmas_regulators_probe(struct platform_device *pdev)
{
struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
- struct palmas_pmic_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct palmas_pmic_platform_data *pdata;
struct device_node *node = pdev->dev.of_node;
struct palmas_pmic_driver_data *driver_data;
struct regulator_config config = { };
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index cf502eec0915..80e304711345 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+// Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
#define pr_fmt(fmt) "%s: " fmt, __func__
@@ -68,10 +69,11 @@ enum rpmh_regulator_type {
* @regulator_type: RPMh accelerator type used to manage this
* regulator
* @ops: Pointer to regulator ops callback structure
- * @voltage_range: The single range of voltages supported by this
- * PMIC regulator type
+ * @voltage_ranges: The possible ranges of voltages supported by this
+ * PMIC regulator type
+ * @n_linear_ranges: Number of entries in voltage_ranges
* @n_voltages: The number of unique voltage set points defined
- * by voltage_range
+ * by voltage_ranges
* @hpm_min_load_uA: Minimum load current in microamps that requires
* high power mode (HPM) operation. This is used
* for LDO hardware type regulators only.
@@ -85,7 +87,8 @@ enum rpmh_regulator_type {
struct rpmh_vreg_hw_data {
enum rpmh_regulator_type regulator_type;
const struct regulator_ops *ops;
- const struct linear_range voltage_range;
+ const struct linear_range *voltage_ranges;
+ int n_linear_ranges;
int n_voltages;
int hpm_min_load_uA;
const int *pmic_mode_map;
@@ -449,8 +452,8 @@ static int rpmh_regulator_init_vreg(struct rpmh_vreg *vreg, struct device *dev,
vreg->mode = REGULATOR_MODE_INVALID;
if (rpmh_data->hw_data->n_voltages) {
- vreg->rdesc.linear_ranges = &rpmh_data->hw_data->voltage_range;
- vreg->rdesc.n_linear_ranges = 1;
+ vreg->rdesc.linear_ranges = rpmh_data->hw_data->voltage_ranges;
+ vreg->rdesc.n_linear_ranges = rpmh_data->hw_data->n_linear_ranges;
vreg->rdesc.n_voltages = rpmh_data->hw_data->n_voltages;
}
@@ -508,6 +511,14 @@ static const int pmic_mode_map_pmic5_ldo[REGULATOR_MODE_STANDBY + 1] = {
[REGULATOR_MODE_FAST] = -EINVAL,
};
+static const int pmic_mode_map_pmic5_ldo_hpm[REGULATOR_MODE_STANDBY + 1] = {
+ [REGULATOR_MODE_INVALID] = -EINVAL,
+ [REGULATOR_MODE_STANDBY] = -EINVAL,
+ [REGULATOR_MODE_IDLE] = -EINVAL,
+ [REGULATOR_MODE_NORMAL] = PMIC5_LDO_MODE_HPM,
+ [REGULATOR_MODE_FAST] = -EINVAL,
+};
+
static unsigned int rpmh_regulator_pmic4_ldo_of_map_mode(unsigned int rpmh_mode)
{
unsigned int mode;
@@ -613,7 +624,10 @@ static unsigned int rpmh_regulator_pmic4_bob_of_map_mode(unsigned int rpmh_mode)
static const struct rpmh_vreg_hw_data pmic4_pldo = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(1664000, 0, 255, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1664000, 0, 255, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 256,
.hpm_min_load_uA = 10000,
.pmic_mode_map = pmic_mode_map_pmic4_ldo,
@@ -623,7 +637,10 @@ static const struct rpmh_vreg_hw_data pmic4_pldo = {
static const struct rpmh_vreg_hw_data pmic4_pldo_lv = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(1256000, 0, 127, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1256000, 0, 127, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 128,
.hpm_min_load_uA = 10000,
.pmic_mode_map = pmic_mode_map_pmic4_ldo,
@@ -633,7 +650,10 @@ static const struct rpmh_vreg_hw_data pmic4_pldo_lv = {
static const struct rpmh_vreg_hw_data pmic4_nldo = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 128,
.hpm_min_load_uA = 30000,
.pmic_mode_map = pmic_mode_map_pmic4_ldo,
@@ -643,7 +663,10 @@ static const struct rpmh_vreg_hw_data pmic4_nldo = {
static const struct rpmh_vreg_hw_data pmic4_hfsmps3 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 216,
.pmic_mode_map = pmic_mode_map_pmic4_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -652,7 +675,10 @@ static const struct rpmh_vreg_hw_data pmic4_hfsmps3 = {
static const struct rpmh_vreg_hw_data pmic4_ftsmps426 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 258, 4000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 258, 4000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 259,
.pmic_mode_map = pmic_mode_map_pmic4_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -661,7 +687,10 @@ static const struct rpmh_vreg_hw_data pmic4_ftsmps426 = {
static const struct rpmh_vreg_hw_data pmic4_bob = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_bypass_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(1824000, 0, 83, 32000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1824000, 0, 83, 32000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 84,
.pmic_mode_map = pmic_mode_map_pmic4_bob,
.of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode,
@@ -676,7 +705,10 @@ static const struct rpmh_vreg_hw_data pmic4_lvs = {
static const struct rpmh_vreg_hw_data pmic5_pldo = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(1504000, 0, 255, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1504000, 0, 255, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 256,
.hpm_min_load_uA = 10000,
.pmic_mode_map = pmic_mode_map_pmic5_ldo,
@@ -686,7 +718,10 @@ static const struct rpmh_vreg_hw_data pmic5_pldo = {
static const struct rpmh_vreg_hw_data pmic5_pldo_lv = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(1504000, 0, 62, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1504000, 0, 62, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 63,
.hpm_min_load_uA = 10000,
.pmic_mode_map = pmic_mode_map_pmic5_ldo,
@@ -696,17 +731,50 @@ static const struct rpmh_vreg_hw_data pmic5_pldo_lv = {
static const struct rpmh_vreg_hw_data pmic5_pldo515_mv = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(1800000, 0, 187, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1800000, 0, 187, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 188,
.hpm_min_load_uA = 10000,
.pmic_mode_map = pmic_mode_map_pmic5_ldo,
.of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
};
+static const struct rpmh_vreg_hw_data pmic5_pldo502 = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_ops,
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1504000, 0, 255, 8000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 256,
+ .pmic_mode_map = pmic_mode_map_pmic5_ldo_hpm,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_pldo502ln = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_ops,
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1800000, 0, 2, 200000),
+ REGULATOR_LINEAR_RANGE(2608000, 3, 28, 16000),
+ REGULATOR_LINEAR_RANGE(3104000, 29, 30, 96000),
+ REGULATOR_LINEAR_RANGE(3312000, 31, 31, 0),
+ },
+ .n_linear_ranges = 4,
+ .n_voltages = 32,
+ .pmic_mode_map = pmic_mode_map_pmic5_ldo_hpm,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
static const struct rpmh_vreg_hw_data pmic5_nldo = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 123, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 123, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 124,
.hpm_min_load_uA = 30000,
.pmic_mode_map = pmic_mode_map_pmic5_ldo,
@@ -716,17 +784,36 @@ static const struct rpmh_vreg_hw_data pmic5_nldo = {
static const struct rpmh_vreg_hw_data pmic5_nldo515 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 210, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 210, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 211,
.hpm_min_load_uA = 30000,
.pmic_mode_map = pmic_mode_map_pmic5_ldo,
.of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
};
+static const struct rpmh_vreg_hw_data pmic5_nldo502 = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_drms_ops,
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(528000, 0, 127, 8000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 128,
+ .hpm_min_load_uA = 30000,
+ .pmic_mode_map = pmic_mode_map_pmic5_ldo,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
static const struct rpmh_vreg_hw_data pmic5_hfsmps510 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 216,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -735,7 +822,10 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps510 = {
static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 264,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -744,7 +834,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
static const struct rpmh_vreg_hw_data pmic5_ftsmps520 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 264,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -753,7 +846,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps520 = {
static const struct rpmh_vreg_hw_data pmic5_ftsmps525_lv = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 267, 4000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(300000, 0, 267, 4000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 268,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -762,7 +858,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps525_lv = {
static const struct rpmh_vreg_hw_data pmic5_ftsmps525_mv = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(600000, 0, 267, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(600000, 0, 267, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 268,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -771,7 +870,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps525_mv = {
static const struct rpmh_vreg_hw_data pmic5_ftsmps527 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 215,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -780,7 +882,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps527 = {
static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 235, 16000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 235, 16000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 236,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -789,7 +894,10 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
static const struct rpmh_vreg_hw_data pmic5_hfsmps515_1 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(900000, 0, 4, 16000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(900000, 0, 4, 16000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 5,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -798,7 +906,10 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps515_1 = {
static const struct rpmh_vreg_hw_data pmic5_bob = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_bypass_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(3000000, 0, 31, 32000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(3000000, 0, 31, 32000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 32,
.pmic_mode_map = pmic_mode_map_pmic5_bob,
.of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode,
@@ -1147,6 +1258,16 @@ static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
{}
};
+static const struct rpmh_vreg_init_data pm8010_vreg_data[] = {
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo502, "vdd-l1-l2"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo502, "vdd-l1-l2"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_pldo502ln, "vdd-l3-l4"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_pldo502ln, "vdd-l3-l4"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo502, "vdd-l5"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo502ln, "vdd-l6"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo502, "vdd-l7"),
+	{}
+};
+
static const struct rpmh_vreg_init_data pm6150_vreg_data[] = {
RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"),
RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps510, "vdd-s2"),
@@ -1463,6 +1584,10 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
.data = pm8009_1_vreg_data,
},
{
+ .compatible = "qcom,pm8010-rpmh-regulators",
+ .data = pm8010_vreg_data,
+ },
+ {
.compatible = "qcom,pm8150-rpmh-regulators",
.data = pm8150_vreg_data,
},
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index f53ada076252..d1be9568025e 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -796,6 +796,7 @@ static const struct rpm_regulator_data rpm_mp5496_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &mp5496_smps, "s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &mp5496_smps, "s2" },
{ "l2", QCOM_SMD_RPM_LDOA, 2, &mp5496_ldoa2, "l2" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &mp5496_ldoa2, "l5" },
{}
};
@@ -1012,6 +1013,39 @@ static const struct rpm_regulator_data rpm_pm8916_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm8937_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8994_hfsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8994_hfsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8994_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8994_hfsmps, "vdd_s4" },
+ /* S5 - S6 are managed by SPMI */
+
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8953_ult_nldo, "vdd_l1_l19" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8953_ult_nldo, "vdd_l2_l23" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8953_ult_nldo, "vdd_l3" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8953_ult_nldo, "vdd_l1_l19" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8953_lnldo, "vdd_l20_l21" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8953_lnldo, "vdd_l20_l21" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8994_nldo, "vdd_l2_l23" },
+ {}
+};
+
static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8x41_hfsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8x41_hfsmps, "vdd_s2" },
@@ -1329,6 +1363,7 @@ static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
{ .compatible = "qcom,rpm-pm8909-regulators", .data = &rpm_pm8909_regulators },
{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
+ { .compatible = "qcom,rpm-pm8937-regulators", .data = &rpm_pm8937_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
{ .compatible = "qcom,rpm-pm8950-regulators", .data = &rpm_pm8950_regulators },
{ .compatible = "qcom,rpm-pm8953-regulators", .data = &rpm_pm8953_regulators },
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 94f9092b29ef..9a9fa20dcd95 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -2239,6 +2239,39 @@ static const struct spmi_regulator_data pm8916_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pm8937_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s4", },
+ { "s5", 0x2000, "vdd_s5", },
+ { "s6", 0x2300, "vdd_s6", },
+ { "l1", 0x4000, "vdd_l1_l19", },
+ { "l2", 0x4100, "vdd_l2_l23", },
+ { "l3", 0x4200, "vdd_l3", },
+ { "l4", 0x4300, "vdd_l4_l5_l6_l7_l16", },
+ { "l5", 0x4400, "vdd_l4_l5_l6_l7_l16", },
+ { "l6", 0x4500, "vdd_l4_l5_l6_l7_l16", },
+ { "l7", 0x4600, "vdd_l4_l5_l6_l7_l16", },
+ { "l8", 0x4700, "vdd_l8_l11_l12_l17_l22", },
+ { "l9", 0x4800, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l10", 0x4900, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l11", 0x4a00, "vdd_l8_l11_l12_l17_l22", },
+ { "l12", 0x4b00, "vdd_l8_l11_l12_l17_l22", },
+ { "l13", 0x4c00, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l14", 0x4d00, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l15", 0x4e00, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l16", 0x4f00, "vdd_l4_l5_l6_l7_l16", },
+ { "l17", 0x5000, "vdd_l8_l11_l12_l17_l22", },
+ { "l18", 0x5100, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l19", 0x5200, "vdd_l1_l19", },
+ { "l20", 0x5300, "vdd_l20_l21", },
+ { "l21", 0x5400, "vdd_l21_l21", },
+ { "l22", 0x5500, "vdd_l8_l11_l12_l17_l22", },
+ { "l23", 0x5600, "vdd_l2_l23", },
+ { }
+};
+
static const struct spmi_regulator_data pm8941_regulators[] = {
{ "s1", 0x1400, "vdd_s1", },
{ "s2", 0x1700, "vdd_s2", },
@@ -2453,6 +2486,7 @@ static const struct of_device_id qcom_spmi_regulator_match[] = {
{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
{ .compatible = "qcom,pm8909-regulators", .data = &pm8909_regulators },
{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
+ { .compatible = "qcom,pm8937-regulators", .data = &pm8937_regulators },
{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
{ .compatible = "qcom,pm8950-regulators", .data = &pm8950_regulators },
{ .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
diff --git a/drivers/regulator/regnl.h b/drivers/regulator/regnl.h
new file mode 100644
index 000000000000..bcba16cc05cc
--- /dev/null
+++ b/drivers/regulator/regnl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Regulator event over netlink
+ *
+ * Author: Naresh Solanki <Naresh.Solanki@9elements.com>
+ */
+
+#ifndef __REGULATOR_EVENT_H
+#define __REGULATOR_EVENT_H
+
+int reg_generate_netlink_event(const char *reg_name, u64 event);
+
+#endif
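
A hedged sketch of how the declaration above might be used from a regulator notification path; the wrapper function below is hypothetical and not part of this patch.

#include <linux/regulator/driver.h>

#include "regnl.h"

/* hypothetical helper: forward a framework event to netlink listeners */
static void example_notify_overcurrent(struct regulator_dev *rdev)
{
	reg_generate_netlink_event(rdev_get_name(rdev),
				   REGULATOR_EVENT_OVER_CURRENT);
}
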
diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
index 717144cbe0f9..40855105dd33 100644
--- a/drivers/regulator/stm32-vrefbuf.c
+++ b/drivers/regulator/stm32-vrefbuf.c
@@ -233,7 +233,7 @@ err_pm_stop:
return ret;
}
-static int stm32_vrefbuf_remove(struct platform_device *pdev)
+static void stm32_vrefbuf_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
@@ -244,8 +244,6 @@ static int stm32_vrefbuf_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
-
- return 0;
};
static int __maybe_unused stm32_vrefbuf_runtime_suspend(struct device *dev)
@@ -282,7 +280,7 @@ MODULE_DEVICE_TABLE(of, stm32_vrefbuf_of_match);
static struct platform_driver stm32_vrefbuf_driver = {
.probe = stm32_vrefbuf_probe,
- .remove = stm32_vrefbuf_remove,
+ .remove_new = stm32_vrefbuf_remove,
.driver = {
.name = "stm32-vrefbuf",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c
index 79d1a3eb18d4..a498df7cb016 100644
--- a/drivers/regulator/stpmic1_regulator.c
+++ b/drivers/regulator/stpmic1_regulator.c
@@ -15,7 +15,7 @@
#include <dt-bindings/mfd/st,stpmic1.h>
/**
- * struct stpmic1 regulator description: this structure is used as driver data
+ * struct stpmic1_regulator_cfg - this structure is used as driver data
* @desc: regulator framework description
* @mask_reset_reg: mask reset register address
* @mask_reset_mask: mask rank and mask reset register mask
diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
index 1d8304b88bd6..5f868042392f 100644
--- a/drivers/regulator/uniphier-regulator.c
+++ b/drivers/regulator/uniphier-regulator.c
@@ -115,7 +115,7 @@ out_rst_assert:
return ret;
}
-static int uniphier_regulator_remove(struct platform_device *pdev)
+static void uniphier_regulator_remove(struct platform_device *pdev)
{
struct uniphier_regulator_priv *priv = platform_get_drvdata(pdev);
int i;
@@ -124,8 +124,6 @@ static int uniphier_regulator_remove(struct platform_device *pdev)
reset_control_assert(priv->rst[i]);
clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
-
- return 0;
}
/* USB3 controller data */
@@ -209,7 +207,7 @@ MODULE_DEVICE_TABLE(of, uniphier_regulator_match);
static struct platform_driver uniphier_regulator_driver = {
.probe = uniphier_regulator_probe,
- .remove = uniphier_regulator_remove,
+ .remove_new = uniphier_regulator_remove,
.driver = {
.name = "uniphier-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index 97f075ed68c9..53d1b9d6f69c 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -194,7 +194,7 @@ err_enable:
return ret;
}
-static int regulator_userspace_consumer_remove(struct platform_device *pdev)
+static void regulator_userspace_consumer_remove(struct platform_device *pdev)
{
struct userspace_consumer_data *data = platform_get_drvdata(pdev);
@@ -202,8 +202,6 @@ static int regulator_userspace_consumer_remove(struct platform_device *pdev)
if (data->enabled && !data->no_autoswitch)
regulator_bulk_disable(data->num_supplies, data->supplies);
-
- return 0;
}
static const struct of_device_id regulator_userspace_consumer_of_match[] = {
@@ -213,7 +211,7 @@ static const struct of_device_id regulator_userspace_consumer_of_match[] = {
static struct platform_driver regulator_userspace_consumer_driver = {
.probe = regulator_userspace_consumer_probe,
- .remove = regulator_userspace_consumer_remove,
+ .remove_new = regulator_userspace_consumer_remove,
.driver = {
.name = "reg-userspace-consumer",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index d5a160efdae6..0a0ee186c6af 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -345,7 +345,7 @@ static int regulator_virtual_probe(struct platform_device *pdev)
return 0;
}
-static int regulator_virtual_remove(struct platform_device *pdev)
+static void regulator_virtual_remove(struct platform_device *pdev)
{
struct virtual_consumer_data *drvdata = platform_get_drvdata(pdev);
@@ -353,13 +353,11 @@ static int regulator_virtual_remove(struct platform_device *pdev)
if (drvdata->enabled)
regulator_disable(drvdata->regulator);
-
- return 0;
}
static struct platform_driver regulator_virtual_consumer_driver = {
.probe = regulator_virtual_probe,
- .remove = regulator_virtual_remove,
+ .remove_new = regulator_virtual_remove,
.driver = {
.name = "reg-virt-consumer",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 1445bafcab40..9939a5d2cbec 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1158,14 +1158,12 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int wm8350_regulator_remove(struct platform_device *pdev)
+static void wm8350_regulator_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
wm8350_free_irq(wm8350, wm8350_reg[pdev->id].irq, rdev);
-
- return 0;
}
int wm8350_register_regulator(struct wm8350 *wm8350, int reg,
@@ -1306,7 +1304,7 @@ EXPORT_SYMBOL_GPL(wm8350_register_led);
static struct platform_driver wm8350_regulator_driver = {
.probe = wm8350_regulator_probe,
- .remove = wm8350_regulator_remove,
+ .remove_new = wm8350_regulator_remove,
.driver = {
.name = "wm8350-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
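
The stm32-vrefbuf, uniphier, userspace-consumer, virtual and wm8350 hunks above all make the same conversion; here is a hedged, stand-alone sketch of the resulting shape (driver and function names are made up). Because .remove_new returns void, the driver can no longer return an error code that the core would ignore anyway.

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

/* the remove callback returns void with .remove_new */
static void example_remove(struct platform_device *pdev)
{
	/* undo whatever probe set up; nothing to report back */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example-reg",
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");
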
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 833cfab7d877..7327e81352e9 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1106,12 +1106,6 @@ static void dasd_statistics_removeroot(void)
return;
}
-int dasd_stats_generic_show(struct seq_file *m, void *v)
-{
- seq_puts(m, "Statistics are not activated in this kernel\n");
- return 0;
-}
-
static void dasd_profile_init(struct dasd_profile *profile,
struct dentry *base_dentry)
{
diff --git a/drivers/s390/cio/vfio_ccw_chp.c b/drivers/s390/cio/vfio_ccw_chp.c
index d3f3a611f95b..38c176cf6295 100644
--- a/drivers/s390/cio/vfio_ccw_chp.c
+++ b/drivers/s390/cio/vfio_ccw_chp.c
@@ -115,7 +115,7 @@ static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private,
/* Notify the guest if more CRWs are on our queue */
if (!list_empty(&private->crw) && private->crw_trigger)
- eventfd_signal(private->crw_trigger, 1);
+ eventfd_signal(private->crw_trigger);
return ret;
}
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 43601816ea4e..bfb35cfce1ef 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -112,7 +112,7 @@ void vfio_ccw_sch_io_todo(struct work_struct *work)
private->state = VFIO_CCW_STATE_IDLE;
if (private->io_trigger)
- eventfd_signal(private->io_trigger, 1);
+ eventfd_signal(private->io_trigger);
}
void vfio_ccw_crw_todo(struct work_struct *work)
@@ -122,7 +122,7 @@ void vfio_ccw_crw_todo(struct work_struct *work)
private = container_of(work, struct vfio_ccw_private, crw_work);
if (!list_empty(&private->crw) && private->crw_trigger)
- eventfd_signal(private->crw_trigger, 1);
+ eventfd_signal(private->crw_trigger);
}
/*
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index cba4971618ff..ea532a8a4a0c 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -421,7 +421,7 @@ static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private,
case VFIO_IRQ_SET_DATA_NONE:
{
if (*ctx)
- eventfd_signal(*ctx, 1);
+ eventfd_signal(*ctx);
return 0;
}
case VFIO_IRQ_SET_DATA_BOOL:
@@ -432,7 +432,7 @@ static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private,
return -EFAULT;
if (trigger && *ctx)
- eventfd_signal(*ctx, 1);
+ eventfd_signal(*ctx);
return 0;
}
case VFIO_IRQ_SET_DATA_EVENTFD:
@@ -612,7 +612,7 @@ static void vfio_ccw_mdev_request(struct vfio_device *vdev, unsigned int count)
"Relaying device request to user (#%u)\n",
count);
- eventfd_signal(private->req_trigger, 1);
+ eventfd_signal(private->req_trigger);
} else if (count == 0) {
dev_notice(dev,
"No device request channel registered, blocked until released by user\n");
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 4db538a55192..542b5be73a6a 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -1794,7 +1794,7 @@ static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
"Relaying device request to user (#%u)\n",
count);
- eventfd_signal(matrix_mdev->req_trigger, 1);
+ eventfd_signal(matrix_mdev->req_trigger);
} else if (count == 0) {
dev_notice(dev,
"No device request registered, blocked until released by user\n");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 70c9dd6b6a31..ddae0fde798e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -1177,9 +1177,10 @@ config SPI_ZYNQ_QSPI
config SPI_ZYNQMP_GQSPI
tristate "Xilinx ZynqMP GQSPI controller"
- depends on (SPI_MASTER && HAS_DMA) || COMPILE_TEST
+ depends on (SPI_MEM && HAS_DMA) || COMPILE_TEST
help
Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
+ This controller only supports the SPI memory interface.
config SPI_AMD
tristate "AMD SPI controller"
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 3d1252566134..370c4d1572ed 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -272,7 +272,7 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
return i;
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static bool atmel_qspi_supports_op(struct spi_mem *mem,
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index c9f1d1e1dcf7..b7ada981464a 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -146,7 +146,7 @@ static int ath79_exec_mem_op(struct spi_mem *mem,
/* Only use for fast-read op. */
if (op->cmd.opcode != 0x0b || op->data.dir != SPI_MEM_DATA_IN ||
op->addr.nbytes != 3 || op->dummy.nbytes != 1)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* disable GPIO mode */
ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index b96e55f59d1a..9ace259d2d29 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -6,12 +6,14 @@
*/
#include <linux/clk.h>
+#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
+#include <linux/timer.h>
#define SPI_ENGINE_VERSION_MAJOR(x) ((x >> 16) & 0xff)
#define SPI_ENGINE_VERSION_MINOR(x) ((x >> 8) & 0xff)
@@ -52,6 +54,7 @@
#define SPI_ENGINE_CMD_REG_CLK_DIV 0x0
#define SPI_ENGINE_CMD_REG_CONFIG 0x1
+#define SPI_ENGINE_CMD_REG_XFER_BITS 0x2
#define SPI_ENGINE_MISC_SYNC 0x0
#define SPI_ENGINE_MISC_SLEEP 0x1
@@ -78,29 +81,42 @@ struct spi_engine_program {
uint16_t instructions[];
};
-struct spi_engine {
- struct clk *clk;
- struct clk *ref_clk;
-
- spinlock_t lock;
-
- void __iomem *base;
-
- struct spi_message *msg;
+/**
+ * struct spi_engine_message_state - SPI engine per-message state
+ */
+struct spi_engine_message_state {
+ /** @p: Instructions for executing this message. */
struct spi_engine_program *p;
+ /** @cmd_length: Number of elements in cmd_buf array. */
unsigned cmd_length;
+ /** @cmd_buf: Array of commands not yet written to CMD FIFO. */
const uint16_t *cmd_buf;
-
+ /** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
struct spi_transfer *tx_xfer;
+ /** @tx_length: Size of tx_buf in bytes. */
unsigned int tx_length;
+ /** @tx_buf: Bytes not yet written to TX FIFO. */
const uint8_t *tx_buf;
-
+ /** @rx_xfer: Next xfer with rx_buf not yet fully written to RX FIFO. */
struct spi_transfer *rx_xfer;
+ /** @rx_length: Size of rx_buf in bytes. */
unsigned int rx_length;
+ /** @rx_buf: Bytes not yet written to the RX FIFO. */
uint8_t *rx_buf;
+ /** @sync_id: ID to correlate SYNC interrupts with this message. */
+ u8 sync_id;
+};
+
+struct spi_engine {
+ struct clk *clk;
+ struct clk *ref_clk;
+
+ spinlock_t lock;
- unsigned int sync_id;
- unsigned int completed_id;
+ void __iomem *base;
+ struct ida sync_ida;
+ struct timer_list watchdog_timer;
+ struct spi_controller *controller;
unsigned int int_enable;
};
@@ -127,25 +143,17 @@ static unsigned int spi_engine_get_config(struct spi_device *spi)
return config;
}
-static unsigned int spi_engine_get_clk_div(struct spi_engine *spi_engine,
- struct spi_device *spi, struct spi_transfer *xfer)
-{
- unsigned int clk_div;
-
- clk_div = DIV_ROUND_UP(clk_get_rate(spi_engine->ref_clk),
- xfer->speed_hz * 2);
- if (clk_div > 255)
- clk_div = 255;
- else if (clk_div > 0)
- clk_div -= 1;
-
- return clk_div;
-}
-
static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
struct spi_transfer *xfer)
{
- unsigned int len = xfer->len;
+ unsigned int len;
+
+ if (xfer->bits_per_word <= 8)
+ len = xfer->len;
+ else if (xfer->bits_per_word <= 16)
+ len = xfer->len / 2;
+ else
+ len = xfer->len / 4;
while (len) {
unsigned int n = min(len, 256U);
@@ -163,22 +171,16 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
}
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
- struct spi_engine *spi_engine, unsigned int clk_div,
- struct spi_transfer *xfer)
+ int delay_ns, u32 sclk_hz)
{
- unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk);
unsigned int t;
- int delay;
- delay = spi_delay_to_ns(&xfer->delay, xfer);
- if (delay < 0)
+ /* negative delay indicates error, e.g. from spi_delay_to_ns() */
+ if (delay_ns <= 0)
return;
- delay /= 1000;
- if (delay == 0)
- return;
-
- t = DIV_ROUND_UP(delay * spi_clk, (clk_div + 1) * 2);
+ /* rounding down since executing the instruction adds a couple of ticks delay */
+ t = DIV_ROUND_DOWN_ULL((u64)delay_ns * sclk_hz, NSEC_PER_SEC);
while (t) {
unsigned int n = min(t, 256U);
@@ -195,53 +197,105 @@ static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
if (assert)
mask ^= BIT(spi_get_chipselect(spi, 0));
- spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(1, mask));
+ spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
+}
+
+/*
+ * Performs precompile steps on the message.
+ *
+ * The SPI core does most of the message/transfer validation and filling in
+ * fields for us via __spi_validate(). This fixes up anything remaining not
+ * done there.
+ *
+ * NB: This is separate from spi_engine_compile_message() because the latter
+ * is called twice and would otherwise result in double-evaluation.
+ */
+static void spi_engine_precompile_message(struct spi_message *msg)
+{
+ unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
+ struct spi_transfer *xfer;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
+ xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
+ }
}
-static int spi_engine_compile_message(struct spi_engine *spi_engine,
- struct spi_message *msg, bool dry, struct spi_engine_program *p)
+static void spi_engine_compile_message(struct spi_message *msg, bool dry,
+ struct spi_engine_program *p)
{
struct spi_device *spi = msg->spi;
+ struct spi_controller *host = spi->controller;
struct spi_transfer *xfer;
int clk_div, new_clk_div;
- bool cs_change = true;
+ bool keep_cs = false;
+ u8 bits_per_word = 0;
- clk_div = -1;
+ clk_div = 1;
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
spi_engine_get_config(spi)));
+ xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
+ spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);
+
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- new_clk_div = spi_engine_get_clk_div(spi_engine, spi, xfer);
+ new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
if (new_clk_div != clk_div) {
clk_div = new_clk_div;
+ /* actual divider used is register value + 1 */
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
- clk_div));
+ clk_div - 1));
}
- if (cs_change)
- spi_engine_gen_cs(p, dry, spi, true);
+ if (bits_per_word != xfer->bits_per_word) {
+ bits_per_word = xfer->bits_per_word;
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
+ bits_per_word));
+ }
spi_engine_gen_xfer(p, dry, xfer);
- spi_engine_gen_sleep(p, dry, spi_engine, clk_div, xfer);
-
- cs_change = xfer->cs_change;
- if (list_is_last(&xfer->transfer_list, &msg->transfers))
- cs_change = !cs_change;
-
- if (cs_change)
- spi_engine_gen_cs(p, dry, spi, false);
+ spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
+ xfer->effective_speed_hz);
+
+ if (xfer->cs_change) {
+ if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
+ keep_cs = true;
+ } else {
+ if (!xfer->cs_off)
+ spi_engine_gen_cs(p, dry, spi, false);
+
+ spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
+ &xfer->cs_change_delay, xfer),
+ xfer->effective_speed_hz);
+
+ if (!list_next_entry(xfer, transfer_list)->cs_off)
+ spi_engine_gen_cs(p, dry, spi, true);
+ }
+ } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
+ xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
+ spi_engine_gen_cs(p, dry, spi, xfer->cs_off);
+ }
}
- return 0;
+ if (!keep_cs)
+ spi_engine_gen_cs(p, dry, spi, false);
+
+ /*
+ * Restore clockdiv to default so that future gen_sleep commands don't
+ * have to be aware of the current register state.
+ */
+ if (clk_div != 1)
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
}
-static void spi_engine_xfer_next(struct spi_engine *spi_engine,
+static void spi_engine_xfer_next(struct spi_message *msg,
struct spi_transfer **_xfer)
{
- struct spi_message *msg = spi_engine->msg;
struct spi_transfer *xfer = *_xfer;
if (!xfer) {
@@ -256,147 +310,192 @@ static void spi_engine_xfer_next(struct spi_engine *spi_engine,
*_xfer = xfer;
}
-static void spi_engine_tx_next(struct spi_engine *spi_engine)
+static void spi_engine_tx_next(struct spi_message *msg)
{
- struct spi_transfer *xfer = spi_engine->tx_xfer;
+ struct spi_engine_message_state *st = msg->state;
+ struct spi_transfer *xfer = st->tx_xfer;
do {
- spi_engine_xfer_next(spi_engine, &xfer);
+ spi_engine_xfer_next(msg, &xfer);
} while (xfer && !xfer->tx_buf);
- spi_engine->tx_xfer = xfer;
+ st->tx_xfer = xfer;
if (xfer) {
- spi_engine->tx_length = xfer->len;
- spi_engine->tx_buf = xfer->tx_buf;
+ st->tx_length = xfer->len;
+ st->tx_buf = xfer->tx_buf;
} else {
- spi_engine->tx_buf = NULL;
+ st->tx_buf = NULL;
}
}
-static void spi_engine_rx_next(struct spi_engine *spi_engine)
+static void spi_engine_rx_next(struct spi_message *msg)
{
- struct spi_transfer *xfer = spi_engine->rx_xfer;
+ struct spi_engine_message_state *st = msg->state;
+ struct spi_transfer *xfer = st->rx_xfer;
do {
- spi_engine_xfer_next(spi_engine, &xfer);
+ spi_engine_xfer_next(msg, &xfer);
} while (xfer && !xfer->rx_buf);
- spi_engine->rx_xfer = xfer;
+ st->rx_xfer = xfer;
if (xfer) {
- spi_engine->rx_length = xfer->len;
- spi_engine->rx_buf = xfer->rx_buf;
+ st->rx_length = xfer->len;
+ st->rx_buf = xfer->rx_buf;
} else {
- spi_engine->rx_buf = NULL;
+ st->rx_buf = NULL;
}
}
-static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine)
+static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
+ struct spi_message *msg)
{
void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
+ struct spi_engine_message_state *st = msg->state;
unsigned int n, m, i;
const uint16_t *buf;
n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
- while (n && spi_engine->cmd_length) {
- m = min(n, spi_engine->cmd_length);
- buf = spi_engine->cmd_buf;
+ while (n && st->cmd_length) {
+ m = min(n, st->cmd_length);
+ buf = st->cmd_buf;
for (i = 0; i < m; i++)
writel_relaxed(buf[i], addr);
- spi_engine->cmd_buf += m;
- spi_engine->cmd_length -= m;
+ st->cmd_buf += m;
+ st->cmd_length -= m;
n -= m;
}
- return spi_engine->cmd_length != 0;
+ return st->cmd_length != 0;
}
-static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine)
+static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
+ struct spi_message *msg)
{
void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
+ struct spi_engine_message_state *st = msg->state;
unsigned int n, m, i;
- const uint8_t *buf;
n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
- while (n && spi_engine->tx_length) {
- m = min(n, spi_engine->tx_length);
- buf = spi_engine->tx_buf;
- for (i = 0; i < m; i++)
- writel_relaxed(buf[i], addr);
- spi_engine->tx_buf += m;
- spi_engine->tx_length -= m;
+ while (n && st->tx_length) {
+ if (st->tx_xfer->bits_per_word <= 8) {
+ const u8 *buf = st->tx_buf;
+
+ m = min(n, st->tx_length);
+ for (i = 0; i < m; i++)
+ writel_relaxed(buf[i], addr);
+ st->tx_buf += m;
+ st->tx_length -= m;
+ } else if (st->tx_xfer->bits_per_word <= 16) {
+ const u16 *buf = (const u16 *)st->tx_buf;
+
+ m = min(n, st->tx_length / 2);
+ for (i = 0; i < m; i++)
+ writel_relaxed(buf[i], addr);
+ st->tx_buf += m * 2;
+ st->tx_length -= m * 2;
+ } else {
+ const u32 *buf = (const u32 *)st->tx_buf;
+
+ m = min(n, st->tx_length / 4);
+ for (i = 0; i < m; i++)
+ writel_relaxed(buf[i], addr);
+ st->tx_buf += m * 4;
+ st->tx_length -= m * 4;
+ }
n -= m;
- if (spi_engine->tx_length == 0)
- spi_engine_tx_next(spi_engine);
+ if (st->tx_length == 0)
+ spi_engine_tx_next(msg);
}
- return spi_engine->tx_length != 0;
+ return st->tx_length != 0;
}
-static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
+static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
+ struct spi_message *msg)
{
void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
+ struct spi_engine_message_state *st = msg->state;
unsigned int n, m, i;
- uint8_t *buf;
n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
- while (n && spi_engine->rx_length) {
- m = min(n, spi_engine->rx_length);
- buf = spi_engine->rx_buf;
- for (i = 0; i < m; i++)
- buf[i] = readl_relaxed(addr);
- spi_engine->rx_buf += m;
- spi_engine->rx_length -= m;
+ while (n && st->rx_length) {
+ if (st->rx_xfer->bits_per_word <= 8) {
+ u8 *buf = st->rx_buf;
+
+ m = min(n, st->rx_length);
+ for (i = 0; i < m; i++)
+ buf[i] = readl_relaxed(addr);
+ st->rx_buf += m;
+ st->rx_length -= m;
+ } else if (st->rx_xfer->bits_per_word <= 16) {
+ u16 *buf = (u16 *)st->rx_buf;
+
+ m = min(n, st->rx_length / 2);
+ for (i = 0; i < m; i++)
+ buf[i] = readl_relaxed(addr);
+ st->rx_buf += m * 2;
+ st->rx_length -= m * 2;
+ } else {
+ u32 *buf = (u32 *)st->rx_buf;
+
+ m = min(n, st->rx_length / 4);
+ for (i = 0; i < m; i++)
+ buf[i] = readl_relaxed(addr);
+ st->rx_buf += m * 4;
+ st->rx_length -= m * 4;
+ }
n -= m;
- if (spi_engine->rx_length == 0)
- spi_engine_rx_next(spi_engine);
+ if (st->rx_length == 0)
+ spi_engine_rx_next(msg);
}
- return spi_engine->rx_length != 0;
+ return st->rx_length != 0;
}
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
struct spi_controller *host = devid;
+ struct spi_message *msg = host->cur_msg;
struct spi_engine *spi_engine = spi_controller_get_devdata(host);
unsigned int disable_int = 0;
unsigned int pending;
+ int completed_id = -1;
pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
if (pending & SPI_ENGINE_INT_SYNC) {
writel_relaxed(SPI_ENGINE_INT_SYNC,
spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
- spi_engine->completed_id = readl_relaxed(
+ completed_id = readl_relaxed(
spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
}
spin_lock(&spi_engine->lock);
if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
- if (!spi_engine_write_cmd_fifo(spi_engine))
+ if (!spi_engine_write_cmd_fifo(spi_engine, msg))
disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
}
if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
- if (!spi_engine_write_tx_fifo(spi_engine))
+ if (!spi_engine_write_tx_fifo(spi_engine, msg))
disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
}
if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
- if (!spi_engine_read_rx_fifo(spi_engine))
+ if (!spi_engine_read_rx_fifo(spi_engine, msg))
disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
}
- if (pending & SPI_ENGINE_INT_SYNC) {
- if (spi_engine->msg &&
- spi_engine->completed_id == spi_engine->sync_id) {
- struct spi_message *msg = spi_engine->msg;
-
- kfree(spi_engine->p);
- msg->status = 0;
- msg->actual_length = msg->frame_length;
- spi_engine->msg = NULL;
- spi_finalize_current_message(host);
+ if (pending & SPI_ENGINE_INT_SYNC && msg) {
+ struct spi_engine_message_state *st = msg->state;
+
+ if (completed_id == st->sync_id) {
+ if (timer_delete_sync(&spi_engine->watchdog_timer)) {
+ msg->status = 0;
+ msg->actual_length = msg->frame_length;
+ spi_finalize_current_message(host);
+ }
disable_int |= SPI_ENGINE_INT_SYNC;
}
}
@@ -412,43 +511,86 @@ static irqreturn_t spi_engine_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-static int spi_engine_transfer_one_message(struct spi_controller *host,
- struct spi_message *msg)
+static int spi_engine_prepare_message(struct spi_controller *host,
+ struct spi_message *msg)
{
struct spi_engine_program p_dry, *p;
struct spi_engine *spi_engine = spi_controller_get_devdata(host);
- unsigned int int_enable = 0;
- unsigned long flags;
+ struct spi_engine_message_state *st;
size_t size;
+ int ret;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ spi_engine_precompile_message(msg);
p_dry.length = 0;
- spi_engine_compile_message(spi_engine, msg, true, &p_dry);
+ spi_engine_compile_message(msg, true, &p_dry);
size = sizeof(*p->instructions) * (p_dry.length + 1);
p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
- if (!p)
+ if (!p) {
+ kfree(st);
return -ENOMEM;
- spi_engine_compile_message(spi_engine, msg, false, p);
+ }
- spin_lock_irqsave(&spi_engine->lock, flags);
- spi_engine->sync_id = (spi_engine->sync_id + 1) & 0xff;
- spi_engine_program_add_cmd(p, false,
- SPI_ENGINE_CMD_SYNC(spi_engine->sync_id));
+ ret = ida_alloc_range(&spi_engine->sync_ida, 0, U8_MAX, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(p);
+ kfree(st);
+ return ret;
+ }
+
+ st->sync_id = ret;
+
+ spi_engine_compile_message(msg, false, p);
+
+ spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(st->sync_id));
+
+ st->p = p;
+ st->cmd_buf = p->instructions;
+ st->cmd_length = p->length;
+ msg->state = st;
+
+ return 0;
+}
+
+static int spi_engine_unprepare_message(struct spi_controller *host,
+ struct spi_message *msg)
+{
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ struct spi_engine_message_state *st = msg->state;
+
+ ida_free(&spi_engine->sync_ida, st->sync_id);
+ kfree(st->p);
+ kfree(st);
- spi_engine->msg = msg;
- spi_engine->p = p;
+ return 0;
+}
- spi_engine->cmd_buf = p->instructions;
- spi_engine->cmd_length = p->length;
- if (spi_engine_write_cmd_fifo(spi_engine))
+static int spi_engine_transfer_one_message(struct spi_controller *host,
+ struct spi_message *msg)
+{
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ struct spi_engine_message_state *st = msg->state;
+ unsigned int int_enable = 0;
+ unsigned long flags;
+
+ mod_timer(&spi_engine->watchdog_timer, jiffies + msecs_to_jiffies(5000));
+
+ spin_lock_irqsave(&spi_engine->lock, flags);
+
+ if (spi_engine_write_cmd_fifo(spi_engine, msg))
int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
- spi_engine_tx_next(spi_engine);
- if (spi_engine_write_tx_fifo(spi_engine))
+ spi_engine_tx_next(msg);
+ if (spi_engine_write_tx_fifo(spi_engine, msg))
int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
- spi_engine_rx_next(spi_engine);
- if (spi_engine->rx_length != 0)
+ spi_engine_rx_next(msg);
+ if (st->rx_length != 0)
int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
int_enable |= SPI_ENGINE_INT_SYNC;
@@ -461,6 +603,29 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
return 0;
}
+static void spi_engine_timeout(struct timer_list *timer)
+{
+ struct spi_engine *spi_engine = from_timer(spi_engine, timer, watchdog_timer);
+ struct spi_controller *host = spi_engine->controller;
+
+ if (WARN_ON(!host->cur_msg))
+ return;
+
+ dev_err(&host->dev,
+ "Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
+ host->cur_msg->status = -ETIMEDOUT;
+ spi_finalize_current_message(host);
+}
+
+static void spi_engine_release_hw(void *p)
+{
+ struct spi_engine *spi_engine = p;
+
+ writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+ writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+ writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
+}
+
static int spi_engine_probe(struct platform_device *pdev)
{
struct spi_engine *spi_engine;
@@ -473,35 +638,28 @@ static int spi_engine_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL);
- if (!spi_engine)
- return -ENOMEM;
-
- host = spi_alloc_host(&pdev->dev, 0);
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
if (!host)
return -ENOMEM;
- spi_controller_set_devdata(host, spi_engine);
+ spi_engine = spi_controller_get_devdata(host);
spin_lock_init(&spi_engine->lock);
+ ida_init(&spi_engine->sync_ida);
+ timer_setup(&spi_engine->watchdog_timer, spi_engine_timeout, TIMER_IRQSAFE);
+ spi_engine->controller = host;
spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
- if (IS_ERR(spi_engine->clk)) {
- ret = PTR_ERR(spi_engine->clk);
- goto err_put_host;
- }
+ if (IS_ERR(spi_engine->clk))
+ return PTR_ERR(spi_engine->clk);
spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
- if (IS_ERR(spi_engine->ref_clk)) {
- ret = PTR_ERR(spi_engine->ref_clk);
- goto err_put_host;
- }
+ if (IS_ERR(spi_engine->ref_clk))
+ return PTR_ERR(spi_engine->ref_clk);
spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(spi_engine->base)) {
- ret = PTR_ERR(spi_engine->base);
- goto err_put_host;
- }
+ if (IS_ERR(spi_engine->base))
+ return PTR_ERR(spi_engine->base);
version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
@@ -509,54 +667,42 @@ static int spi_engine_probe(struct platform_device *pdev)
SPI_ENGINE_VERSION_MAJOR(version),
SPI_ENGINE_VERSION_MINOR(version),
SPI_ENGINE_VERSION_PATCH(version));
- ret = -ENODEV;
- goto err_put_host;
+ return -ENODEV;
}
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
- ret = request_irq(irq, spi_engine_irq, 0, pdev->name, host);
+ ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,
+ spi_engine);
if (ret)
- goto err_put_host;
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,
+ host);
+ if (ret)
+ return ret;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
- host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
host->transfer_one_message = spi_engine_transfer_one_message;
+ host->prepare_message = spi_engine_prepare_message;
+ host->unprepare_message = spi_engine_unprepare_message;
host->num_chipselect = 8;
- ret = spi_register_controller(host);
+ if (host->max_speed_hz == 0)
+ return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");
+
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret)
- goto err_free_irq;
+ return ret;
platform_set_drvdata(pdev, host);
return 0;
-err_free_irq:
- free_irq(irq, host);
-err_put_host:
- spi_controller_put(host);
- return ret;
-}
-
-static void spi_engine_remove(struct platform_device *pdev)
-{
- struct spi_controller *host = spi_controller_get(platform_get_drvdata(pdev));
- struct spi_engine *spi_engine = spi_controller_get_devdata(host);
- int irq = platform_get_irq(pdev, 0);
-
- spi_unregister_controller(host);
-
- free_irq(irq, host);
-
- spi_controller_put(host);
-
- writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
- writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
- writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
}
static const struct of_device_id spi_engine_match_table[] = {
@@ -567,7 +713,6 @@ MODULE_DEVICE_TABLE(of, spi_engine_match_table);
static struct platform_driver spi_engine_driver = {
.probe = spi_engine_probe,
- .remove_new = spi_engine_remove,
.driver = {
.name = "spi-engine",
.of_match_table = spi_engine_match_table,
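
A condensed, hedged sketch of the per-message state pattern that the spi-axi-spi-engine rework above introduces: prepare_message() allocates the state plus a SYNC id from an IDA and parks it in msg->state, and unprepare_message() releases both. The structure and function names below are simplified stand-ins, not the driver's own.

#include <linux/idr.h>
#include <linux/limits.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

struct example_msg_state {
	u8 sync_id;
};

static DEFINE_IDA(example_sync_ida);

static int example_prepare_message(struct spi_controller *host,
				   struct spi_message *msg)
{
	struct example_msg_state *st;
	int id;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	id = ida_alloc_range(&example_sync_ida, 0, U8_MAX, GFP_KERNEL);
	if (id < 0) {
		kfree(st);
		return id;
	}

	st->sync_id = id;
	msg->state = st;	/* owned by the driver until unprepare */
	return 0;
}

static int example_unprepare_message(struct spi_controller *host,
				     struct spi_message *msg)
{
	struct example_msg_state *st = msg->state;

	ida_free(&example_sync_ida, st->sync_id);
	kfree(st);
	return 0;
}
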
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index ef08fcac2f6d..d96222e6d7d2 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1199,7 +1199,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
if (!op->data.nbytes || !op->addr.nbytes || op->addr.nbytes > 4 ||
op->data.dir != SPI_MEM_DATA_IN)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
buf = op->data.buf.in;
addr = op->addr.val;
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 3d7bf62da11c..f94e0d370d46 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1840,7 +1840,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (ddata->jh7110_clk_init) {
ret = cqspi_jh7110_clk_init(pdev, cqspi);
if (ret)
- goto probe_clk_failed;
+ goto probe_reset_failed;
}
if (of_device_is_compatible(pdev->dev.of_node,
@@ -1901,6 +1901,8 @@ static int cqspi_probe(struct platform_device *pdev)
probe_setup_failed:
cqspi_controller_enable(cqspi, 0);
probe_reset_failed:
+ if (cqspi->is_jh7110)
+ cqspi_jh7110_disable_clk(pdev, cqspi);
clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
return ret;
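
The cadence-quadspi fix above is about unwinding in the right order on probe failure; a hedged, generic sketch of the principle follows, with made-up clock names rather than the driver's actual resources.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *aclk, *extra;
	int ret;

	aclk = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(aclk))
		return PTR_ERR(aclk);

	ret = clk_prepare_enable(aclk);
	if (ret)
		return ret;

	extra = devm_clk_get(&pdev->dev, "extra");
	if (IS_ERR(extra)) {
		ret = PTR_ERR(extra);
		goto err_disable_aclk;	/* undo only what already succeeded */
	}

	ret = clk_prepare_enable(extra);
	if (ret)
		goto err_disable_aclk;

	return 0;

err_disable_aclk:
	clk_disable_unprepare(aclk);
	return ret;
}
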
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
index b7e04b03be58..8648b8eb080d 100644
--- a/drivers/spi/spi-cadence-xspi.c
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -619,7 +619,6 @@ MODULE_DEVICE_TABLE(of, cdns_xspi_of_match);
static struct platform_driver cdns_xspi_platform_driver = {
.probe = cdns_xspi_probe,
- .remove = NULL,
.driver = {
.name = CDNS_XSPI_NAME,
.of_match_table = cdns_xspi_of_match,
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index d239fc5a49cc..f13073e12593 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -213,7 +213,7 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv->ctlr));
+ priv->ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*priv->ctlr));
if (!priv->ctlr)
return -ENOMEM;
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 46801189a651..cc74cbe03431 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -411,7 +411,6 @@ static const struct of_device_id dw_spi_mmio_of_match[] = {
{ .compatible = "renesas,rzn1-spi", .data = dw_spi_pssi_init},
{ .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_hssi_init},
{ .compatible = "intel,keembay-ssi", .data = dw_spi_intel_init},
- { .compatible = "intel,thunderbay-ssi", .data = dw_spi_intel_init},
{
.compatible = "intel,mountevans-imc-ssi",
.data = dw_spi_mountevans_imc_init,
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index b956a32a4162..15f84e68d4d2 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -145,10 +145,10 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
return ret;
}
-static void handle_se_timeout(struct spi_master *spi,
- struct spi_message *msg)
+static void handle_se_timeout(struct spi_controller *spi,
+ struct spi_message *msg)
{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
unsigned long time_left;
struct geni_se *se = &mas->se;
const struct spi_transfer *xfer;
@@ -160,9 +160,9 @@ static void handle_se_timeout(struct spi_master *spi,
xfer = mas->cur_xfer;
mas->cur_xfer = NULL;
- if (spi->slave) {
+ if (spi->target) {
/*
- * skip CMD Cancel sequnece since spi slave
+ * skip CMD Cancel sequnece since spi target
* doesn`t support CMD Cancel sequnece
*/
spin_unlock_irq(&mas->lock);
@@ -225,17 +225,17 @@ reset_if_dma:
}
}
-static void handle_gpi_timeout(struct spi_master *spi, struct spi_message *msg)
+static void handle_gpi_timeout(struct spi_controller *spi, struct spi_message *msg)
{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
dmaengine_terminate_sync(mas->tx);
dmaengine_terminate_sync(mas->rx);
}
-static void spi_geni_handle_err(struct spi_master *spi, struct spi_message *msg)
+static void spi_geni_handle_err(struct spi_controller *spi, struct spi_message *msg)
{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
switch (mas->cur_xfer_mode) {
case GENI_SE_FIFO:
@@ -286,8 +286,8 @@ static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
- struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
- struct spi_master *spi = dev_get_drvdata(mas->dev);
+ struct spi_geni_master *mas = spi_controller_get_devdata(slv->controller);
+ struct spi_controller *spi = dev_get_drvdata(mas->dev);
struct geni_se *se = &mas->se;
unsigned long time_left;
@@ -395,9 +395,9 @@ static int geni_spi_set_clock_and_bw(struct spi_geni_master *mas,
}
static int setup_fifo_params(struct spi_device *spi_slv,
- struct spi_master *spi)
+ struct spi_controller *spi)
{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
struct geni_se *se = &mas->se;
u32 loopback_cfg = 0, cpol = 0, cpha = 0, demux_output_inv = 0;
u32 demux_sel;
@@ -434,7 +434,7 @@ static int setup_fifo_params(struct spi_device *spi_slv,
static void
spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
{
- struct spi_master *spi = cb;
+ struct spi_controller *spi = cb;
spi->cur_msg->status = -EIO;
if (result->result != DMA_TRANS_NOERROR) {
@@ -454,7 +454,7 @@ spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
}
static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
- struct spi_device *spi_slv, struct spi_master *spi)
+ struct spi_device *spi_slv, struct spi_controller *spi)
{
unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
struct dma_slave_config config = {};
@@ -560,14 +560,14 @@ static u32 get_xfer_len_in_words(struct spi_transfer *xfer,
static bool geni_can_dma(struct spi_controller *ctlr,
struct spi_device *slv, struct spi_transfer *xfer)
{
- struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
+ struct spi_geni_master *mas = spi_controller_get_devdata(slv->controller);
u32 len, fifo_size;
if (mas->cur_xfer_mode == GENI_GPI_DMA)
return true;
- /* Set SE DMA mode for SPI slave. */
- if (ctlr->slave)
+ /* Set SE DMA mode for SPI target. */
+ if (ctlr->target)
return true;
len = get_xfer_len_in_words(xfer, mas);
@@ -579,10 +579,10 @@ static bool geni_can_dma(struct spi_controller *ctlr,
return false;
}
-static int spi_geni_prepare_message(struct spi_master *spi,
- struct spi_message *spi_msg)
+static int spi_geni_prepare_message(struct spi_controller *spi,
+ struct spi_message *spi_msg)
{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
int ret;
switch (mas->cur_xfer_mode) {
@@ -657,7 +657,7 @@ static int spi_geni_init(struct spi_geni_master *mas)
proto = geni_se_read_proto(se);
- if (spi->slave) {
+ if (spi->target) {
if (proto != GENI_SE_SPI_SLAVE) {
dev_err(mas->dev, "Invalid proto %d\n", proto);
goto out_pm;
@@ -715,7 +715,7 @@ static int spi_geni_init(struct spi_geni_master *mas)
}
/* We always control CS manually */
- if (!spi->slave) {
+ if (!spi->target) {
spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
spi_tx_cfg &= ~CS_TOGGLE;
writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
@@ -824,7 +824,7 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
static int setup_se_xfer(struct spi_transfer *xfer,
struct spi_geni_master *mas,
- u16 mode, struct spi_master *spi)
+ u16 mode, struct spi_controller *spi)
{
u32 m_cmd = 0;
u32 len;
@@ -913,11 +913,11 @@ static int setup_se_xfer(struct spi_transfer *xfer,
return ret;
}
-static int spi_geni_transfer_one(struct spi_master *spi,
- struct spi_device *slv,
- struct spi_transfer *xfer)
+static int spi_geni_transfer_one(struct spi_controller *spi,
+ struct spi_device *slv,
+ struct spi_transfer *xfer)
{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
int ret;
if (spi_geni_is_abort_still_pending(mas))
@@ -939,8 +939,8 @@ static int spi_geni_transfer_one(struct spi_master *spi,
static irqreturn_t geni_spi_isr(int irq, void *data)
{
- struct spi_master *spi = data;
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_controller *spi = data;
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
struct geni_se *se = &mas->se;
u32 m_irq;
@@ -1042,7 +1042,7 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
static int spi_geni_probe(struct platform_device *pdev)
{
int ret, irq;
- struct spi_master *spi;
+ struct spi_controller *spi;
struct spi_geni_master *mas;
void __iomem *base;
struct clk *clk;
@@ -1064,12 +1064,12 @@ static int spi_geni_probe(struct platform_device *pdev)
if (IS_ERR(clk))
return PTR_ERR(clk);
- spi = devm_spi_alloc_master(dev, sizeof(*mas));
+ spi = devm_spi_alloc_host(dev, sizeof(*mas));
if (!spi)
return -ENOMEM;
platform_set_drvdata(pdev, spi);
- mas = spi_master_get_devdata(spi);
+ mas = spi_controller_get_devdata(spi);
mas->irq = irq;
mas->dev = dev;
mas->se.dev = dev;
@@ -1113,7 +1113,7 @@ static int spi_geni_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
if (device_property_read_bool(&pdev->dev, "spi-slave"))
- spi->slave = true;
+ spi->target = true;
ret = geni_icc_get(&mas->se, NULL);
if (ret)
@@ -1135,7 +1135,7 @@ static int spi_geni_probe(struct platform_device *pdev)
* for dma (gsi) mode, the gsi will set cs based on params passed in
* TRE
*/
- if (!spi->slave && mas->cur_xfer_mode == GENI_SE_FIFO)
+ if (!spi->target && mas->cur_xfer_mode == GENI_SE_FIFO)
spi->set_cs = spi_geni_set_cs;
/*
@@ -1148,7 +1148,7 @@ static int spi_geni_probe(struct platform_device *pdev)
if (ret)
goto spi_geni_release_dma;
- ret = spi_register_master(spi);
+ ret = spi_register_controller(spi);
if (ret)
goto spi_geni_probe_free_irq;
@@ -1164,11 +1164,11 @@ spi_geni_probe_runtime_disable:
static void spi_geni_remove(struct platform_device *pdev)
{
- struct spi_master *spi = platform_get_drvdata(pdev);
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_controller *spi = platform_get_drvdata(pdev);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
/* Unregister _before_ disabling pm_runtime() so we stop transfers */
- spi_unregister_master(spi);
+ spi_unregister_controller(spi);
spi_geni_release_dma_chan(mas);
@@ -1178,8 +1178,8 @@ static void spi_geni_remove(struct platform_device *pdev)
static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
{
- struct spi_master *spi = dev_get_drvdata(dev);
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_controller *spi = dev_get_drvdata(dev);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
int ret;
/* Drop the performance state vote */
@@ -1194,8 +1194,8 @@ static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
{
- struct spi_master *spi = dev_get_drvdata(dev);
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_controller *spi = dev_get_drvdata(dev);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
int ret;
ret = geni_icc_enable(&mas->se);
@@ -1211,30 +1211,30 @@ static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
static int __maybe_unused spi_geni_suspend(struct device *dev)
{
- struct spi_master *spi = dev_get_drvdata(dev);
+ struct spi_controller *spi = dev_get_drvdata(dev);
int ret;
- ret = spi_master_suspend(spi);
+ ret = spi_controller_suspend(spi);
if (ret)
return ret;
ret = pm_runtime_force_suspend(dev);
if (ret)
- spi_master_resume(spi);
+ spi_controller_resume(spi);
return ret;
}
static int __maybe_unused spi_geni_resume(struct device *dev)
{
- struct spi_master *spi = dev_get_drvdata(dev);
+ struct spi_controller *spi = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
- ret = spi_master_resume(spi);
+ ret = spi_controller_resume(spi);
if (ret)
pm_runtime_force_suspend(dev);
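
The geni conversion above is a bulk rename from the deprecated spi_master API to the controller-neutral one; a hedged sketch of the equivalent host-side skeleton (the private struct is illustrative):

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct example_priv {
	int irq;
};

static int example_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct example_priv *priv;

	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*priv));
	if (!ctlr)
		return -ENOMEM;

	priv = spi_controller_get_devdata(ctlr);
	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	return devm_spi_register_controller(&pdev->dev, ctlr);
}
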
diff --git a/drivers/spi/spi-ingenic.c b/drivers/spi/spi-ingenic.c
index cc366936d72b..003a6d21c4c3 100644
--- a/drivers/spi/spi-ingenic.c
+++ b/drivers/spi/spi-ingenic.c
@@ -346,14 +346,17 @@ static bool spi_ingenic_can_dma(struct spi_controller *ctlr,
static int spi_ingenic_request_dma(struct spi_controller *ctlr,
struct device *dev)
{
- ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
- if (!ctlr->dma_tx)
- return -ENODEV;
+ struct dma_chan *chan;
- ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
+ chan = dma_request_chan(dev, "tx");
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+ ctlr->dma_tx = chan;
- if (!ctlr->dma_rx)
- return -ENODEV;
+ chan = dma_request_chan(dev, "rx");
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+ ctlr->dma_rx = chan;
ctlr->can_dma = spi_ingenic_can_dma;
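
A hedged note on the ingenic change above: dma_request_chan() reports failures as ERR_PTR values, so probe deferral and real errors propagate to the caller instead of being flattened to -ENODEV as with the removed dma_request_slave_channel(). The helper below is illustrative only.

#include <linux/dmaengine.h>
#include <linux/spi/spi.h>

static int example_request_tx_dma(struct spi_controller *ctlr,
				  struct device *dev)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	ctlr->dma_tx = chan;
	return 0;
}
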
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index 98ec4dc22b81..3654ae35d2db 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -711,8 +711,7 @@ static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop,
{
if (iop->mem_op.cmd.nbytes != op->cmd.nbytes ||
iop->mem_op.cmd.buswidth != op->cmd.buswidth ||
- iop->mem_op.cmd.dtr != op->cmd.dtr ||
- iop->mem_op.cmd.opcode != op->cmd.opcode)
+ iop->mem_op.cmd.dtr != op->cmd.dtr)
return false;
if (iop->mem_op.addr.nbytes != op->addr.nbytes ||
@@ -737,11 +736,12 @@ intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op)
const struct intel_spi_mem_op *iop;
for (iop = ispi->mem_ops; iop->mem_op.cmd.opcode; iop++) {
- if (intel_spi_cmp_mem_op(iop, op))
- break;
+ if (iop->mem_op.cmd.opcode == op->cmd.opcode &&
+ intel_spi_cmp_mem_op(iop, op))
+ return iop;
}
- return iop->mem_op.cmd.opcode ? iop : NULL;
+ return NULL;
}
static bool intel_spi_supports_mem_op(struct spi_mem *mem,
diff --git a/drivers/spi/spi-ljca.c b/drivers/spi/spi-ljca.c
index c5a066c73817..1cc1422ddba0 100644
--- a/drivers/spi/spi-ljca.c
+++ b/drivers/spi/spi-ljca.c
@@ -223,7 +223,7 @@ static int ljca_spi_probe(struct auxiliary_device *auxdev,
struct ljca_spi_dev *ljca_spi;
int ret;
- controller = devm_spi_alloc_master(&auxdev->dev, sizeof(*ljca_spi));
+ controller = devm_spi_alloc_host(&auxdev->dev, sizeof(*ljca_spi));
if (!controller)
return -ENOMEM;
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index edd7430d4c05..2dc8ceb85374 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -323,7 +323,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
return ret;
if (!spi_mem_internal_supports_op(mem, op))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (ctlr->mem_ops && ctlr->mem_ops->exec_op && !spi_get_csgpiod(mem->spi, 0)) {
ret = spi_mem_access_start(mem);
@@ -339,7 +339,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
* read path) and expect the core to use the regular SPI
* interface in other cases.
*/
- if (!ret || ret != -ENOTSUPP)
+ if (!ret || (ret != -ENOTSUPP && ret != -EOPNOTSUPP))
return ret;
}
@@ -559,7 +559,7 @@ spi_mem_dirmap_create(struct spi_mem *mem,
if (ret) {
desc->nodirmap = true;
if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
else
ret = 0;
}
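
A hedged restatement of the fall-back rule in spi_mem_exec_op() after this change: only -ENOTSUPP or -EOPNOTSUPP from the controller's exec_op() means "fall back to regular SPI transfers"; success and any other error are returned to the caller as-is. The predicate name below is made up.

#include <linux/errno.h>
#include <linux/types.h>

/* illustrative predicate, not part of the driver */
static bool example_should_fall_back(int ret)
{
	return ret == -ENOTSUPP || ret == -EOPNOTSUPP;
}
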
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 4a6c984b6bff..d5ac60c135c2 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index 03db9f016a11..f3bb8bbc192f 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -556,7 +556,7 @@ static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
op->data.nbytes);
if (fiu->spix_mode || op->addr.nbytes > 4)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (fiu->clkrate != chip->clkrate) {
ret = clk_set_rate(fiu->clk, chip->clkrate);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index d1b6110b38fc..de63cf0557ce 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -338,14 +338,8 @@ struct vendor_data {
* @clk: outgoing clock "SPICLK" for the SPI bus
* @host: SPI framework hookup
* @host_info: controller-specific data from machine setup
- * @pump_transfers: Tasklet used in Interrupt Transfer mode
- * @cur_msg: Pointer to current spi_message being processed
* @cur_transfer: Pointer to current spi_transfer
* @cur_chip: pointer to current clients chip(assigned from controller_state)
- * @next_msg_cs_active: the next message in the queue has been examined
- * and it was found that it uses the same chip select as the previous
- * message, so we left it active after the previous transfer, and it's
- * active already.
* @tx: current position in TX buffer to be read
* @tx_end: end position in TX buffer to be read
* @rx: current position in RX buffer to be written
@@ -362,7 +356,6 @@ struct vendor_data {
* @dummypage: a dummy page used for driving data on the bus with DMA
* @dma_running: indicates whether DMA is in operation
* @cur_cs: current chip select index
- * @cur_gpiod: current chip select GPIO descriptor
*/
struct pl022 {
struct amba_device *adev;
@@ -372,12 +365,8 @@ struct pl022 {
struct clk *clk;
struct spi_controller *host;
struct pl022_ssp_controller *host_info;
- /* Message per-transfer pump */
- struct tasklet_struct pump_transfers;
- struct spi_message *cur_msg;
struct spi_transfer *cur_transfer;
struct chip_data *cur_chip;
- bool next_msg_cs_active;
void *tx;
void *tx_end;
void *rx;
@@ -397,7 +386,6 @@ struct pl022 {
bool dma_running;
#endif
int cur_cs;
- struct gpio_desc *cur_gpiod;
};
/**
@@ -431,99 +419,29 @@ struct chip_data {
/**
* internal_cs_control - Control chip select signals via SSP_CSR.
* @pl022: SSP driver private data structure
- * @command: select/delect the chip
+ * @enable: select/deselect the chip
*
* Used on controller with internal chip select control via SSP_CSR register
* (vendor extension). Each of the 5 LSB in the register controls one chip
* select signal.
*/
-static void internal_cs_control(struct pl022 *pl022, u32 command)
+static void internal_cs_control(struct pl022 *pl022, bool enable)
{
u32 tmp;
tmp = readw(SSP_CSR(pl022->virtbase));
- if (command == SSP_CHIP_SELECT)
+ if (enable)
tmp &= ~BIT(pl022->cur_cs);
else
tmp |= BIT(pl022->cur_cs);
writew(tmp, SSP_CSR(pl022->virtbase));
}
-static void pl022_cs_control(struct pl022 *pl022, u32 command)
+static void pl022_cs_control(struct spi_device *spi, bool enable)
{
+ struct pl022 *pl022 = spi_controller_get_devdata(spi->controller);
if (pl022->vendor->internal_cs_ctrl)
- internal_cs_control(pl022, command);
- else if (pl022->cur_gpiod)
- /*
- * This needs to be inverted since with GPIOLIB in
- * control, the inversion will be handled by
- * GPIOLIB's active low handling. The "command"
- * passed into this function will be SSP_CHIP_SELECT
- * which is enum:ed to 0, so we need the inverse
- * (1) to activate chip select.
- */
- gpiod_set_value(pl022->cur_gpiod, !command);
-}
-
-/**
- * giveback - current spi_message is over, schedule next message and call
- * callback of this message. Assumes that caller already
- * set message->status; dma and pio irqs are blocked
- * @pl022: SSP driver private data structure
- */
-static void giveback(struct pl022 *pl022)
-{
- struct spi_transfer *last_transfer;
- pl022->next_msg_cs_active = false;
-
- last_transfer = list_last_entry(&pl022->cur_msg->transfers,
- struct spi_transfer, transfer_list);
-
- /* Delay if requested before any change in chip select */
- /*
- * FIXME: This runs in interrupt context.
- * Is this really smart?
- */
- spi_transfer_delay_exec(last_transfer);
-
- if (!last_transfer->cs_change) {
- struct spi_message *next_msg;
-
- /*
- * cs_change was not set. We can keep the chip select
- * enabled if there is message in the queue and it is
- * for the same spi device.
- *
- * We cannot postpone this until pump_messages, because
- * after calling msg->complete (below) the driver that
- * sent the current message could be unloaded, which
- * could invalidate the cs_control() callback...
- */
- /* get a pointer to the next message, if any */
- next_msg = spi_get_next_queued_message(pl022->host);
-
- /*
- * see if the next and current messages point
- * to the same spi device.
- */
- if (next_msg && next_msg->spi != pl022->cur_msg->spi)
- next_msg = NULL;
- if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
- pl022_cs_control(pl022, SSP_CHIP_DESELECT);
- else
- pl022->next_msg_cs_active = true;
-
- }
-
- pl022->cur_msg = NULL;
- pl022->cur_transfer = NULL;
- pl022->cur_chip = NULL;
-
- /* disable the SPI/SSP operation */
- writew((readw(SSP_CR1(pl022->virtbase)) &
- (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
-
- spi_finalize_current_message(pl022->host);
+ internal_cs_control(pl022, enable);
}
/**
@@ -757,30 +675,6 @@ static void readwriter(struct pl022 *pl022)
*/
}
-/**
- * next_transfer - Move to the Next transfer in the current spi message
- * @pl022: SSP driver private data structure
- *
- * This function moves though the linked list of spi transfers in the
- * current spi message and returns with the state of current spi
- * message i.e whether its last transfer is done(STATE_DONE) or
- * Next transfer is ready(STATE_RUNNING)
- */
-static void *next_transfer(struct pl022 *pl022)
-{
- struct spi_message *msg = pl022->cur_msg;
- struct spi_transfer *trans = pl022->cur_transfer;
-
- /* Move to next transfer */
- if (trans->transfer_list.next != &msg->transfers) {
- pl022->cur_transfer =
- list_entry(trans->transfer_list.next,
- struct spi_transfer, transfer_list);
- return STATE_RUNNING;
- }
- return STATE_DONE;
-}
-
/*
* This DMA functionality is only compiled in if we have
* access to the generic DMA devices/DMA engine.
@@ -800,7 +694,6 @@ static void unmap_free_dma_scatter(struct pl022 *pl022)
static void dma_callback(void *data)
{
struct pl022 *pl022 = data;
- struct spi_message *msg = pl022->cur_msg;
BUG_ON(!pl022->sgt_rx.sgl);
@@ -845,13 +738,7 @@ static void dma_callback(void *data)
unmap_free_dma_scatter(pl022);
- /* Update total bytes transferred */
- msg->actual_length += pl022->cur_transfer->len;
- /* Move to next transfer */
- msg->state = next_transfer(pl022);
- if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
- pl022_cs_control(pl022, SSP_CHIP_DESELECT);
- tasklet_schedule(&pl022->pump_transfers);
+ spi_finalize_current_transfer(pl022->host);
}
static void setup_dma_scatter(struct pl022 *pl022,
@@ -1189,6 +1076,9 @@ err_no_rxchan:
static void terminate_dma(struct pl022 *pl022)
{
+ if (!pl022->dma_running)
+ return;
+
struct dma_chan *rxchan = pl022->dma_rx_channel;
struct dma_chan *txchan = pl022->dma_tx_channel;
@@ -1200,8 +1090,7 @@ static void terminate_dma(struct pl022 *pl022)
static void pl022_dma_remove(struct pl022 *pl022)
{
- if (pl022->dma_running)
- terminate_dma(pl022);
+ terminate_dma(pl022);
if (pl022->dma_tx_channel)
dma_release_channel(pl022->dma_tx_channel);
if (pl022->dma_rx_channel)
@@ -1225,6 +1114,10 @@ static inline int pl022_dma_probe(struct pl022 *pl022)
return 0;
}
+static inline void terminate_dma(struct pl022 *pl022)
+{
+}
+
static inline void pl022_dma_remove(struct pl022 *pl022)
{
}
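
For reference, the empty terminate_dma() added above follows the usual compile-out stub pattern, so the shared error path can call it unconditionally. A minimal sketch with hypothetical foo_* names, not part of this patch:

/* Minimal sketch, assuming a driver-private struct foo_priv. */
#ifdef CONFIG_DMA_ENGINE
static void foo_terminate_dma(struct foo_priv *priv);  /* real implementation */
#else
static inline void foo_terminate_dma(struct foo_priv *priv)
{
        /* nothing to stop when DMA support is compiled out */
}
#endif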
@@ -1246,16 +1139,7 @@ static inline void pl022_dma_remove(struct pl022 *pl022)
static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
{
struct pl022 *pl022 = dev_id;
- struct spi_message *msg = pl022->cur_msg;
u16 irq_status = 0;
-
- if (unlikely(!msg)) {
- dev_err(&pl022->adev->dev,
- "bad message state in interrupt handler");
- /* Never fail */
- return IRQ_HANDLED;
- }
-
/* Read the Interrupt Status Register */
irq_status = readw(SSP_MIS(pl022->virtbase));
@@ -1287,10 +1171,8 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
writew((readw(SSP_CR1(pl022->virtbase)) &
(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
- msg->state = STATE_ERROR;
-
- /* Schedule message queue handler */
- tasklet_schedule(&pl022->pump_transfers);
+ pl022->cur_transfer->error |= SPI_TRANS_FAIL_IO;
+ spi_finalize_current_transfer(pl022->host);
return IRQ_HANDLED;
}
@@ -1318,13 +1200,7 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
"number of bytes on a 16bit bus?)\n",
(u32) (pl022->rx - pl022->rx_end));
}
- /* Update total bytes transferred */
- msg->actual_length += pl022->cur_transfer->len;
- /* Move to next transfer */
- msg->state = next_transfer(pl022);
- if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
- pl022_cs_control(pl022, SSP_CHIP_DESELECT);
- tasklet_schedule(&pl022->pump_transfers);
+ spi_finalize_current_transfer(pl022->host);
return IRQ_HANDLED;
}
@@ -1361,98 +1237,20 @@ static int set_up_next_transfer(struct pl022 *pl022,
return 0;
}
-/**
- * pump_transfers - Tasklet function which schedules next transfer
- * when running in interrupt or DMA transfer mode.
- * @data: SSP driver private data structure
- *
- */
-static void pump_transfers(unsigned long data)
+static int do_interrupt_dma_transfer(struct pl022 *pl022)
{
- struct pl022 *pl022 = (struct pl022 *) data;
- struct spi_message *message = NULL;
- struct spi_transfer *transfer = NULL;
- struct spi_transfer *previous = NULL;
-
- /* Get current state information */
- message = pl022->cur_msg;
- transfer = pl022->cur_transfer;
-
- /* Handle for abort */
- if (message->state == STATE_ERROR) {
- message->status = -EIO;
- giveback(pl022);
- return;
- }
-
- /* Handle end of message */
- if (message->state == STATE_DONE) {
- message->status = 0;
- giveback(pl022);
- return;
- }
-
- /* Delay if requested at end of transfer before CS change */
- if (message->state == STATE_RUNNING) {
- previous = list_entry(transfer->transfer_list.prev,
- struct spi_transfer,
- transfer_list);
- /*
- * FIXME: This runs in interrupt context.
- * Is this really smart?
- */
- spi_transfer_delay_exec(previous);
-
- /* Reselect chip select only if cs_change was requested */
- if (previous->cs_change)
- pl022_cs_control(pl022, SSP_CHIP_SELECT);
- } else {
- /* STATE_START */
- message->state = STATE_RUNNING;
- }
-
- if (set_up_next_transfer(pl022, transfer)) {
- message->state = STATE_ERROR;
- message->status = -EIO;
- giveback(pl022);
- return;
- }
- /* Flush the FIFOs and let's go! */
- flush(pl022);
-
- if (pl022->cur_chip->enable_dma) {
- if (configure_dma(pl022)) {
- dev_dbg(&pl022->adev->dev,
- "configuration of DMA failed, fall back to interrupt mode\n");
- goto err_config_dma;
- }
- return;
- }
-
-err_config_dma:
- /* enable all interrupts except RX */
- writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase));
-}
+ int ret;
-static void do_interrupt_dma_transfer(struct pl022 *pl022)
-{
/*
* Default is to enable all interrupts except RX -
* this will be enabled once TX is complete
*/
u32 irqflags = (u32)(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM);
- /* Enable target chip, if not already active */
- if (!pl022->next_msg_cs_active)
- pl022_cs_control(pl022, SSP_CHIP_SELECT);
+ ret = set_up_next_transfer(pl022, pl022->cur_transfer);
+ if (ret)
+ return ret;
- if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
- /* Error path */
- pl022->cur_msg->state = STATE_ERROR;
- pl022->cur_msg->status = -EIO;
- giveback(pl022);
- return;
- }
/* If we're using DMA, set up DMA here */
if (pl022->cur_chip->enable_dma) {
/* Configure DMA transfer */
@@ -1469,6 +1267,7 @@ err_config_dma:
writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
SSP_CR1(pl022->virtbase));
writew(irqflags, SSP_IMSC(pl022->virtbase));
+ return 1;
}
static void print_current_status(struct pl022 *pl022)
@@ -1495,111 +1294,65 @@ static void print_current_status(struct pl022 *pl022)
}
-static void do_polling_transfer(struct pl022 *pl022)
+static int do_polling_transfer(struct pl022 *pl022)
{
- struct spi_message *message = NULL;
- struct spi_transfer *transfer = NULL;
- struct spi_transfer *previous = NULL;
+ int ret;
unsigned long time, timeout;
- message = pl022->cur_msg;
-
- while (message->state != STATE_DONE) {
- /* Handle for abort */
- if (message->state == STATE_ERROR)
- break;
- transfer = pl022->cur_transfer;
-
- /* Delay if requested at end of transfer */
- if (message->state == STATE_RUNNING) {
- previous =
- list_entry(transfer->transfer_list.prev,
- struct spi_transfer, transfer_list);
- spi_transfer_delay_exec(previous);
- if (previous->cs_change)
- pl022_cs_control(pl022, SSP_CHIP_SELECT);
- } else {
- /* STATE_START */
- message->state = STATE_RUNNING;
- if (!pl022->next_msg_cs_active)
- pl022_cs_control(pl022, SSP_CHIP_SELECT);
- }
-
- /* Configuration Changing Per Transfer */
- if (set_up_next_transfer(pl022, transfer)) {
- /* Error path */
- message->state = STATE_ERROR;
- break;
- }
- /* Flush FIFOs and enable SSP */
- flush(pl022);
- writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
- SSP_CR1(pl022->virtbase));
-
- dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
-
- timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
- while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
- time = jiffies;
- readwriter(pl022);
- if (time_after(time, timeout)) {
- dev_warn(&pl022->adev->dev,
- "%s: timeout!\n", __func__);
- message->state = STATE_TIMEOUT;
- print_current_status(pl022);
- goto out;
- }
- cpu_relax();
+ /* Configuration Changing Per Transfer */
+ ret = set_up_next_transfer(pl022, pl022->cur_transfer);
+ if (ret)
+ return ret;
+ /* Flush FIFOs and enable SSP */
+ flush(pl022);
+ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
+ SSP_CR1(pl022->virtbase));
+
+ dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
+
+ timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
+ while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
+ time = jiffies;
+ readwriter(pl022);
+ if (time_after(time, timeout)) {
+ dev_warn(&pl022->adev->dev,
+ "%s: timeout!\n", __func__);
+ print_current_status(pl022);
+ return -ETIMEDOUT;
}
-
- /* Update total byte transferred */
- message->actual_length += pl022->cur_transfer->len;
- /* Move to next transfer */
- message->state = next_transfer(pl022);
- if (message->state != STATE_DONE
- && pl022->cur_transfer->cs_change)
- pl022_cs_control(pl022, SSP_CHIP_DESELECT);
+ cpu_relax();
}
-out:
- /* Handle end of message */
- if (message->state == STATE_DONE)
- message->status = 0;
- else if (message->state == STATE_TIMEOUT)
- message->status = -EAGAIN;
- else
- message->status = -EIO;
- giveback(pl022);
- return;
+ return 0;
}
-static int pl022_transfer_one_message(struct spi_controller *host,
- struct spi_message *msg)
+static int pl022_transfer_one(struct spi_controller *host, struct spi_device *spi,
+ struct spi_transfer *transfer)
{
struct pl022 *pl022 = spi_controller_get_devdata(host);
- /* Initial message state */
- pl022->cur_msg = msg;
- msg->state = STATE_START;
-
- pl022->cur_transfer = list_entry(msg->transfers.next,
- struct spi_transfer, transfer_list);
+ pl022->cur_transfer = transfer;
/* Setup the SPI using the per chip configuration */
- pl022->cur_chip = spi_get_ctldata(msg->spi);
- pl022->cur_cs = spi_get_chipselect(msg->spi, 0);
- /* This is always available but may be set to -ENOENT */
- pl022->cur_gpiod = spi_get_csgpiod(msg->spi, 0);
+ pl022->cur_chip = spi_get_ctldata(spi);
+ pl022->cur_cs = spi_get_chipselect(spi, 0);
restore_state(pl022);
flush(pl022);
if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
- do_polling_transfer(pl022);
+ return do_polling_transfer(pl022);
else
- do_interrupt_dma_transfer(pl022);
+ return do_interrupt_dma_transfer(pl022);
+}
- return 0;
+static void pl022_handle_err(struct spi_controller *ctlr, struct spi_message *message)
+{
+ struct pl022 *pl022 = spi_controller_get_devdata(ctlr);
+
+ terminate_dma(pl022);
+ writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+ writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
static int pl022_unprepare_transfer_hardware(struct spi_controller *host)
@@ -2138,7 +1891,9 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
host->cleanup = pl022_cleanup;
host->setup = pl022_setup;
host->auto_runtime_pm = true;
- host->transfer_one_message = pl022_transfer_one_message;
+ host->transfer_one = pl022_transfer_one;
+ host->set_cs = pl022_cs_control;
+ host->handle_err = pl022_handle_err;
host->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
host->rt = platform_info->rt;
host->dev.of_node = dev->of_node;
@@ -2175,10 +1930,6 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
goto err_no_clk;
}
- /* Initialize transfer pump */
- tasklet_init(&pl022->pump_transfers, pump_transfers,
- (unsigned long)pl022);
-
/* Disable SSP */
writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
SSP_CR1(pl022->virtbase));
@@ -2261,7 +2012,6 @@ pl022_remove(struct amba_device *adev)
pl022_dma_remove(pl022);
amba_release_regions(adev);
- tasklet_disable(&pl022->pump_transfers);
}
#ifdef CONFIG_PM_SLEEP
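
For reference, the pl022 rework above moves the driver from transfer_one_message() to the core-managed transfer_one() path. A minimal sketch of that wiring, with hypothetical foo_* names and assuming the SPI core now sequences chip select, cs_change and inter-transfer delays:

#include <linux/spi/spi.h>

struct foo_priv {
        struct spi_transfer *cur_transfer;
};

/* Called by the SPI core around each transfer instead of a driver CS helper. */
static void foo_set_cs(struct spi_device *spi, bool enable)
{
        struct foo_priv *priv = spi_controller_get_devdata(spi->controller);

        /* drive the chip select line for this device ... */
        (void)priv;
        (void)enable;
}

static int foo_transfer_one(struct spi_controller *host,
                            struct spi_device *spi,
                            struct spi_transfer *xfer)
{
        struct foo_priv *priv = spi_controller_get_devdata(host);

        priv->cur_transfer = xfer;
        /*
         * Return 1 when the transfer completes asynchronously; the IRQ or
         * DMA completion path then calls spi_finalize_current_transfer().
         * Return 0 (or a negative error) when it already finished here.
         */
        return 1;
}

static void foo_register_ops(struct spi_controller *host)
{
        host->transfer_one = foo_transfer_one;
        host->set_cs = foo_set_cs;
        /* cs_change and inter-transfer delays are sequenced by the core */
}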
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index fb452bc78372..cfc3b1ddbd22 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -29,12 +29,15 @@
#include <asm/unaligned.h>
+#define SH_MSIOF_FLAG_FIXED_DTDL_200 BIT(0)
+
struct sh_msiof_chipdata {
u32 bits_per_word_mask;
u16 tx_fifo_size;
u16 rx_fifo_size;
u16 ctlr_flags;
u16 min_div_pow;
+ u32 flags;
};
struct sh_msiof_spi_priv {
@@ -1072,6 +1075,16 @@ static const struct sh_msiof_chipdata rcar_gen3_data = {
.min_div_pow = 1,
};
+static const struct sh_msiof_chipdata rcar_r8a7795_data = {
+ .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 64,
+ .ctlr_flags = SPI_CONTROLLER_MUST_TX,
+ .min_div_pow = 1,
+ .flags = SH_MSIOF_FLAG_FIXED_DTDL_200,
+};
+
static const struct of_device_id sh_msiof_match[] __maybe_unused = {
{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
{ .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
@@ -1082,6 +1095,7 @@ static const struct of_device_id sh_msiof_match[] __maybe_unused = {
{ .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data },
{ .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7795", .data = &rcar_r8a7795_data },
{ .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data },
{ .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
{ .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data },
@@ -1279,6 +1293,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
return -ENXIO;
}
+ if (chipdata->flags & SH_MSIOF_FLAG_FIXED_DTDL_200)
+ info->dtdl = 200;
+
if (info->mode == MSIOF_SPI_TARGET)
ctlr = spi_alloc_target(&pdev->dev,
sizeof(struct sh_msiof_spi_priv));
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
index bf01feedbf93..262c11d977ea 100644
--- a/drivers/spi/spi-sprd-adi.c
+++ b/drivers/spi/spi-sprd-adi.c
@@ -138,8 +138,7 @@ struct sprd_adi_data {
u32 slave_offset;
u32 slave_addr_size;
int (*read_check)(u32 val, u32 reg);
- int (*restart)(struct notifier_block *this,
- unsigned long mode, void *cmd);
+ int (*restart)(struct sys_off_data *data);
void (*wdg_rst)(void *p);
};
@@ -150,7 +149,6 @@ struct sprd_adi {
struct hwspinlock *hwlock;
unsigned long slave_vbase;
unsigned long slave_pbase;
- struct notifier_block restart_handler;
const struct sprd_adi_data *data;
};
@@ -370,11 +368,9 @@ static void sprd_adi_set_wdt_rst_mode(void *p)
#endif
}
-static int sprd_adi_restart(struct notifier_block *this, unsigned long mode,
- void *cmd, struct sprd_adi_wdg *wdg)
+static int sprd_adi_restart(struct sprd_adi *sadi, unsigned long mode,
+ const char *cmd, struct sprd_adi_wdg *wdg)
{
- struct sprd_adi *sadi = container_of(this, struct sprd_adi,
- restart_handler);
u32 val, reboot_mode = 0;
if (!cmd)
@@ -448,8 +444,7 @@ static int sprd_adi_restart(struct notifier_block *this, unsigned long mode,
return NOTIFY_DONE;
}
-static int sprd_adi_restart_sc9860(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int sprd_adi_restart_sc9860(struct sys_off_data *data)
{
struct sprd_adi_wdg wdg = {
.base = PMIC_WDG_BASE,
@@ -458,7 +453,7 @@ static int sprd_adi_restart_sc9860(struct notifier_block *this,
.wdg_clk = PMIC_CLK_EN,
};
- return sprd_adi_restart(this, mode, cmd, &wdg);
+ return sprd_adi_restart(data->cb_data, data->mode, data->cmd, &wdg);
}
static void sprd_adi_hw_init(struct sprd_adi *sadi)
@@ -533,7 +528,7 @@ static int sprd_adi_probe(struct platform_device *pdev)
pdev->id = of_alias_get_id(np, "spi");
num_chipselect = of_get_child_count(np);
- ctlr = spi_alloc_master(&pdev->dev, sizeof(struct sprd_adi));
+ ctlr = spi_alloc_host(&pdev->dev, sizeof(struct sprd_adi));
if (!ctlr)
return -ENOMEM;
@@ -590,9 +585,9 @@ static int sprd_adi_probe(struct platform_device *pdev)
}
if (sadi->data->restart) {
- sadi->restart_handler.notifier_call = sadi->data->restart;
- sadi->restart_handler.priority = 128;
- ret = register_restart_handler(&sadi->restart_handler);
+ ret = devm_register_restart_handler(&pdev->dev,
+ sadi->data->restart,
+ sadi);
if (ret) {
dev_err(&pdev->dev, "can not register restart handler\n");
goto put_ctlr;
@@ -606,14 +601,6 @@ put_ctlr:
return ret;
}
-static void sprd_adi_remove(struct platform_device *pdev)
-{
- struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
- struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
-
- unregister_restart_handler(&sadi->restart_handler);
-}
-
static struct sprd_adi_data sc9860_data = {
.slave_offset = ADI_10BIT_SLAVE_OFFSET,
.slave_addr_size = ADI_10BIT_SLAVE_ADDR_SIZE,
@@ -657,7 +644,6 @@ static struct platform_driver sprd_adi_driver = {
.of_match_table = sprd_adi_of_match,
},
.probe = sprd_adi_probe,
- .remove_new = sprd_adi_remove,
};
module_platform_driver(sprd_adi_driver);
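
For reference, the sprd-adi rework above replaces the open-coded restart notifier with the managed sys-off API. A minimal sketch of that pattern, with hypothetical foo_* names:

#include <linux/platform_device.h>
#include <linux/reboot.h>

static int foo_restart(struct sys_off_data *data)
{
        void *priv = data->cb_data;     /* the cookie passed at registration */
        const char *cmd = data->cmd;    /* optional reboot command string */

        /* program the PMIC/watchdog for the requested reboot mode ... */
        (void)priv;
        (void)cmd;
        return NOTIFY_DONE;
}

static int foo_probe(struct platform_device *pdev)
{
        /* devm-managed: no unregister call is needed in a remove() hook */
        return devm_register_restart_handler(&pdev->dev, foo_restart, pdev);
}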
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index 95377cf748c0..831ebae10fe0 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -578,7 +578,7 @@ static void sprd_spi_dma_release(struct sprd_spi *ss)
static int sprd_spi_dma_txrx_bufs(struct spi_device *sdev,
struct spi_transfer *t)
{
- struct sprd_spi *ss = spi_master_get_devdata(sdev->master);
+ struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
u32 trans_len = ss->trans_len;
int ret, write_size = 0;
@@ -923,7 +923,7 @@ static int sprd_spi_probe(struct platform_device *pdev)
int ret;
pdev->id = of_alias_get_id(pdev->dev.of_node, "spi");
- sctlr = spi_alloc_master(&pdev->dev, sizeof(*ss));
+ sctlr = spi_alloc_host(&pdev->dev, sizeof(*ss));
if (!sctlr)
return -ENOMEM;
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index 7fcff9c539e2..e064025e2fd6 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -6,7 +6,7 @@
* Patrice Chotard <patrice.chotard@st.com>
* Lee Jones <lee.jones@linaro.org>
*
- * SPI master mode controller driver, used in STMicroelectronics devices.
+ * SPI host mode controller driver, used in STMicroelectronics devices.
*/
#include <linux/clk.h>
@@ -115,10 +115,10 @@ static void ssc_read_rx_fifo(struct spi_st *spi_st)
spi_st->words_remaining -= count;
}
-static int spi_st_transfer_one(struct spi_master *master,
+static int spi_st_transfer_one(struct spi_controller *host,
struct spi_device *spi, struct spi_transfer *t)
{
- struct spi_st *spi_st = spi_master_get_devdata(master);
+ struct spi_st *spi_st = spi_controller_get_devdata(host);
uint32_t ctl = 0;
/* Setup transfer */
@@ -165,7 +165,7 @@ static int spi_st_transfer_one(struct spi_master *master,
if (ctl)
writel_relaxed(ctl, spi_st->base + SSC_CTL);
- spi_finalize_current_transfer(spi->master);
+ spi_finalize_current_transfer(spi->controller);
return t->len;
}
@@ -174,7 +174,7 @@ static int spi_st_transfer_one(struct spi_master *master,
#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH)
static int spi_st_setup(struct spi_device *spi)
{
- struct spi_st *spi_st = spi_master_get_devdata(spi->master);
+ struct spi_st *spi_st = spi_controller_get_devdata(spi->controller);
u32 spi_st_clk, sscbrg, var;
u32 hz = spi->max_speed_hz;
@@ -274,35 +274,35 @@ static irqreturn_t spi_st_irq(int irq, void *dev_id)
static int spi_st_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct spi_master *master;
+ struct spi_controller *host;
struct spi_st *spi_st;
int irq, ret = 0;
u32 var;
- master = spi_alloc_master(&pdev->dev, sizeof(*spi_st));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*spi_st));
+ if (!host)
return -ENOMEM;
- master->dev.of_node = np;
- master->mode_bits = MODEBITS;
- master->setup = spi_st_setup;
- master->transfer_one = spi_st_transfer_one;
- master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
- master->auto_runtime_pm = true;
- master->bus_num = pdev->id;
- master->use_gpio_descriptors = true;
- spi_st = spi_master_get_devdata(master);
+ host->dev.of_node = np;
+ host->mode_bits = MODEBITS;
+ host->setup = spi_st_setup;
+ host->transfer_one = spi_st_transfer_one;
+ host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ host->auto_runtime_pm = true;
+ host->bus_num = pdev->id;
+ host->use_gpio_descriptors = true;
+ spi_st = spi_controller_get_devdata(host);
spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
if (IS_ERR(spi_st->clk)) {
dev_err(&pdev->dev, "Unable to request clock\n");
ret = PTR_ERR(spi_st->clk);
- goto put_master;
+ goto put_host;
}
ret = clk_prepare_enable(spi_st->clk);
if (ret)
- goto put_master;
+ goto put_host;
init_completion(&spi_st->done);
@@ -324,7 +324,7 @@ static int spi_st_probe(struct platform_device *pdev)
var &= ~SSC_CTL_SR;
writel_relaxed(var, spi_st->base + SSC_CTL);
- /* Set SSC into slave mode before reconfiguring PIO pins */
+ /* Set SSC into target mode before reconfiguring PIO pins */
var = readl_relaxed(spi_st->base + SSC_CTL);
var &= ~SSC_CTL_MS;
writel_relaxed(var, spi_st->base + SSC_CTL);
@@ -347,11 +347,11 @@ static int spi_st_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret) {
- dev_err(&pdev->dev, "Failed to register master\n");
+ dev_err(&pdev->dev, "Failed to register host\n");
goto rpm_disable;
}
@@ -361,15 +361,15 @@ rpm_disable:
pm_runtime_disable(&pdev->dev);
clk_disable:
clk_disable_unprepare(spi_st->clk);
-put_master:
- spi_master_put(master);
+put_host:
+ spi_controller_put(host);
return ret;
}
static void spi_st_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct spi_st *spi_st = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct spi_st *spi_st = spi_controller_get_devdata(host);
pm_runtime_disable(&pdev->dev);
@@ -381,8 +381,8 @@ static void spi_st_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int spi_st_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct spi_st *spi_st = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct spi_st *spi_st = spi_controller_get_devdata(host);
writel_relaxed(0, spi_st->base + SSC_IEN);
pinctrl_pm_select_sleep_state(dev);
@@ -394,8 +394,8 @@ static int spi_st_runtime_suspend(struct device *dev)
static int spi_st_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct spi_st *spi_st = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct spi_st *spi_st = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(spi_st->clk);
@@ -408,10 +408,10 @@ static int spi_st_runtime_resume(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static int spi_st_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
int ret;
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(host);
if (ret)
return ret;
@@ -420,10 +420,10 @@ static int spi_st_suspend(struct device *dev)
static int spi_st_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
int ret;
- ret = spi_master_resume(master);
+ ret = spi_controller_resume(host);
if (ret)
return ret;
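
For reference, the spi-st-ssc4 changes above are part of the tree-wide move to controller-neutral naming. A minimal sketch of the renamed allocation and registration calls, with hypothetical foo_* names:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int foo_probe(struct platform_device *pdev)
{
        struct spi_controller *host;

        /* spi_alloc_host()/devm_spi_alloc_host() replace the *_master() names */
        host = devm_spi_alloc_host(&pdev->dev, 0);
        if (!host)
                return -ENOMEM;

        host->dev.of_node = pdev->dev.of_node;

        /* likewise spi_controller_{get_devdata,put,suspend,resume}() */
        return devm_spi_register_controller(&pdev->dev, host);
}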
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index def74ae9b5f6..385832030459 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -357,7 +357,7 @@ static int stm32_qspi_get_mode(u8 buswidth)
static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op)
{
- struct stm32_qspi *qspi = spi_controller_get_devdata(spi->master);
+ struct stm32_qspi *qspi = spi_controller_get_devdata(spi->controller);
struct stm32_qspi_flash *flash = &qspi->flash[spi_get_chipselect(spi, 0)];
u32 ccr, cr;
int timeout, err = 0, err_poll_status = 0;
@@ -448,7 +448,7 @@ static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *
unsigned long polling_rate_us,
unsigned long timeout_ms)
{
- struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->controller);
int ret;
if (!spi_mem_supports_op(mem, op))
@@ -476,7 +476,7 @@ static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *
static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->controller);
int ret;
ret = pm_runtime_resume_and_get(qspi->dev);
@@ -500,7 +500,7 @@ static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
static int stm32_qspi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
- struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
+ struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->controller);
if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
return -EOPNOTSUPP;
@@ -518,7 +518,7 @@ static int stm32_qspi_dirmap_create(struct spi_mem_dirmap_desc *desc)
static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
- struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
+ struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->controller);
struct spi_mem_op op;
u32 addr_max;
int ret;
@@ -640,7 +640,7 @@ end_of_transfer:
static int stm32_qspi_setup(struct spi_device *spi)
{
- struct spi_controller *ctrl = spi->master;
+ struct spi_controller *ctrl = spi->controller;
struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
struct stm32_qspi_flash *flash;
u32 presc, mode;
@@ -775,7 +775,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq;
- ctrl = devm_spi_alloc_master(dev, sizeof(*qspi));
+ ctrl = devm_spi_alloc_host(dev, sizeof(*qspi));
if (!ctrl)
return -ENOMEM;
@@ -861,7 +861,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_noresume(dev);
- ret = spi_register_master(ctrl);
+ ret = spi_register_controller(ctrl);
if (ret)
goto err_pm_runtime_free;
@@ -892,7 +892,7 @@ static void stm32_qspi_remove(struct platform_device *pdev)
struct stm32_qspi *qspi = platform_get_drvdata(pdev);
pm_runtime_get_sync(qspi->dev);
- spi_unregister_master(qspi->ctrl);
+ spi_unregister_controller(qspi->ctrl);
/* disable qspi */
writel_relaxed(0, qspi->io_base + QSPI_CR);
stm32_qspi_dma_free(qspi);
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index e6e3e4ea29f9..e61302ef3c21 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -22,58 +22,65 @@
#define DRIVER_NAME "spi_stm32"
-/* STM32F4 SPI registers */
-#define STM32F4_SPI_CR1 0x00
-#define STM32F4_SPI_CR2 0x04
-#define STM32F4_SPI_SR 0x08
-#define STM32F4_SPI_DR 0x0C
-#define STM32F4_SPI_I2SCFGR 0x1C
-
-/* STM32F4_SPI_CR1 bit fields */
-#define STM32F4_SPI_CR1_CPHA BIT(0)
-#define STM32F4_SPI_CR1_CPOL BIT(1)
-#define STM32F4_SPI_CR1_MSTR BIT(2)
-#define STM32F4_SPI_CR1_BR_SHIFT 3
-#define STM32F4_SPI_CR1_BR GENMASK(5, 3)
-#define STM32F4_SPI_CR1_SPE BIT(6)
-#define STM32F4_SPI_CR1_LSBFRST BIT(7)
-#define STM32F4_SPI_CR1_SSI BIT(8)
-#define STM32F4_SPI_CR1_SSM BIT(9)
-#define STM32F4_SPI_CR1_RXONLY BIT(10)
+/* STM32F4/7 SPI registers */
+#define STM32FX_SPI_CR1 0x00
+#define STM32FX_SPI_CR2 0x04
+#define STM32FX_SPI_SR 0x08
+#define STM32FX_SPI_DR 0x0C
+#define STM32FX_SPI_I2SCFGR 0x1C
+
+/* STM32FX_SPI_CR1 bit fields */
+#define STM32FX_SPI_CR1_CPHA BIT(0)
+#define STM32FX_SPI_CR1_CPOL BIT(1)
+#define STM32FX_SPI_CR1_MSTR BIT(2)
+#define STM32FX_SPI_CR1_BR_SHIFT 3
+#define STM32FX_SPI_CR1_BR GENMASK(5, 3)
+#define STM32FX_SPI_CR1_SPE BIT(6)
+#define STM32FX_SPI_CR1_LSBFRST BIT(7)
+#define STM32FX_SPI_CR1_SSI BIT(8)
+#define STM32FX_SPI_CR1_SSM BIT(9)
+#define STM32FX_SPI_CR1_RXONLY BIT(10)
#define STM32F4_SPI_CR1_DFF BIT(11)
-#define STM32F4_SPI_CR1_CRCNEXT BIT(12)
-#define STM32F4_SPI_CR1_CRCEN BIT(13)
-#define STM32F4_SPI_CR1_BIDIOE BIT(14)
-#define STM32F4_SPI_CR1_BIDIMODE BIT(15)
-#define STM32F4_SPI_CR1_BR_MIN 0
-#define STM32F4_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3)
-
-/* STM32F4_SPI_CR2 bit fields */
-#define STM32F4_SPI_CR2_RXDMAEN BIT(0)
-#define STM32F4_SPI_CR2_TXDMAEN BIT(1)
-#define STM32F4_SPI_CR2_SSOE BIT(2)
-#define STM32F4_SPI_CR2_FRF BIT(4)
-#define STM32F4_SPI_CR2_ERRIE BIT(5)
-#define STM32F4_SPI_CR2_RXNEIE BIT(6)
-#define STM32F4_SPI_CR2_TXEIE BIT(7)
-
-/* STM32F4_SPI_SR bit fields */
-#define STM32F4_SPI_SR_RXNE BIT(0)
-#define STM32F4_SPI_SR_TXE BIT(1)
-#define STM32F4_SPI_SR_CHSIDE BIT(2)
-#define STM32F4_SPI_SR_UDR BIT(3)
-#define STM32F4_SPI_SR_CRCERR BIT(4)
-#define STM32F4_SPI_SR_MODF BIT(5)
-#define STM32F4_SPI_SR_OVR BIT(6)
-#define STM32F4_SPI_SR_BSY BIT(7)
-#define STM32F4_SPI_SR_FRE BIT(8)
-
-/* STM32F4_SPI_I2SCFGR bit fields */
-#define STM32F4_SPI_I2SCFGR_I2SMOD BIT(11)
+#define STM32F7_SPI_CR1_CRCL BIT(11)
+#define STM32FX_SPI_CR1_CRCNEXT BIT(12)
+#define STM32FX_SPI_CR1_CRCEN BIT(13)
+#define STM32FX_SPI_CR1_BIDIOE BIT(14)
+#define STM32FX_SPI_CR1_BIDIMODE BIT(15)
+#define STM32FX_SPI_CR1_BR_MIN 0
+#define STM32FX_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3)
+
+/* STM32FX_SPI_CR2 bit fields */
+#define STM32FX_SPI_CR2_RXDMAEN BIT(0)
+#define STM32FX_SPI_CR2_TXDMAEN BIT(1)
+#define STM32FX_SPI_CR2_SSOE BIT(2)
+#define STM32FX_SPI_CR2_FRF BIT(4)
+#define STM32FX_SPI_CR2_ERRIE BIT(5)
+#define STM32FX_SPI_CR2_RXNEIE BIT(6)
+#define STM32FX_SPI_CR2_TXEIE BIT(7)
+#define STM32F7_SPI_CR2_DS GENMASK(11, 8)
+#define STM32F7_SPI_CR2_FRXTH BIT(12)
+#define STM32F7_SPI_CR2_LDMA_RX BIT(13)
+#define STM32F7_SPI_CR2_LDMA_TX BIT(14)
+
+/* STM32FX_SPI_SR bit fields */
+#define STM32FX_SPI_SR_RXNE BIT(0)
+#define STM32FX_SPI_SR_TXE BIT(1)
+#define STM32FX_SPI_SR_CHSIDE BIT(2)
+#define STM32FX_SPI_SR_UDR BIT(3)
+#define STM32FX_SPI_SR_CRCERR BIT(4)
+#define STM32FX_SPI_SR_MODF BIT(5)
+#define STM32FX_SPI_SR_OVR BIT(6)
+#define STM32FX_SPI_SR_BSY BIT(7)
+#define STM32FX_SPI_SR_FRE BIT(8)
+#define STM32F7_SPI_SR_FRLVL GENMASK(10, 9)
+#define STM32F7_SPI_SR_FTLVL GENMASK(12, 11)
+
+/* STM32FX_SPI_I2SCFGR bit fields */
+#define STM32FX_SPI_I2SCFGR_I2SMOD BIT(11)
/* STM32F4 SPI Baud Rate min/max divisor */
-#define STM32F4_SPI_BR_DIV_MIN (2 << STM32F4_SPI_CR1_BR_MIN)
-#define STM32F4_SPI_BR_DIV_MAX (2 << STM32F4_SPI_CR1_BR_MAX)
+#define STM32FX_SPI_BR_DIV_MIN (2 << STM32FX_SPI_CR1_BR_MIN)
+#define STM32FX_SPI_BR_DIV_MAX (2 << STM32FX_SPI_CR1_BR_MAX)
/* STM32H7 SPI registers */
#define STM32H7_SPI_CR1 0x00
@@ -147,6 +154,20 @@
/* STM32H7_SPI_I2SCFGR bit fields */
#define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0)
+/* STM32MP25 SPI registers bit fields */
+#define STM32MP25_SPI_HWCFGR1 0x3F0
+
+/* STM32MP25_SPI_CR2 bit fields */
+#define STM32MP25_SPI_TSIZE_MAX_LIMITED GENMASK(9, 0)
+
+/* STM32MP25_SPI_HWCFGR1 */
+#define STM32MP25_SPI_HWCFGR1_FULLCFG GENMASK(27, 24)
+#define STM32MP25_SPI_HWCFGR1_FULLCFG_LIMITED 0x0
+#define STM32MP25_SPI_HWCFGR1_FULLCFG_FULL 0x1
+#define STM32MP25_SPI_HWCFGR1_DSCFG GENMASK(19, 16)
+#define STM32MP25_SPI_HWCFGR1_DSCFG_16_B 0x0
+#define STM32MP25_SPI_HWCFGR1_DSCFG_32_B 0x1
+
/* STM32H7 SPI Master Baud Rate min/max divisor */
#define STM32H7_SPI_MBR_DIV_MIN (2 << STM32H7_SPI_CFG1_MBR_MIN)
#define STM32H7_SPI_MBR_DIV_MAX (2 << STM32H7_SPI_CFG1_MBR_MAX)
@@ -173,7 +194,7 @@
#define SPI_DMA_MIN_BYTES 16
/* STM32 SPI driver helpers */
-#define STM32_SPI_MASTER_MODE(stm32_spi) (!(stm32_spi)->device_mode)
+#define STM32_SPI_HOST_MODE(stm32_spi) (!(stm32_spi)->device_mode)
#define STM32_SPI_DEVICE_MODE(stm32_spi) ((stm32_spi)->device_mode)
/**
@@ -200,6 +221,7 @@ struct stm32_spi_reg {
* @br: baud rate register and bitfields
* @rx: SPI RX data register
* @tx: SPI TX data register
+ * @fullcfg: SPI full or limited feature set register
*/
struct stm32_spi_regspec {
const struct stm32_spi_reg en;
@@ -212,6 +234,7 @@ struct stm32_spi_regspec {
const struct stm32_spi_reg br;
const struct stm32_spi_reg rx;
const struct stm32_spi_reg tx;
+ const struct stm32_spi_reg fullcfg;
};
struct stm32_spi;
@@ -222,13 +245,15 @@ struct stm32_spi;
* @get_fifo_size: routine to get fifo size
* @get_bpw_mask: routine to get bits per word mask
* @disable: routine to disable controller
- * @config: routine to configure controller as SPI Master
+ * @config: routine to configure controller as SPI Host
* @set_bpw: routine to configure registers to for bits per word
* @set_mode: routine to configure registers to desired mode
* @set_data_idleness: optional routine to configure registers to desired idle
* time between frames (if driver has this functionality)
* @set_number_of_data: optional routine to configure registers to desired
* number of data (if driver has this functionality)
+ * @write_tx: routine to write to transmit register/FIFO
+ * @read_rx: routine to read from receive register/FIFO
* @transfer_one_dma_start: routine to start transfer a single spi_transfer
* using DMA
* @dma_rx_cb: routine to call after DMA RX channel operation is complete
@@ -241,6 +266,7 @@ struct stm32_spi;
* @has_fifo: boolean to know if fifo is used for driver
* @has_device_mode: is this compatible capable to switch on device mode
* @flags: compatible specific SPI controller flags used at registration time
+ * @prevent_dma_burst: boolean indicating that DMA bursts must be avoided
*/
struct stm32_spi_cfg {
const struct stm32_spi_regspec *regs;
@@ -252,6 +278,8 @@ struct stm32_spi_cfg {
int (*set_mode)(struct stm32_spi *spi, unsigned int comm_type);
void (*set_data_idleness)(struct stm32_spi *spi, u32 length);
int (*set_number_of_data)(struct stm32_spi *spi, u32 length);
+ void (*write_tx)(struct stm32_spi *spi);
+ void (*read_rx)(struct stm32_spi *spi);
void (*transfer_one_dma_start)(struct stm32_spi *spi);
void (*dma_rx_cb)(void *data);
void (*dma_tx_cb)(void *data);
@@ -263,6 +291,7 @@ struct stm32_spi_cfg {
bool has_fifo;
bool has_device_mode;
u16 flags;
+ bool prevent_dma_burst;
};
/**
@@ -276,7 +305,9 @@ struct stm32_spi_cfg {
* @lock: prevent I/O concurrent access
* @irq: SPI controller interrupt line
* @fifo_size: size of the embedded fifo in bytes
- * @cur_midi: master inter-data idleness in ns
+ * @t_size_max: maximum number of data frames in one transfer
+ * @feature_set: SPI full or limited feature set
+ * @cur_midi: host inter-data idleness in ns
* @cur_speed: speed configured in Hz
* @cur_half_period: time of a half bit in us
* @cur_bpw: number of bits in a single SPI data frame
@@ -303,6 +334,10 @@ struct stm32_spi {
spinlock_t lock; /* prevent I/O concurrent access */
int irq;
unsigned int fifo_size;
+ unsigned int t_size_max;
+ unsigned int feature_set;
+#define STM32_SPI_FEATURE_LIMITED STM32MP25_SPI_HWCFGR1_FULLCFG_LIMITED /* 0x0 */
+#define STM32_SPI_FEATURE_FULL STM32MP25_SPI_HWCFGR1_FULLCFG_FULL /* 0x1 */
unsigned int cur_midi;
unsigned int cur_speed;
@@ -324,20 +359,20 @@ struct stm32_spi {
bool device_mode;
};
-static const struct stm32_spi_regspec stm32f4_spi_regspec = {
- .en = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE },
+static const struct stm32_spi_regspec stm32fx_spi_regspec = {
+ .en = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_SPE },
- .dma_rx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_RXDMAEN },
- .dma_tx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN },
+ .dma_rx_en = { STM32FX_SPI_CR2, STM32FX_SPI_CR2_RXDMAEN },
+ .dma_tx_en = { STM32FX_SPI_CR2, STM32FX_SPI_CR2_TXDMAEN },
- .cpol = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPOL },
- .cpha = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPHA },
- .lsb_first = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_LSBFRST },
+ .cpol = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_CPOL },
+ .cpha = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_CPHA },
+ .lsb_first = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_LSBFRST },
.cs_high = {},
- .br = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_BR, STM32F4_SPI_CR1_BR_SHIFT },
+ .br = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_BR, STM32FX_SPI_CR1_BR_SHIFT },
- .rx = { STM32F4_SPI_DR },
- .tx = { STM32F4_SPI_DR },
+ .rx = { STM32FX_SPI_DR },
+ .tx = { STM32FX_SPI_DR },
};
static const struct stm32_spi_regspec stm32h7_spi_regspec = {
@@ -360,6 +395,28 @@ static const struct stm32_spi_regspec stm32h7_spi_regspec = {
.tx = { STM32H7_SPI_TXDR },
};
+static const struct stm32_spi_regspec stm32mp25_spi_regspec = {
+ /* SPI data transfer is enabled but spi_ker_ck is idle.
+ * CFG1 and CFG2 registers are write protected when SPE is enabled.
+ */
+ .en = { STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE },
+
+ .dma_rx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_RXDMAEN },
+ .dma_tx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN },
+
+ .cpol = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPOL },
+ .cpha = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPHA },
+ .lsb_first = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_LSBFRST },
+ .cs_high = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_SSIOP },
+ .br = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_MBR,
+ STM32H7_SPI_CFG1_MBR_SHIFT },
+
+ .rx = { STM32H7_SPI_RXDR },
+ .tx = { STM32H7_SPI_TXDR },
+
+ .fullcfg = { STM32MP25_SPI_HWCFGR1, STM32MP25_SPI_HWCFGR1_FULLCFG },
+};
+
static inline void stm32_spi_set_bits(struct stm32_spi *spi,
u32 offset, u32 bits)
{
@@ -410,6 +467,16 @@ static int stm32f4_spi_get_bpw_mask(struct stm32_spi *spi)
}
/**
+ * stm32f7_spi_get_bpw_mask - Return bits per word mask
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32f7_spi_get_bpw_mask(struct stm32_spi *spi)
+{
+ dev_dbg(spi->dev, "16-bit maximum data frame\n");
+ return SPI_BPW_RANGE_MASK(4, 16);
+}
+
+/**
* stm32h7_spi_get_bpw_mask - Return bits per word mask
* @spi: pointer to the spi controller data structure
*/
@@ -437,6 +504,28 @@ static int stm32h7_spi_get_bpw_mask(struct stm32_spi *spi)
}
/**
+ * stm32mp25_spi_get_bpw_mask - Return bits per word mask
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32mp25_spi_get_bpw_mask(struct stm32_spi *spi)
+{
+ u32 dscfg, max_bpw;
+
+ if (spi->feature_set == STM32_SPI_FEATURE_LIMITED) {
+ dev_dbg(spi->dev, "8-bit or 16-bit data frame supported\n");
+ return SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ }
+
+ dscfg = FIELD_GET(STM32MP25_SPI_HWCFGR1_DSCFG,
+ readl_relaxed(spi->base + STM32MP25_SPI_HWCFGR1));
+ max_bpw = 16;
+ if (dscfg == STM32MP25_SPI_HWCFGR1_DSCFG_32_B)
+ max_bpw = 32;
+ dev_dbg(spi->dev, "%d-bit maximum data frame\n", max_bpw);
+ return SPI_BPW_RANGE_MASK(4, max_bpw);
+}
+
+/**
* stm32_spi_prepare_mbr - Determine baud rate divisor value
* @spi: pointer to the spi controller data structure
* @speed_hz: requested speed
@@ -502,19 +591,48 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
*/
static void stm32f4_spi_write_tx(struct stm32_spi *spi)
{
- if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
- STM32F4_SPI_SR_TXE)) {
+ if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32FX_SPI_SR) &
+ STM32FX_SPI_SR_TXE)) {
u32 offs = spi->cur_xferlen - spi->tx_len;
if (spi->cur_bpw == 16) {
const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
- writew_relaxed(*tx_buf16, spi->base + STM32F4_SPI_DR);
+ writew_relaxed(*tx_buf16, spi->base + STM32FX_SPI_DR);
spi->tx_len -= sizeof(u16);
} else {
const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
- writeb_relaxed(*tx_buf8, spi->base + STM32F4_SPI_DR);
+ writeb_relaxed(*tx_buf8, spi->base + STM32FX_SPI_DR);
+ spi->tx_len -= sizeof(u8);
+ }
+ }
+
+ dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
+}
+
+/**
+ * stm32f7_spi_write_tx - Write bytes to Transmit Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * Read from tx_buf depends on remaining bytes to avoid reading beyond
+ * the end of tx_buf.
+ */
+static void stm32f7_spi_write_tx(struct stm32_spi *spi)
+{
+ if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32FX_SPI_SR) &
+ STM32FX_SPI_SR_TXE)) {
+ u32 offs = spi->cur_xferlen - spi->tx_len;
+
+ if (spi->tx_len >= sizeof(u16)) {
+ const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
+
+ writew_relaxed(*tx_buf16, spi->base + STM32FX_SPI_DR);
+ spi->tx_len -= sizeof(u16);
+ } else {
+ const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
+
+ writeb_relaxed(*tx_buf8, spi->base + STM32FX_SPI_DR);
spi->tx_len -= sizeof(u8);
}
}
@@ -566,19 +684,19 @@ static void stm32h7_spi_write_txfifo(struct stm32_spi *spi)
*/
static void stm32f4_spi_read_rx(struct stm32_spi *spi)
{
- if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
- STM32F4_SPI_SR_RXNE)) {
+ if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32FX_SPI_SR) &
+ STM32FX_SPI_SR_RXNE)) {
u32 offs = spi->cur_xferlen - spi->rx_len;
if (spi->cur_bpw == 16) {
u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
- *rx_buf16 = readw_relaxed(spi->base + STM32F4_SPI_DR);
+ *rx_buf16 = readw_relaxed(spi->base + STM32FX_SPI_DR);
spi->rx_len -= sizeof(u16);
} else {
u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
- *rx_buf8 = readb_relaxed(spi->base + STM32F4_SPI_DR);
+ *rx_buf8 = readb_relaxed(spi->base + STM32FX_SPI_DR);
spi->rx_len -= sizeof(u8);
}
}
@@ -587,6 +705,46 @@ static void stm32f4_spi_read_rx(struct stm32_spi *spi)
}
/**
+ * stm32f7_spi_read_rx - Read bytes from Receive Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * Write in rx_buf depends on remaining bytes to avoid writing beyond
+ * the end of rx_buf.
+ */
+static void stm32f7_spi_read_rx(struct stm32_spi *spi)
+{
+ u32 sr = readl_relaxed(spi->base + STM32FX_SPI_SR);
+ u32 frlvl = FIELD_GET(STM32F7_SPI_SR_FRLVL, sr);
+
+ while ((spi->rx_len > 0) && (frlvl > 0)) {
+ u32 offs = spi->cur_xferlen - spi->rx_len;
+
+ if ((spi->rx_len >= sizeof(u16)) && (frlvl >= 2)) {
+ u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
+
+ *rx_buf16 = readw_relaxed(spi->base + STM32FX_SPI_DR);
+ spi->rx_len -= sizeof(u16);
+ } else {
+ u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
+
+ *rx_buf8 = readb_relaxed(spi->base + STM32FX_SPI_DR);
+ spi->rx_len -= sizeof(u8);
+ }
+
+ sr = readl_relaxed(spi->base + STM32FX_SPI_SR);
+ frlvl = FIELD_GET(STM32F7_SPI_SR_FRLVL, sr);
+ }
+
+ if (spi->rx_len >= sizeof(u16))
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR2, STM32F7_SPI_CR2_FRXTH);
+ else
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32F7_SPI_CR2_FRXTH);
+
+ dev_dbg(spi->dev, "%s: %d bytes left (sr=%08x)\n",
+ __func__, spi->rx_len, sr);
+}
+
+/**
* stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
* @spi: pointer to the spi controller data structure
*
@@ -645,10 +803,10 @@ static void stm32_spi_enable(struct stm32_spi *spi)
}
/**
- * stm32f4_spi_disable - Disable SPI controller
+ * stm32fx_spi_disable - Disable SPI controller
* @spi: pointer to the spi controller data structure
*/
-static void stm32f4_spi_disable(struct stm32_spi *spi)
+static void stm32fx_spi_disable(struct stm32_spi *spi)
{
unsigned long flags;
u32 sr;
@@ -657,20 +815,20 @@ static void stm32f4_spi_disable(struct stm32_spi *spi)
spin_lock_irqsave(&spi->lock, flags);
- if (!(readl_relaxed(spi->base + STM32F4_SPI_CR1) &
- STM32F4_SPI_CR1_SPE)) {
+ if (!(readl_relaxed(spi->base + STM32FX_SPI_CR1) &
+ STM32FX_SPI_CR1_SPE)) {
spin_unlock_irqrestore(&spi->lock, flags);
return;
}
/* Disable interrupts */
- stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXEIE |
- STM32F4_SPI_CR2_RXNEIE |
- STM32F4_SPI_CR2_ERRIE);
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_TXEIE |
+ STM32FX_SPI_CR2_RXNEIE |
+ STM32FX_SPI_CR2_ERRIE);
/* Wait until BSY = 0 */
- if (readl_relaxed_poll_timeout_atomic(spi->base + STM32F4_SPI_SR,
- sr, !(sr & STM32F4_SPI_SR_BSY),
+ if (readl_relaxed_poll_timeout_atomic(spi->base + STM32FX_SPI_SR,
+ sr, !(sr & STM32FX_SPI_SR_BSY),
10, 100000) < 0) {
dev_warn(spi->dev, "disabling condition timeout\n");
}
@@ -680,14 +838,14 @@ static void stm32f4_spi_disable(struct stm32_spi *spi)
if (spi->cur_usedma && spi->dma_rx)
dmaengine_terminate_async(spi->dma_rx);
- stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE);
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR1, STM32FX_SPI_CR1_SPE);
- stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN |
- STM32F4_SPI_CR2_RXDMAEN);
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_TXDMAEN |
+ STM32FX_SPI_CR2_RXDMAEN);
/* Sequence to clear OVR flag */
- readl_relaxed(spi->base + STM32F4_SPI_DR);
- readl_relaxed(spi->base + STM32F4_SPI_SR);
+ readl_relaxed(spi->base + STM32FX_SPI_DR);
+ readl_relaxed(spi->base + STM32FX_SPI_SR);
spin_unlock_irqrestore(&spi->lock, flags);
}
@@ -763,11 +921,11 @@ static bool stm32_spi_can_dma(struct spi_controller *ctrl,
}
/**
- * stm32f4_spi_irq_event - Interrupt handler for SPI controller events
+ * stm32fx_spi_irq_event - Interrupt handler for SPI controller events
* @irq: interrupt line
* @dev_id: SPI controller ctrl interface
*/
-static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
+static irqreturn_t stm32fx_spi_irq_event(int irq, void *dev_id)
{
struct spi_controller *ctrl = dev_id;
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
@@ -776,26 +934,26 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
spin_lock(&spi->lock);
- sr = readl_relaxed(spi->base + STM32F4_SPI_SR);
+ sr = readl_relaxed(spi->base + STM32FX_SPI_SR);
/*
* BSY flag is not handled in interrupt but it is normal behavior when
* this flag is set.
*/
- sr &= ~STM32F4_SPI_SR_BSY;
+ sr &= ~STM32FX_SPI_SR_BSY;
if (!spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX ||
spi->cur_comm == SPI_3WIRE_TX)) {
/* OVR flag shouldn't be handled for TX only mode */
- sr &= ~(STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE);
- mask |= STM32F4_SPI_SR_TXE;
+ sr &= ~(STM32FX_SPI_SR_OVR | STM32FX_SPI_SR_RXNE);
+ mask |= STM32FX_SPI_SR_TXE;
}
if (!spi->cur_usedma && (spi->cur_comm == SPI_FULL_DUPLEX ||
spi->cur_comm == SPI_SIMPLEX_RX ||
spi->cur_comm == SPI_3WIRE_RX)) {
/* TXE flag is set and is handled when RXNE flag occurs */
- sr &= ~STM32F4_SPI_SR_TXE;
- mask |= STM32F4_SPI_SR_RXNE | STM32F4_SPI_SR_OVR;
+ sr &= ~STM32FX_SPI_SR_TXE;
+ mask |= STM32FX_SPI_SR_RXNE | STM32FX_SPI_SR_OVR;
}
if (!(sr & mask)) {
@@ -804,12 +962,12 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
return IRQ_NONE;
}
- if (sr & STM32F4_SPI_SR_OVR) {
+ if (sr & STM32FX_SPI_SR_OVR) {
dev_warn(spi->dev, "Overrun: received value discarded\n");
/* Sequence to clear OVR flag */
- readl_relaxed(spi->base + STM32F4_SPI_DR);
- readl_relaxed(spi->base + STM32F4_SPI_SR);
+ readl_relaxed(spi->base + STM32FX_SPI_DR);
+ readl_relaxed(spi->base + STM32FX_SPI_SR);
/*
* If overrun is detected, it means that something went wrong,
@@ -820,28 +978,28 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
goto end_irq;
}
- if (sr & STM32F4_SPI_SR_TXE) {
+ if (sr & STM32FX_SPI_SR_TXE) {
if (spi->tx_buf)
- stm32f4_spi_write_tx(spi);
+ spi->cfg->write_tx(spi);
if (spi->tx_len == 0)
end = true;
}
- if (sr & STM32F4_SPI_SR_RXNE) {
- stm32f4_spi_read_rx(spi);
+ if (sr & STM32FX_SPI_SR_RXNE) {
+ spi->cfg->read_rx(spi);
if (spi->rx_len == 0)
end = true;
else if (spi->tx_buf)/* Load data for discontinuous mode */
- stm32f4_spi_write_tx(spi);
+ spi->cfg->write_tx(spi);
}
end_irq:
if (end) {
/* Immediately disable interrupts to do not generate new one */
- stm32_spi_clr_bits(spi, STM32F4_SPI_CR2,
- STM32F4_SPI_CR2_TXEIE |
- STM32F4_SPI_CR2_RXNEIE |
- STM32F4_SPI_CR2_ERRIE);
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR2,
+ STM32FX_SPI_CR2_TXEIE |
+ STM32FX_SPI_CR2_RXNEIE |
+ STM32FX_SPI_CR2_ERRIE);
spin_unlock(&spi->lock);
return IRQ_WAKE_THREAD;
}
@@ -851,17 +1009,17 @@ end_irq:
}
/**
- * stm32f4_spi_irq_thread - Thread of interrupt handler for SPI controller
+ * stm32fx_spi_irq_thread - Thread of interrupt handler for SPI controller
* @irq: interrupt line
* @dev_id: SPI controller interface
*/
-static irqreturn_t stm32f4_spi_irq_thread(int irq, void *dev_id)
+static irqreturn_t stm32fx_spi_irq_thread(int irq, void *dev_id)
{
struct spi_controller *ctrl = dev_id;
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
spi_finalize_current_transfer(ctrl);
- stm32f4_spi_disable(spi);
+ stm32fx_spi_disable(spi);
return IRQ_HANDLED;
}
@@ -974,7 +1132,7 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
unsigned long flags;
u32 clrb = 0, setb = 0;
- /* SPI slave device may need time between data frames */
+ /* SPI target device may need time between data frames */
spi->cur_midi = 0;
if (np && !of_property_read_u32(np, "st,spi-midi-ns", &spi->cur_midi))
dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi);
@@ -1013,7 +1171,7 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
int ret;
ret = spi_split_transfers_maxwords(ctrl, msg,
- STM32H7_SPI_TSIZE_MAX,
+ spi->t_size_max,
GFP_KERNEL | GFP_DMA);
if (ret)
return ret;
@@ -1034,18 +1192,18 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
}
/**
- * stm32f4_spi_dma_tx_cb - dma callback
+ * stm32fx_spi_dma_tx_cb - dma callback
* @data: pointer to the spi controller data structure
*
* DMA callback is called when the transfer is complete for DMA TX channel.
*/
-static void stm32f4_spi_dma_tx_cb(void *data)
+static void stm32fx_spi_dma_tx_cb(void *data)
{
struct stm32_spi *spi = data;
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
spi_finalize_current_transfer(spi->ctrl);
- stm32f4_spi_disable(spi);
+ stm32fx_spi_disable(spi);
}
}
@@ -1067,15 +1225,19 @@ static void stm32_spi_dma_rx_cb(void *data)
* stm32_spi_dma_config - configure dma slave channel depending on current
* transfer bits_per_word.
* @spi: pointer to the spi controller data structure
+ * @dma_chan: pointer to the DMA channel
* @dma_conf: pointer to the dma_slave_config structure
* @dir: direction of the dma transfer
*/
static void stm32_spi_dma_config(struct stm32_spi *spi,
+ struct dma_chan *dma_chan,
struct dma_slave_config *dma_conf,
enum dma_transfer_direction dir)
{
enum dma_slave_buswidth buswidth;
- u32 maxburst;
+ struct dma_slave_caps caps;
+ u32 maxburst = 1;
+ int ret;
if (spi->cur_bpw <= 8)
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
@@ -1084,15 +1246,14 @@ static void stm32_spi_dma_config(struct stm32_spi *spi,
else
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
- if (spi->cfg->has_fifo) {
- /* Valid for DMA Half or Full Fifo threshold */
- if (spi->cur_fthlv == 2)
- maxburst = 1;
- else
- maxburst = spi->cur_fthlv;
- } else {
- maxburst = 1;
- }
+ /* Valid for DMA Half or Full Fifo threshold */
+ if (!spi->cfg->prevent_dma_burst && spi->cfg->has_fifo && spi->cur_fthlv != 2)
+ maxburst = spi->cur_fthlv;
+
+ /* Get the DMA channel caps, and adjust maxburst if possible */
+ ret = dma_get_slave_caps(dma_chan, &caps);
+ if (!ret)
+ maxburst = min(maxburst, caps.max_burst);
memset(dma_conf, 0, sizeof(struct dma_slave_config));
dma_conf->direction = dir;
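
For reference, the stm32_spi_dma_config() change above queries the channel capabilities before settling on a burst size. A minimal sketch of that clamp, with a hypothetical foo_* name:

#include <linux/dmaengine.h>
#include <linux/minmax.h>

static u32 foo_clamp_maxburst(struct dma_chan *chan, u32 wanted)
{
        struct dma_slave_caps caps;

        /* If the DMA controller reports its limits, stay within them. */
        if (!dma_get_slave_caps(chan, &caps))
                wanted = min(wanted, caps.max_burst);

        return wanted;
}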
@@ -1114,21 +1275,21 @@ static void stm32_spi_dma_config(struct stm32_spi *spi,
}
/**
- * stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using
+ * stm32fx_spi_transfer_one_irq - transfer a single spi_transfer using
* interrupts
* @spi: pointer to the spi controller data structure
*
* It must returns 0 if the transfer is finished or 1 if the transfer is still
* in progress.
*/
-static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi)
+static int stm32fx_spi_transfer_one_irq(struct stm32_spi *spi)
{
unsigned long flags;
u32 cr2 = 0;
/* Enable the interrupts relative to the current communication mode */
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
- cr2 |= STM32F4_SPI_CR2_TXEIE;
+ cr2 |= STM32FX_SPI_CR2_TXEIE;
} else if (spi->cur_comm == SPI_FULL_DUPLEX ||
spi->cur_comm == SPI_SIMPLEX_RX ||
spi->cur_comm == SPI_3WIRE_RX) {
@@ -1136,20 +1297,20 @@ static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi)
* since the received data are never read. Therefore set OVR
* interrupt only when rx buffer is available.
*/
- cr2 |= STM32F4_SPI_CR2_RXNEIE | STM32F4_SPI_CR2_ERRIE;
+ cr2 |= STM32FX_SPI_CR2_RXNEIE | STM32FX_SPI_CR2_ERRIE;
} else {
return -EINVAL;
}
spin_lock_irqsave(&spi->lock, flags);
- stm32_spi_set_bits(spi, STM32F4_SPI_CR2, cr2);
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR2, cr2);
stm32_spi_enable(spi);
/* starting data transfer when buffer is loaded */
if (spi->tx_buf)
- stm32f4_spi_write_tx(spi);
+ spi->cfg->write_tx(spi);
spin_unlock_irqrestore(&spi->lock, flags);
@@ -1189,7 +1350,7 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
if (spi->tx_buf)
stm32h7_spi_write_txfifo(spi);
- if (STM32_SPI_MASTER_MODE(spi))
+ if (STM32_SPI_HOST_MODE(spi))
stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
writel_relaxed(ier, spi->base + STM32H7_SPI_IER);
@@ -1200,11 +1361,11 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
}
/**
- * stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start
+ * stm32fx_spi_transfer_one_dma_start - Set SPI driver registers to start
* transfer using DMA
* @spi: pointer to the spi controller data structure
*/
-static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
+static void stm32fx_spi_transfer_one_dma_start(struct stm32_spi *spi)
{
/* In DMA mode end of transfer is handled by DMA TX or RX callback. */
if (spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_3WIRE_RX ||
@@ -1214,13 +1375,29 @@ static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
* since the received data are never read. Therefore set OVR
* interrupt only when rx buffer is available.
*/
- stm32_spi_set_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_ERRIE);
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_ERRIE);
}
stm32_spi_enable(spi);
}
/**
+ * stm32f7_spi_transfer_one_dma_start - Set SPI driver registers to start
+ * transfer using DMA
+ * @spi: pointer to the spi controller data structure
+ */
+static void stm32f7_spi_transfer_one_dma_start(struct stm32_spi *spi)
+{
+ /* Configure DMA request trigger threshold according to DMA width */
+ if (spi->cur_bpw <= 8)
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32F7_SPI_CR2_FRXTH);
+ else
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR2, STM32F7_SPI_CR2_FRXTH);
+
+ stm32fx_spi_transfer_one_dma_start(spi);
+}
+
+/**
* stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start
* transfer using DMA
* @spi: pointer to the spi controller data structure
@@ -1237,7 +1414,7 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
stm32_spi_enable(spi);
- if (STM32_SPI_MASTER_MODE(spi))
+ if (STM32_SPI_HOST_MODE(spi))
stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
}
@@ -1260,7 +1437,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
rx_dma_desc = NULL;
if (spi->rx_buf && spi->dma_rx) {
- stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM);
+ stm32_spi_dma_config(spi, spi->dma_rx, &rx_dma_conf, DMA_DEV_TO_MEM);
dmaengine_slave_config(spi->dma_rx, &rx_dma_conf);
/* Enable Rx DMA request */
@@ -1276,7 +1453,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
tx_dma_desc = NULL;
if (spi->tx_buf && spi->dma_tx) {
- stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV);
+ stm32_spi_dma_config(spi, spi->dma_tx, &tx_dma_conf, DMA_MEM_TO_DEV);
dmaengine_slave_config(spi->dma_tx, &tx_dma_conf);
tx_dma_desc = dmaengine_prep_slave_sg(
@@ -1353,9 +1530,34 @@ dma_desc_error:
static void stm32f4_spi_set_bpw(struct stm32_spi *spi)
{
if (spi->cur_bpw == 16)
- stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR1, STM32F4_SPI_CR1_DFF);
+ else
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR1, STM32F4_SPI_CR1_DFF);
+}
+
+/**
+ * stm32f7_spi_set_bpw - Configure bits per word
+ * @spi: pointer to the spi controller data structure
+ */
+static void stm32f7_spi_set_bpw(struct stm32_spi *spi)
+{
+ u32 bpw;
+ u32 cr2_clrb = 0, cr2_setb = 0;
+
+ bpw = spi->cur_bpw - 1;
+
+ cr2_clrb |= STM32F7_SPI_CR2_DS;
+ cr2_setb |= FIELD_PREP(STM32F7_SPI_CR2_DS, bpw);
+
+ if (spi->rx_len >= sizeof(u16))
+ cr2_clrb |= STM32F7_SPI_CR2_FRXTH;
else
- stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
+ cr2_setb |= STM32F7_SPI_CR2_FRXTH;
+
+ writel_relaxed(
+ (readl_relaxed(spi->base + STM32FX_SPI_CR2) &
+ ~cr2_clrb) | cr2_setb,
+ spi->base + STM32FX_SPI_CR2);
}
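/*
 * Editorial worked example, assuming STM32F7_SPI_CR2_DS is GENMASK(11, 8):
 * FIELD_PREP() shifts the value to the field's least significant bit, so for
 * 8-bit frames bpw - 1 = 7 and FIELD_PREP(GENMASK(11, 8), 7) = 0x700. The
 * FRXTH bit is set for frames that fit in one byte so the RX FIFO threshold
 * matches the access width.
 */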
/**
@@ -1385,7 +1587,7 @@ static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
}
/**
- * stm32_spi_set_mbr - Configure baud rate divisor in master mode
+ * stm32_spi_set_mbr - Configure baud rate divisor in host mode
* @spi: pointer to the spi controller data structure
* @mbrdiv: baud rate divisor value
*/
@@ -1433,26 +1635,26 @@ static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev,
}
/**
- * stm32f4_spi_set_mode - configure communication mode
+ * stm32fx_spi_set_mode - configure communication mode
* @spi: pointer to the spi controller data structure
* @comm_type: type of communication to configure
*/
-static int stm32f4_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
+static int stm32fx_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
{
if (comm_type == SPI_3WIRE_TX || comm_type == SPI_SIMPLEX_TX) {
- stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
- STM32F4_SPI_CR1_BIDIMODE |
- STM32F4_SPI_CR1_BIDIOE);
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR1,
+ STM32FX_SPI_CR1_BIDIMODE |
+ STM32FX_SPI_CR1_BIDIOE);
} else if (comm_type == SPI_FULL_DUPLEX ||
comm_type == SPI_SIMPLEX_RX) {
- stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
- STM32F4_SPI_CR1_BIDIMODE |
- STM32F4_SPI_CR1_BIDIOE);
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR1,
+ STM32FX_SPI_CR1_BIDIMODE |
+ STM32FX_SPI_CR1_BIDIOE);
} else if (comm_type == SPI_3WIRE_RX) {
- stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
- STM32F4_SPI_CR1_BIDIMODE);
- stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
- STM32F4_SPI_CR1_BIDIOE);
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR1,
+ STM32FX_SPI_CR1_BIDIMODE);
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR1,
+ STM32FX_SPI_CR1_BIDIOE);
} else {
return -EINVAL;
}
@@ -1497,7 +1699,7 @@ static int stm32h7_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
/**
* stm32h7_spi_data_idleness - configure minimum time delay inserted between two
- * consecutive data frames in master mode
+ * consecutive data frames in host mode
* @spi: pointer to the spi controller data structure
* @len: transfer len
*/
@@ -1531,7 +1733,7 @@ static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len)
*/
static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words)
{
- if (nb_words <= STM32H7_SPI_TSIZE_MAX) {
+ if (nb_words <= spi->t_size_max) {
writel_relaxed(FIELD_PREP(STM32H7_SPI_CR2_TSIZE, nb_words),
spi->base + STM32H7_SPI_CR2);
} else {
@@ -1566,7 +1768,7 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
spi->cfg->set_bpw(spi);
/* Update spi->cur_speed with real clock speed */
- if (STM32_SPI_MASTER_MODE(spi)) {
+ if (STM32_SPI_HOST_MODE(spi)) {
mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
spi->cfg->baud_rate_div_min,
spi->cfg->baud_rate_div_max);
@@ -1586,7 +1788,7 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
spi->cur_comm = comm_type;
- if (STM32_SPI_MASTER_MODE(spi) && spi->cfg->set_data_idleness)
+ if (STM32_SPI_HOST_MODE(spi) && spi->cfg->set_data_idleness)
spi->cfg->set_data_idleness(spi, transfer->len);
if (spi->cur_bpw <= 8)
@@ -1607,7 +1809,7 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
dev_dbg(spi->dev,
"data frame of %d-bit, data packet of %d data frames\n",
spi->cur_bpw, spi->cur_fthlv);
- if (STM32_SPI_MASTER_MODE(spi))
+ if (STM32_SPI_HOST_MODE(spi))
dev_dbg(spi->dev, "speed set to %dHz\n", spi->cur_speed);
dev_dbg(spi->dev, "transfer of %d bytes (%d data frames)\n",
spi->cur_xferlen, nb_words);
@@ -1672,30 +1874,30 @@ static int stm32_spi_unprepare_msg(struct spi_controller *ctrl,
}
/**
- * stm32f4_spi_config - Configure SPI controller as SPI master
+ * stm32fx_spi_config - Configure SPI controller as SPI host
* @spi: pointer to the spi controller data structure
*/
-static int stm32f4_spi_config(struct stm32_spi *spi)
+static int stm32fx_spi_config(struct stm32_spi *spi)
{
unsigned long flags;
spin_lock_irqsave(&spi->lock, flags);
/* Ensure I2SMOD bit is kept cleared */
- stm32_spi_clr_bits(spi, STM32F4_SPI_I2SCFGR,
- STM32F4_SPI_I2SCFGR_I2SMOD);
+ stm32_spi_clr_bits(spi, STM32FX_SPI_I2SCFGR,
+ STM32FX_SPI_I2SCFGR_I2SMOD);
/*
* - SS input value high
* - transmitter half duplex direction
- * - Set the master mode (default Motorola mode)
- * - Consider 1 master/n slaves configuration and
+ * - Set the host mode (default Motorola mode)
+ * - Consider 1 host/n targets configuration and
* SS input value is determined by the SSI bit
*/
- stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SSI |
- STM32F4_SPI_CR1_BIDIOE |
- STM32F4_SPI_CR1_MSTR |
- STM32F4_SPI_CR1_SSM);
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR1, STM32FX_SPI_CR1_SSI |
+ STM32FX_SPI_CR1_BIDIOE |
+ STM32FX_SPI_CR1_MSTR |
+ STM32FX_SPI_CR1_SSM);
spin_unlock_irqrestore(&spi->lock, flags);
@@ -1729,8 +1931,8 @@ static int stm32h7_spi_config(struct stm32_spi *spi)
cr1 |= STM32H7_SPI_CR1_HDDIR | STM32H7_SPI_CR1_MASRX | STM32H7_SPI_CR1_SSI;
/*
- * - Set the master mode (default Motorola mode)
- * - Consider 1 master/n devices configuration and
+ * - Set the host mode (default Motorola mode)
+ * - Consider 1 host/n devices configuration and
* SS input value is determined by the SSI bit
* - keep control of all associated GPIOs
*/
@@ -1746,25 +1948,48 @@ static int stm32h7_spi_config(struct stm32_spi *spi)
}
static const struct stm32_spi_cfg stm32f4_spi_cfg = {
- .regs = &stm32f4_spi_regspec,
+ .regs = &stm32fx_spi_regspec,
.get_bpw_mask = stm32f4_spi_get_bpw_mask,
- .disable = stm32f4_spi_disable,
- .config = stm32f4_spi_config,
+ .disable = stm32fx_spi_disable,
+ .config = stm32fx_spi_config,
.set_bpw = stm32f4_spi_set_bpw,
- .set_mode = stm32f4_spi_set_mode,
- .transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start,
- .dma_tx_cb = stm32f4_spi_dma_tx_cb,
+ .set_mode = stm32fx_spi_set_mode,
+ .write_tx = stm32f4_spi_write_tx,
+ .read_rx = stm32f4_spi_read_rx,
+ .transfer_one_dma_start = stm32fx_spi_transfer_one_dma_start,
+ .dma_tx_cb = stm32fx_spi_dma_tx_cb,
.dma_rx_cb = stm32_spi_dma_rx_cb,
- .transfer_one_irq = stm32f4_spi_transfer_one_irq,
- .irq_handler_event = stm32f4_spi_irq_event,
- .irq_handler_thread = stm32f4_spi_irq_thread,
- .baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN,
- .baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX,
+ .transfer_one_irq = stm32fx_spi_transfer_one_irq,
+ .irq_handler_event = stm32fx_spi_irq_event,
+ .irq_handler_thread = stm32fx_spi_irq_thread,
+ .baud_rate_div_min = STM32FX_SPI_BR_DIV_MIN,
+ .baud_rate_div_max = STM32FX_SPI_BR_DIV_MAX,
.has_fifo = false,
.has_device_mode = false,
.flags = SPI_CONTROLLER_MUST_TX,
};
+static const struct stm32_spi_cfg stm32f7_spi_cfg = {
+ .regs = &stm32fx_spi_regspec,
+ .get_bpw_mask = stm32f7_spi_get_bpw_mask,
+ .disable = stm32fx_spi_disable,
+ .config = stm32fx_spi_config,
+ .set_bpw = stm32f7_spi_set_bpw,
+ .set_mode = stm32fx_spi_set_mode,
+ .write_tx = stm32f7_spi_write_tx,
+ .read_rx = stm32f7_spi_read_rx,
+ .transfer_one_dma_start = stm32f7_spi_transfer_one_dma_start,
+ .dma_tx_cb = stm32fx_spi_dma_tx_cb,
+ .dma_rx_cb = stm32_spi_dma_rx_cb,
+ .transfer_one_irq = stm32fx_spi_transfer_one_irq,
+ .irq_handler_event = stm32fx_spi_irq_event,
+ .irq_handler_thread = stm32fx_spi_irq_thread,
+ .baud_rate_div_min = STM32FX_SPI_BR_DIV_MIN,
+ .baud_rate_div_max = STM32FX_SPI_BR_DIV_MAX,
+ .has_fifo = false,
+ .flags = SPI_CONTROLLER_MUST_TX,
+};
+
static const struct stm32_spi_cfg stm32h7_spi_cfg = {
.regs = &stm32h7_spi_regspec,
.get_fifo_size = stm32h7_spi_get_fifo_size,
@@ -1775,6 +2000,8 @@ static const struct stm32_spi_cfg stm32h7_spi_cfg = {
.set_mode = stm32h7_spi_set_mode,
.set_data_idleness = stm32h7_spi_data_idleness,
.set_number_of_data = stm32h7_spi_number_of_data,
+ .write_tx = stm32h7_spi_write_txfifo,
+ .read_rx = stm32h7_spi_read_rxfifo,
.transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
.dma_rx_cb = stm32_spi_dma_rx_cb,
/*
@@ -1789,9 +2016,40 @@ static const struct stm32_spi_cfg stm32h7_spi_cfg = {
.has_device_mode = true,
};
+/*
+ * STM32MP2 is compatible with the STM32H7 except:
+ * - the DMA maxburst value is enforced to 1
+ * - spi8 has a limited feature set (TSIZE_MAX = 1024, BPW of 8 or 16)
+ */
+static const struct stm32_spi_cfg stm32mp25_spi_cfg = {
+ .regs = &stm32mp25_spi_regspec,
+ .get_fifo_size = stm32h7_spi_get_fifo_size,
+ .get_bpw_mask = stm32mp25_spi_get_bpw_mask,
+ .disable = stm32h7_spi_disable,
+ .config = stm32h7_spi_config,
+ .set_bpw = stm32h7_spi_set_bpw,
+ .set_mode = stm32h7_spi_set_mode,
+ .set_data_idleness = stm32h7_spi_data_idleness,
+ .set_number_of_data = stm32h7_spi_number_of_data,
+ .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
+ .dma_rx_cb = stm32_spi_dma_rx_cb,
+ /*
+ * dma_tx_cb is not necessary since, for TX, the DMA transfer is followed
+ * by an SPI access, so handling is performed within the SPI interrupt
+ */
+ .transfer_one_irq = stm32h7_spi_transfer_one_irq,
+ .irq_handler_thread = stm32h7_spi_irq_thread,
+ .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
+ .baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX,
+ .has_fifo = true,
+ .prevent_dma_burst = true,
+};
+
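/*
 * Editorial sketch, not part of the patch: each compatible string selects one
 * of the cfg tables above through the OF match data, and the common code then
 * dispatches through the per-variant ops; roughly (the exact lookup helper in
 * the driver may differ):
 */
	spi->cfg = (const struct stm32_spi_cfg *)
		   of_device_get_match_data(&pdev->dev);
	...
	if (spi->tx_buf)
		spi->cfg->write_tx(spi);	/* F4/F7/H7-specific TX path */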
static const struct of_device_id stm32_spi_of_match[] = {
+ { .compatible = "st,stm32mp25-spi", .data = (void *)&stm32mp25_spi_cfg },
{ .compatible = "st,stm32h7-spi", .data = (void *)&stm32h7_spi_cfg },
{ .compatible = "st,stm32f4-spi", .data = (void *)&stm32f4_spi_cfg },
+ { .compatible = "st,stm32f7-spi", .data = (void *)&stm32f7_spi_cfg },
{},
};
MODULE_DEVICE_TABLE(of, stm32_spi_of_match);
@@ -1820,9 +2078,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
}
if (device_mode)
- ctrl = devm_spi_alloc_slave(&pdev->dev, sizeof(struct stm32_spi));
+ ctrl = devm_spi_alloc_target(&pdev->dev, sizeof(struct stm32_spi));
else
- ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
+ ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(struct stm32_spi));
if (!ctrl) {
dev_err(&pdev->dev, "spi controller allocation failed\n");
return -ENOMEM;
@@ -1892,6 +2150,22 @@ static int stm32_spi_probe(struct platform_device *pdev)
if (spi->cfg->has_fifo)
spi->fifo_size = spi->cfg->get_fifo_size(spi);
+ spi->feature_set = STM32_SPI_FEATURE_FULL;
+ if (spi->cfg->regs->fullcfg.reg) {
+ spi->feature_set =
+ FIELD_GET(STM32MP25_SPI_HWCFGR1_FULLCFG,
+ readl_relaxed(spi->base + spi->cfg->regs->fullcfg.reg));
+
+ dev_dbg(spi->dev, "%s feature set\n",
+ spi->feature_set == STM32_SPI_FEATURE_FULL ? "full" : "limited");
+ }
+
+ /* Only for STM32H7 and later */
+ spi->t_size_max = spi->feature_set == STM32_SPI_FEATURE_FULL ?
+ STM32H7_SPI_TSIZE_MAX :
+ STM32MP25_SPI_TSIZE_MAX_LIMITED;
+ dev_dbg(spi->dev, "one message max size %d\n", spi->t_size_max);
+
ret = spi->cfg->config(spi);
if (ret) {
dev_err(&pdev->dev, "controller configuration failed: %d\n",
@@ -1913,7 +2187,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
ctrl->unprepare_message = stm32_spi_unprepare_msg;
ctrl->flags = spi->cfg->flags;
if (STM32_SPI_DEVICE_MODE(spi))
- ctrl->slave_abort = stm32h7_spi_device_abort;
+ ctrl->target_abort = stm32h7_spi_device_abort;
spi->dma_tx = dma_request_chan(spi->dev, "tx");
if (IS_ERR(spi->dma_tx)) {
@@ -1960,7 +2234,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
pm_runtime_put_autosuspend(&pdev->dev);
dev_info(&pdev->dev, "driver initialized (%s mode)\n",
- STM32_SPI_MASTER_MODE(spi) ? "master" : "device");
+ STM32_SPI_HOST_MODE(spi) ? "host" : "device");
return 0;
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index b8947265d329..11d8bd27b3e9 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -75,7 +75,7 @@
#define SUN4I_FIFO_STA_TF_CNT_BITS 16
struct sun4i_spi {
- struct spi_master *master;
+ struct spi_controller *host;
void __iomem *base_addr;
struct clk *hclk;
struct clk *mclk;
@@ -161,7 +161,7 @@ static inline void sun4i_spi_fill_fifo(struct sun4i_spi *sspi, int len)
static void sun4i_spi_set_cs(struct spi_device *spi, bool enable)
{
- struct sun4i_spi *sspi = spi_master_get_devdata(spi->master);
+ struct sun4i_spi *sspi = spi_controller_get_devdata(spi->controller);
u32 reg;
reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
@@ -201,11 +201,11 @@ static size_t sun4i_spi_max_transfer_size(struct spi_device *spi)
return SUN4I_MAX_XFER_SIZE - 1;
}
-static int sun4i_spi_transfer_one(struct spi_master *master,
+static int sun4i_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *tfr)
{
- struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ struct sun4i_spi *sspi = spi_controller_get_devdata(host);
unsigned int mclk_rate, div, timeout;
unsigned int start, end, tx_time;
unsigned int tx_len = 0;
@@ -331,7 +331,7 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
msecs_to_jiffies(tx_time));
end = jiffies;
if (!timeout) {
- dev_warn(&master->dev,
+ dev_warn(&host->dev,
"%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
dev_name(&spi->dev), tfr->len, tfr->speed_hz,
jiffies_to_msecs(end - start), tx_time);
@@ -386,8 +386,8 @@ static irqreturn_t sun4i_spi_handler(int irq, void *dev_id)
static int sun4i_spi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct sun4i_spi *sspi = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(sspi->hclk);
@@ -415,8 +415,8 @@ out:
static int sun4i_spi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct sun4i_spi *sspi = spi_controller_get_devdata(host);
clk_disable_unprepare(sspi->mclk);
clk_disable_unprepare(sspi->hclk);
@@ -426,62 +426,62 @@ static int sun4i_spi_runtime_suspend(struct device *dev)
static int sun4i_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct sun4i_spi *sspi;
int ret = 0, irq;
- master = spi_alloc_master(&pdev->dev, sizeof(struct sun4i_spi));
- if (!master) {
- dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(struct sun4i_spi));
+ if (!host) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Host\n");
return -ENOMEM;
}
- platform_set_drvdata(pdev, master);
- sspi = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, host);
+ sspi = spi_controller_get_devdata(host);
sspi->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sspi->base_addr)) {
ret = PTR_ERR(sspi->base_addr);
- goto err_free_master;
+ goto err_free_host;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENXIO;
- goto err_free_master;
+ goto err_free_host;
}
ret = devm_request_irq(&pdev->dev, irq, sun4i_spi_handler,
0, "sun4i-spi", sspi);
if (ret) {
dev_err(&pdev->dev, "Cannot request IRQ\n");
- goto err_free_master;
+ goto err_free_host;
}
- sspi->master = master;
- master->max_speed_hz = 100 * 1000 * 1000;
- master->min_speed_hz = 3 * 1000;
- master->set_cs = sun4i_spi_set_cs;
- master->transfer_one = sun4i_spi_transfer_one;
- master->num_chipselect = 4;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->dev.of_node = pdev->dev.of_node;
- master->auto_runtime_pm = true;
- master->max_transfer_size = sun4i_spi_max_transfer_size;
+ sspi->host = host;
+ host->max_speed_hz = 100 * 1000 * 1000;
+ host->min_speed_hz = 3 * 1000;
+ host->set_cs = sun4i_spi_set_cs;
+ host->transfer_one = sun4i_spi_transfer_one;
+ host->num_chipselect = 4;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->dev.of_node = pdev->dev.of_node;
+ host->auto_runtime_pm = true;
+ host->max_transfer_size = sun4i_spi_max_transfer_size;
sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(sspi->hclk)) {
dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
ret = PTR_ERR(sspi->hclk);
- goto err_free_master;
+ goto err_free_host;
}
sspi->mclk = devm_clk_get(&pdev->dev, "mod");
if (IS_ERR(sspi->mclk)) {
dev_err(&pdev->dev, "Unable to acquire module clock\n");
ret = PTR_ERR(sspi->mclk);
- goto err_free_master;
+ goto err_free_host;
}
init_completion(&sspi->done);
@@ -493,16 +493,16 @@ static int sun4i_spi_probe(struct platform_device *pdev)
ret = sun4i_spi_runtime_resume(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Couldn't resume the device\n");
- goto err_free_master;
+ goto err_free_host;
}
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_idle(&pdev->dev);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret) {
- dev_err(&pdev->dev, "cannot register SPI master\n");
+ dev_err(&pdev->dev, "cannot register SPI host\n");
goto err_pm_disable;
}
@@ -511,8 +511,8 @@ static int sun4i_spi_probe(struct platform_device *pdev)
err_pm_disable:
pm_runtime_disable(&pdev->dev);
sun4i_spi_runtime_suspend(&pdev->dev);
-err_free_master:
- spi_master_put(master);
+err_free_host:
+ spi_controller_put(host);
return ret;
}
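/*
 * Editorial sketch, not part of the patch: the master -> host rename applied
 * to the Allwinner drivers follows one common probe pattern. A minimal
 * skeleton (hypothetical driver and struct names) looks like:
 */
static int example_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct example_spi *priv;
	int ret;

	host = spi_alloc_host(&pdev->dev, sizeof(*priv));
	if (!host)
		return -ENOMEM;

	platform_set_drvdata(pdev, host);
	priv = spi_controller_get_devdata(host);
	priv->host = host;

	/* ... clocks, IRQ, host->transfer_one, host->set_cs, etc. ... */

	ret = devm_spi_register_controller(&pdev->dev, host);
	if (ret)
		goto err_free_host;

	return 0;

err_free_host:
	spi_controller_put(host);
	return ret;
}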
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index fddc63309773..cd018ea1abf1 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -97,7 +97,7 @@ struct sun6i_spi_cfg {
};
struct sun6i_spi {
- struct spi_master *master;
+ struct spi_controller *host;
void __iomem *base_addr;
dma_addr_t dma_addr_rx;
dma_addr_t dma_addr_tx;
@@ -181,7 +181,7 @@ static inline void sun6i_spi_fill_fifo(struct sun6i_spi *sspi)
static void sun6i_spi_set_cs(struct spi_device *spi, bool enable)
{
- struct sun6i_spi *sspi = spi_master_get_devdata(spi->master);
+ struct sun6i_spi *sspi = spi_controller_get_devdata(spi->controller);
u32 reg;
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
@@ -212,7 +212,7 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
struct spi_transfer *tfr)
{
struct dma_async_tx_descriptor *rxdesc, *txdesc;
- struct spi_master *master = sspi->master;
+ struct spi_controller *host = sspi->host;
rxdesc = NULL;
if (tfr->rx_buf) {
@@ -223,9 +223,9 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
.src_maxburst = 8,
};
- dmaengine_slave_config(master->dma_rx, &rxconf);
+ dmaengine_slave_config(host->dma_rx, &rxconf);
- rxdesc = dmaengine_prep_slave_sg(master->dma_rx,
+ rxdesc = dmaengine_prep_slave_sg(host->dma_rx,
tfr->rx_sg.sgl,
tfr->rx_sg.nents,
DMA_DEV_TO_MEM,
@@ -245,38 +245,38 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
.dst_maxburst = 8,
};
- dmaengine_slave_config(master->dma_tx, &txconf);
+ dmaengine_slave_config(host->dma_tx, &txconf);
- txdesc = dmaengine_prep_slave_sg(master->dma_tx,
+ txdesc = dmaengine_prep_slave_sg(host->dma_tx,
tfr->tx_sg.sgl,
tfr->tx_sg.nents,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
if (!txdesc) {
if (rxdesc)
- dmaengine_terminate_sync(master->dma_rx);
+ dmaengine_terminate_sync(host->dma_rx);
return -EINVAL;
}
}
if (tfr->rx_buf) {
dmaengine_submit(rxdesc);
- dma_async_issue_pending(master->dma_rx);
+ dma_async_issue_pending(host->dma_rx);
}
if (tfr->tx_buf) {
dmaengine_submit(txdesc);
- dma_async_issue_pending(master->dma_tx);
+ dma_async_issue_pending(host->dma_tx);
}
return 0;
}
-static int sun6i_spi_transfer_one(struct spi_master *master,
+static int sun6i_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *tfr)
{
- struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ struct sun6i_spi *sspi = spi_controller_get_devdata(host);
unsigned int div, div_cdr1, div_cdr2, timeout;
unsigned int start, end, tx_time;
unsigned int trig_level;
@@ -293,7 +293,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
sspi->tx_buf = tfr->tx_buf;
sspi->rx_buf = tfr->rx_buf;
sspi->len = tfr->len;
- use_dma = master->can_dma ? master->can_dma(master, spi, tfr) : false;
+ use_dma = host->can_dma ? host->can_dma(host, spi, tfr) : false;
/* Clear pending interrupts */
sun6i_spi_write(sspi, SUN6I_INT_STA_REG, ~0);
@@ -463,7 +463,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
} else {
ret = sun6i_spi_prepare_dma(sspi, tfr);
if (ret) {
- dev_warn(&master->dev,
+ dev_warn(&host->dev,
"%s: prepare DMA failed, ret=%d",
dev_name(&spi->dev), ret);
return ret;
@@ -486,7 +486,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
- tx_time = spi_controller_xfer_timeout(master, tfr);
+ tx_time = spi_controller_xfer_timeout(host, tfr);
start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
msecs_to_jiffies(tx_time));
@@ -502,13 +502,13 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
timeout = wait_for_completion_timeout(&sspi->dma_rx_done,
timeout);
if (!timeout)
- dev_warn(&master->dev, "RX DMA timeout\n");
+ dev_warn(&host->dev, "RX DMA timeout\n");
}
}
end = jiffies;
if (!timeout) {
- dev_warn(&master->dev,
+ dev_warn(&host->dev,
"%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
dev_name(&spi->dev), tfr->len, tfr->speed_hz,
jiffies_to_msecs(end - start), tx_time);
@@ -518,8 +518,8 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);
if (ret && use_dma) {
- dmaengine_terminate_sync(master->dma_rx);
- dmaengine_terminate_sync(master->dma_tx);
+ dmaengine_terminate_sync(host->dma_rx);
+ dmaengine_terminate_sync(host->dma_tx);
}
return ret;
@@ -564,8 +564,8 @@ static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
static int sun6i_spi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct sun6i_spi *sspi = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(sspi->hclk);
@@ -601,8 +601,8 @@ out:
static int sun6i_spi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct sun6i_spi *sspi = spi_controller_get_devdata(host);
reset_control_assert(sspi->rstc);
clk_disable_unprepare(sspi->mclk);
@@ -611,11 +611,11 @@ static int sun6i_spi_runtime_suspend(struct device *dev)
return 0;
}
-static bool sun6i_spi_can_dma(struct spi_master *master,
+static bool sun6i_spi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ struct sun6i_spi *sspi = spi_controller_get_devdata(host);
/*
* If the number of SPI words to transfer is less than or equal to
@@ -627,67 +627,67 @@ static bool sun6i_spi_can_dma(struct spi_master *master,
static int sun6i_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct sun6i_spi *sspi;
struct resource *mem;
int ret = 0, irq;
- master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi));
- if (!master) {
- dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(struct sun6i_spi));
+ if (!host) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Host\n");
return -ENOMEM;
}
- platform_set_drvdata(pdev, master);
- sspi = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, host);
+ sspi = spi_controller_get_devdata(host);
sspi->base_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(sspi->base_addr)) {
ret = PTR_ERR(sspi->base_addr);
- goto err_free_master;
+ goto err_free_host;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENXIO;
- goto err_free_master;
+ goto err_free_host;
}
ret = devm_request_irq(&pdev->dev, irq, sun6i_spi_handler,
0, "sun6i-spi", sspi);
if (ret) {
dev_err(&pdev->dev, "Cannot request IRQ\n");
- goto err_free_master;
+ goto err_free_host;
}
- sspi->master = master;
+ sspi->host = host;
sspi->cfg = of_device_get_match_data(&pdev->dev);
- master->max_speed_hz = 100 * 1000 * 1000;
- master->min_speed_hz = 3 * 1000;
- master->use_gpio_descriptors = true;
- master->set_cs = sun6i_spi_set_cs;
- master->transfer_one = sun6i_spi_transfer_one;
- master->num_chipselect = 4;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
- sspi->cfg->mode_bits;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->dev.of_node = pdev->dev.of_node;
- master->auto_runtime_pm = true;
- master->max_transfer_size = sun6i_spi_max_transfer_size;
+ host->max_speed_hz = 100 * 1000 * 1000;
+ host->min_speed_hz = 3 * 1000;
+ host->use_gpio_descriptors = true;
+ host->set_cs = sun6i_spi_set_cs;
+ host->transfer_one = sun6i_spi_transfer_one;
+ host->num_chipselect = 4;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
+ sspi->cfg->mode_bits;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->dev.of_node = pdev->dev.of_node;
+ host->auto_runtime_pm = true;
+ host->max_transfer_size = sun6i_spi_max_transfer_size;
sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(sspi->hclk)) {
dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
ret = PTR_ERR(sspi->hclk);
- goto err_free_master;
+ goto err_free_host;
}
sspi->mclk = devm_clk_get(&pdev->dev, "mod");
if (IS_ERR(sspi->mclk)) {
dev_err(&pdev->dev, "Unable to acquire module clock\n");
ret = PTR_ERR(sspi->mclk);
- goto err_free_master;
+ goto err_free_host;
}
init_completion(&sspi->done);
@@ -697,34 +697,34 @@ static int sun6i_spi_probe(struct platform_device *pdev)
if (IS_ERR(sspi->rstc)) {
dev_err(&pdev->dev, "Couldn't get reset controller\n");
ret = PTR_ERR(sspi->rstc);
- goto err_free_master;
+ goto err_free_host;
}
- master->dma_tx = dma_request_chan(&pdev->dev, "tx");
- if (IS_ERR(master->dma_tx)) {
+ host->dma_tx = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(host->dma_tx)) {
/* Check tx to see if we need to defer probing the driver */
- if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
+ if (PTR_ERR(host->dma_tx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
- goto err_free_master;
+ goto err_free_host;
}
dev_warn(&pdev->dev, "Failed to request TX DMA channel\n");
- master->dma_tx = NULL;
+ host->dma_tx = NULL;
}
- master->dma_rx = dma_request_chan(&pdev->dev, "rx");
- if (IS_ERR(master->dma_rx)) {
- if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
+ host->dma_rx = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(host->dma_rx)) {
+ if (PTR_ERR(host->dma_rx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_free_dma_tx;
}
dev_warn(&pdev->dev, "Failed to request RX DMA channel\n");
- master->dma_rx = NULL;
+ host->dma_rx = NULL;
}
- if (master->dma_tx && master->dma_rx) {
+ if (host->dma_tx && host->dma_rx) {
sspi->dma_addr_tx = mem->start + SUN6I_TXDATA_REG;
sspi->dma_addr_rx = mem->start + SUN6I_RXDATA_REG;
- master->can_dma = sun6i_spi_can_dma;
+ host->can_dma = sun6i_spi_can_dma;
}
/*
@@ -742,9 +742,9 @@ static int sun6i_spi_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret) {
- dev_err(&pdev->dev, "cannot register SPI master\n");
+ dev_err(&pdev->dev, "cannot register SPI host\n");
goto err_pm_disable;
}
@@ -754,26 +754,26 @@ err_pm_disable:
pm_runtime_disable(&pdev->dev);
sun6i_spi_runtime_suspend(&pdev->dev);
err_free_dma_rx:
- if (master->dma_rx)
- dma_release_channel(master->dma_rx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
err_free_dma_tx:
- if (master->dma_tx)
- dma_release_channel(master->dma_tx);
-err_free_master:
- spi_master_put(master);
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+err_free_host:
+ spi_controller_put(host);
return ret;
}
static void sun6i_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_controller *host = platform_get_drvdata(pdev);
pm_runtime_force_suspend(&pdev->dev);
- if (master->dma_tx)
- dma_release_channel(master->dma_tx);
- if (master->dma_rx)
- dma_release_channel(master->dma_rx);
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
}
static const struct sun6i_spi_cfg sun6i_a31_spi_cfg = {
diff --git a/drivers/spi/spi-sunplus-sp7021.c b/drivers/spi/spi-sunplus-sp7021.c
index eb8f835a4771..4e481380c259 100644
--- a/drivers/spi/spi-sunplus-sp7021.c
+++ b/drivers/spi/spi-sunplus-sp7021.c
@@ -70,8 +70,8 @@
#define SP7021_FIFO_DATA_LEN (16)
enum {
- SP7021_MASTER_MODE = 0,
- SP7021_SLAVE_MODE = 1,
+ SP7021_HOST_MODE = 0,
+ SP7021_TARGET_MODE = 1,
};
struct sp7021_spi_ctlr {
@@ -88,7 +88,7 @@ struct sp7021_spi_ctlr {
// data xfer lock
struct mutex buf_lock;
struct completion isr_done;
- struct completion slave_isr;
+ struct completion target_isr;
unsigned int rx_cur_len;
unsigned int tx_cur_len;
unsigned int data_unit;
@@ -96,7 +96,7 @@ struct sp7021_spi_ctlr {
u8 *rx_buf;
};
-static irqreturn_t sp7021_spi_slave_irq(int irq, void *dev)
+static irqreturn_t sp7021_spi_target_irq(int irq, void *dev)
{
struct sp7021_spi_ctlr *pspim = dev;
unsigned int data_status;
@@ -104,25 +104,25 @@ static irqreturn_t sp7021_spi_slave_irq(int irq, void *dev)
data_status = readl(pspim->s_base + SP7021_DATA_RDY_REG);
data_status |= SP7021_SLAVE_CLR_INT;
writel(data_status , pspim->s_base + SP7021_DATA_RDY_REG);
- complete(&pspim->slave_isr);
+ complete(&pspim->target_isr);
return IRQ_HANDLED;
}
-static int sp7021_spi_slave_abort(struct spi_controller *ctlr)
+static int sp7021_spi_target_abort(struct spi_controller *ctlr)
{
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
- complete(&pspim->slave_isr);
+ complete(&pspim->target_isr);
complete(&pspim->isr_done);
return 0;
}
-static int sp7021_spi_slave_tx(struct spi_device *spi, struct spi_transfer *xfer)
+static int sp7021_spi_target_tx(struct spi_device *spi, struct spi_transfer *xfer)
{
struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(spi->controller);
u32 value;
- reinit_completion(&pspim->slave_isr);
+ reinit_completion(&pspim->target_isr);
value = SP7021_SLAVE_DMA_EN | SP7021_SLAVE_DMA_RW | FIELD_PREP(SP7021_SLAVE_DMA_CMD, 3);
writel(value, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG);
writel(xfer->len, pspim->s_base + SP7021_SLAVE_DMA_LENGTH_REG);
@@ -137,7 +137,7 @@ static int sp7021_spi_slave_tx(struct spi_device *spi, struct spi_transfer *xfer
return 0;
}
-static int sp7021_spi_slave_rx(struct spi_device *spi, struct spi_transfer *xfer)
+static int sp7021_spi_target_rx(struct spi_device *spi, struct spi_transfer *xfer)
{
struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(spi->controller);
u32 value;
@@ -155,7 +155,7 @@ static int sp7021_spi_slave_rx(struct spi_device *spi, struct spi_transfer *xfer
return 0;
}
-static void sp7021_spi_master_rb(struct sp7021_spi_ctlr *pspim, unsigned int len)
+static void sp7021_spi_host_rb(struct sp7021_spi_ctlr *pspim, unsigned int len)
{
int i;
@@ -166,7 +166,7 @@ static void sp7021_spi_master_rb(struct sp7021_spi_ctlr *pspim, unsigned int len
}
}
-static void sp7021_spi_master_wb(struct sp7021_spi_ctlr *pspim, unsigned int len)
+static void sp7021_spi_host_wb(struct sp7021_spi_ctlr *pspim, unsigned int len)
{
int i;
@@ -177,7 +177,7 @@ static void sp7021_spi_master_wb(struct sp7021_spi_ctlr *pspim, unsigned int len
}
}
-static irqreturn_t sp7021_spi_master_irq(int irq, void *dev)
+static irqreturn_t sp7021_spi_host_irq(int irq, void *dev)
{
struct sp7021_spi_ctlr *pspim = dev;
unsigned int tx_cnt, total_len;
@@ -206,9 +206,9 @@ static irqreturn_t sp7021_spi_master_irq(int irq, void *dev)
fd_status, rx_cnt, tx_cnt, tx_len);
if (rx_cnt > 0)
- sp7021_spi_master_rb(pspim, rx_cnt);
+ sp7021_spi_host_rb(pspim, rx_cnt);
if (tx_cnt > 0)
- sp7021_spi_master_wb(pspim, tx_cnt);
+ sp7021_spi_host_wb(pspim, tx_cnt);
fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
tx_len = FIELD_GET(SP7021_TX_LEN_MASK, fd_status);
@@ -224,7 +224,7 @@ static irqreturn_t sp7021_spi_master_irq(int irq, void *dev)
rx_cnt = FIELD_GET(SP7021_RX_CNT_MASK, fd_status);
if (rx_cnt > 0)
- sp7021_spi_master_rb(pspim, rx_cnt);
+ sp7021_spi_host_rb(pspim, rx_cnt);
}
value = readl(pspim->m_base + SP7021_INT_BUSY_REG);
value |= SP7021_CLR_MASTER_INT;
@@ -240,7 +240,7 @@ static irqreturn_t sp7021_spi_master_irq(int irq, void *dev)
static void sp7021_prep_transfer(struct spi_controller *ctlr, struct spi_device *spi)
{
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
pspim->tx_cur_len = 0;
pspim->rx_cur_len = 0;
@@ -251,7 +251,7 @@ static void sp7021_prep_transfer(struct spi_controller *ctlr, struct spi_device
static int sp7021_spi_controller_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
struct spi_device *s = msg->spi;
u32 valus, rs = 0;
@@ -283,7 +283,7 @@ static int sp7021_spi_controller_prepare_message(struct spi_controller *ctlr,
static void sp7021_spi_setup_clk(struct spi_controller *ctlr, struct spi_transfer *xfer)
{
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
u32 clk_rate, clk_sel, div;
clk_rate = clk_get_rate(pspim->spi_clk);
@@ -295,10 +295,10 @@ static void sp7021_spi_setup_clk(struct spi_controller *ctlr, struct spi_transfe
writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG);
}
-static int sp7021_spi_master_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
+static int sp7021_spi_host_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
unsigned long timeout = msecs_to_jiffies(1000);
unsigned int xfer_cnt, xfer_len, last_len;
unsigned int i, len_temp;
@@ -323,7 +323,7 @@ static int sp7021_spi_master_transfer_one(struct spi_controller *ctlr, struct sp
if (pspim->tx_cur_len < xfer_len) {
len_temp = min(pspim->data_unit, xfer_len);
- sp7021_spi_master_wb(pspim, len_temp);
+ sp7021_spi_host_wb(pspim, len_temp);
}
reg_temp = readl(pspim->m_base + SP7021_SPI_CONFIG_REG);
reg_temp &= ~SP7021_CLEAN_RW_BYTE;
@@ -359,10 +359,10 @@ static int sp7021_spi_master_transfer_one(struct spi_controller *ctlr, struct sp
return 0;
}
-static int sp7021_spi_slave_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
+static int sp7021_spi_target_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
struct device *dev = pspim->dev;
int ret;
@@ -371,14 +371,14 @@ static int sp7021_spi_slave_transfer_one(struct spi_controller *ctlr, struct spi
xfer->len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, xfer->tx_dma))
return -ENOMEM;
- ret = sp7021_spi_slave_tx(spi, xfer);
+ ret = sp7021_spi_target_tx(spi, xfer);
dma_unmap_single(dev, xfer->tx_dma, xfer->len, DMA_TO_DEVICE);
} else if (xfer->rx_buf && !xfer->tx_buf) {
xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, xfer->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, xfer->rx_dma))
return -ENOMEM;
- ret = sp7021_spi_slave_rx(spi, xfer);
+ ret = sp7021_spi_target_rx(spi, xfer);
dma_unmap_single(dev, xfer->rx_dma, xfer->len, DMA_FROM_DEVICE);
} else {
dev_dbg(&ctlr->dev, "%s() wrong command\n", __func__);
@@ -409,14 +409,14 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev)
pdev->id = of_alias_get_id(pdev->dev.of_node, "sp_spi");
if (device_property_read_bool(dev, "spi-slave"))
- mode = SP7021_SLAVE_MODE;
+ mode = SP7021_TARGET_MODE;
else
- mode = SP7021_MASTER_MODE;
+ mode = SP7021_HOST_MODE;
- if (mode == SP7021_SLAVE_MODE)
- ctlr = devm_spi_alloc_slave(dev, sizeof(*pspim));
+ if (mode == SP7021_TARGET_MODE)
+ ctlr = devm_spi_alloc_target(dev, sizeof(*pspim));
else
- ctlr = devm_spi_alloc_master(dev, sizeof(*pspim));
+ ctlr = devm_spi_alloc_host(dev, sizeof(*pspim));
if (!ctlr)
return -ENOMEM;
device_set_node(&ctlr->dev, dev_fwnode(dev));
@@ -424,9 +424,9 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev)
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
ctlr->auto_runtime_pm = true;
ctlr->prepare_message = sp7021_spi_controller_prepare_message;
- if (mode == SP7021_SLAVE_MODE) {
- ctlr->transfer_one = sp7021_spi_slave_transfer_one;
- ctlr->slave_abort = sp7021_spi_slave_abort;
+ if (mode == SP7021_TARGET_MODE) {
+ ctlr->transfer_one = sp7021_spi_target_transfer_one;
+ ctlr->target_abort = sp7021_spi_target_abort;
ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
} else {
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
@@ -434,7 +434,7 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev)
ctlr->max_speed_hz = 25000000;
ctlr->use_gpio_descriptors = true;
ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
- ctlr->transfer_one = sp7021_spi_master_transfer_one;
+ ctlr->transfer_one = sp7021_spi_host_transfer_one;
}
platform_set_drvdata(pdev, ctlr);
pspim = spi_controller_get_devdata(ctlr);
@@ -443,7 +443,7 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev)
pspim->dev = dev;
mutex_init(&pspim->buf_lock);
init_completion(&pspim->isr_done);
- init_completion(&pspim->slave_isr);
+ init_completion(&pspim->target_isr);
pspim->m_base = devm_platform_ioremap_resource_byname(pdev, "master");
if (IS_ERR(pspim->m_base))
@@ -485,12 +485,12 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_request_irq(dev, pspim->m_irq, sp7021_spi_master_irq,
+ ret = devm_request_irq(dev, pspim->m_irq, sp7021_spi_host_irq,
IRQF_TRIGGER_RISING, pdev->name, pspim);
if (ret)
return ret;
- ret = devm_request_irq(dev, pspim->s_irq, sp7021_spi_slave_irq,
+ ret = devm_request_irq(dev, pspim->s_irq, sp7021_spi_target_irq,
IRQF_TRIGGER_RISING, pdev->name, pspim);
if (ret)
return ret;
@@ -499,7 +499,7 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev)
ret = spi_register_controller(ctlr);
if (ret) {
pm_runtime_disable(dev);
- return dev_err_probe(dev, ret, "spi_register_master fail\n");
+ return dev_err_probe(dev, ret, "spi_register_controller fail\n");
}
return 0;
}
@@ -516,7 +516,7 @@ static void sp7021_spi_controller_remove(struct platform_device *pdev)
static int __maybe_unused sp7021_spi_controller_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
return reset_control_assert(pspim->rstc);
}
@@ -524,7 +524,7 @@ static int __maybe_unused sp7021_spi_controller_suspend(struct device *dev)
static int __maybe_unused sp7021_spi_controller_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
reset_control_deassert(pspim->rstc);
return clk_prepare_enable(pspim->spi_clk);
@@ -534,7 +534,7 @@ static int __maybe_unused sp7021_spi_controller_resume(struct device *dev)
static int sp7021_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
return reset_control_assert(pspim->rstc);
}
@@ -542,7 +542,7 @@ static int sp7021_spi_runtime_suspend(struct device *dev)
static int sp7021_spi_runtime_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
return reset_control_deassert(pspim->rstc);
}
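/*
 * Editorial recap, not part of the patch: condensing the sp7021 hunks above,
 * the "spi-slave" DT property is kept while the Linux side switches to
 * target/host terminology. The mode selection and abort wiring reduce to:
 */
	if (device_property_read_bool(dev, "spi-slave"))
		ctlr = devm_spi_alloc_target(dev, sizeof(*pspim));
	else
		ctlr = devm_spi_alloc_host(dev, sizeof(*pspim));
	if (!ctlr)
		return -ENOMEM;

	if (mode == SP7021_TARGET_MODE) {
		ctlr->transfer_one = sp7021_spi_target_transfer_one;
		ctlr->target_abort = sp7021_spi_target_abort;
	} else {
		ctlr->transfer_one = sp7021_spi_host_transfer_one;
	}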
diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
index aeaf7db022f0..7cb4301a6fb2 100644
--- a/drivers/spi/spi-synquacer.c
+++ b/drivers/spi/spi-synquacer.c
@@ -225,11 +225,11 @@ static int write_fifo(struct synquacer_spi *sspi)
return 0;
}
-static int synquacer_spi_config(struct spi_master *master,
+static int synquacer_spi_config(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ struct synquacer_spi *sspi = spi_controller_get_devdata(host);
unsigned int speed, mode, bpw, cs, bus_width, transfer_mode;
u32 rate, val, div;
@@ -263,7 +263,7 @@ static int synquacer_spi_config(struct spi_master *master,
}
sspi->transfer_mode = transfer_mode;
- rate = master->max_speed_hz;
+ rate = host->max_speed_hz;
div = DIV_ROUND_UP(rate, speed);
if (div > 254) {
@@ -350,11 +350,11 @@ static int synquacer_spi_config(struct spi_master *master,
return 0;
}
-static int synquacer_spi_transfer_one(struct spi_master *master,
+static int synquacer_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ struct synquacer_spi *sspi = spi_controller_get_devdata(host);
int ret;
int status = 0;
u32 words;
@@ -378,7 +378,7 @@ static int synquacer_spi_transfer_one(struct spi_master *master,
if (bpw == 8 && !(xfer->len % 4) && !(spi->mode & SPI_LSB_FIRST))
xfer->bits_per_word = 32;
- ret = synquacer_spi_config(master, spi, xfer);
+ ret = synquacer_spi_config(host, spi, xfer);
/* restore */
xfer->bits_per_word = bpw;
@@ -482,7 +482,7 @@ static int synquacer_spi_transfer_one(struct spi_master *master,
static void synquacer_spi_set_cs(struct spi_device *spi, bool enable)
{
- struct synquacer_spi *sspi = spi_master_get_devdata(spi->master);
+ struct synquacer_spi *sspi = spi_controller_get_devdata(spi->controller);
u32 val;
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
@@ -517,11 +517,11 @@ static int synquacer_spi_wait_status_update(struct synquacer_spi *sspi,
return -EBUSY;
}
-static int synquacer_spi_enable(struct spi_master *master)
+static int synquacer_spi_enable(struct spi_controller *host)
{
u32 val;
int status;
- struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ struct synquacer_spi *sspi = spi_controller_get_devdata(host);
/* Disable module */
writel(0, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
@@ -601,18 +601,18 @@ static irqreturn_t sq_spi_tx_handler(int irq, void *priv)
static int synquacer_spi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct spi_master *master;
+ struct spi_controller *host;
struct synquacer_spi *sspi;
int ret;
int rx_irq, tx_irq;
- master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*sspi));
+ if (!host)
return -ENOMEM;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- sspi = spi_master_get_devdata(master);
+ sspi = spi_controller_get_devdata(host);
sspi->dev = &pdev->dev;
init_completion(&sspi->transfer_done);
@@ -625,7 +625,7 @@ static int synquacer_spi_probe(struct platform_device *pdev)
sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK; /* Default */
device_property_read_u32(&pdev->dev, "socionext,ihclk-rate",
- &master->max_speed_hz); /* for ACPI */
+ &host->max_speed_hz); /* for ACPI */
if (dev_of_node(&pdev->dev)) {
if (device_property_match_string(&pdev->dev,
@@ -655,21 +655,21 @@ static int synquacer_spi_probe(struct platform_device *pdev)
goto put_spi;
}
- master->max_speed_hz = clk_get_rate(sspi->clk);
+ host->max_speed_hz = clk_get_rate(sspi->clk);
}
- if (!master->max_speed_hz) {
+ if (!host->max_speed_hz) {
dev_err(&pdev->dev, "missing clock source\n");
ret = -EINVAL;
goto disable_clk;
}
- master->min_speed_hz = master->max_speed_hz / 254;
+ host->min_speed_hz = host->max_speed_hz / 254;
sspi->aces = device_property_read_bool(&pdev->dev,
"socionext,set-aces");
sspi->rtm = device_property_read_bool(&pdev->dev, "socionext,use-rtm");
- master->num_chipselect = SYNQUACER_HSSPI_NUM_CHIP_SELECT;
+ host->num_chipselect = SYNQUACER_HSSPI_NUM_CHIP_SELECT;
rx_irq = platform_get_irq(pdev, 0);
if (rx_irq <= 0) {
@@ -699,27 +699,27 @@ static int synquacer_spi_probe(struct platform_device *pdev)
goto disable_clk;
}
- master->dev.of_node = np;
- master->dev.fwnode = pdev->dev.fwnode;
- master->auto_runtime_pm = true;
- master->bus_num = pdev->id;
+ host->dev.of_node = np;
+ host->dev.fwnode = pdev->dev.fwnode;
+ host->auto_runtime_pm = true;
+ host->bus_num = pdev->id;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL |
- SPI_TX_QUAD | SPI_RX_QUAD;
- master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
- SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL |
+ SPI_TX_QUAD | SPI_RX_QUAD;
+ host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
+ SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
- master->set_cs = synquacer_spi_set_cs;
- master->transfer_one = synquacer_spi_transfer_one;
+ host->set_cs = synquacer_spi_set_cs;
+ host->transfer_one = synquacer_spi_transfer_one;
- ret = synquacer_spi_enable(master);
+ ret = synquacer_spi_enable(host);
if (ret)
goto disable_clk;
pm_runtime_set_active(sspi->dev);
pm_runtime_enable(sspi->dev);
- ret = devm_spi_register_master(sspi->dev, master);
+ ret = devm_spi_register_controller(sspi->dev, host);
if (ret)
goto disable_pm;
@@ -730,15 +730,15 @@ disable_pm:
disable_clk:
clk_disable_unprepare(sspi->clk);
put_spi:
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
static void synquacer_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct synquacer_spi *sspi = spi_controller_get_devdata(host);
pm_runtime_disable(sspi->dev);
@@ -747,11 +747,11 @@ static void synquacer_spi_remove(struct platform_device *pdev)
static int __maybe_unused synquacer_spi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct synquacer_spi *sspi = spi_controller_get_devdata(host);
int ret;
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(host);
if (ret)
return ret;
@@ -763,8 +763,8 @@ static int __maybe_unused synquacer_spi_suspend(struct device *dev)
static int __maybe_unused synquacer_spi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct synquacer_spi *sspi = spi_controller_get_devdata(host);
int ret;
if (!pm_runtime_suspended(dev)) {
@@ -778,7 +778,7 @@ static int __maybe_unused synquacer_spi_resume(struct device *dev)
return ret;
}
- ret = synquacer_spi_enable(master);
+ ret = synquacer_spi_enable(host);
if (ret) {
clk_disable_unprepare(sspi->clk);
dev_err(dev, "failed to enable spi (%d)\n", ret);
@@ -786,7 +786,7 @@ static int __maybe_unused synquacer_spi_resume(struct device *dev)
}
}
- ret = spi_master_resume(master);
+ ret = spi_controller_resume(host);
if (ret < 0)
clk_disable_unprepare(sspi->clk);
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 460f232dad50..bc7cc4088eea 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -164,7 +164,7 @@ struct tegra_spi_client_data {
struct tegra_spi_data {
struct device *dev;
- struct spi_master *master;
+ struct spi_controller *host;
spinlock_t lock;
struct clk *clk;
@@ -718,7 +718,7 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
{
- struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller);
struct spi_delay *setup = &spi->cs_setup;
struct spi_delay *hold = &spi->cs_hold;
struct spi_delay *inactive = &spi->cs_inactive;
@@ -772,7 +772,7 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
bool is_first_of_msg,
bool is_single_xfer)
{
- struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller);
struct tegra_spi_client_data *cdata = spi->controller_data;
u32 speed = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
@@ -865,7 +865,7 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
static int tegra_spi_start_transfer_one(struct spi_device *spi,
struct spi_transfer *t, u32 command1)
{
- struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller);
unsigned total_fifo_words;
int ret;
@@ -912,10 +912,10 @@ static struct tegra_spi_client_data
*tegra_spi_parse_cdata_dt(struct spi_device *spi)
{
struct tegra_spi_client_data *cdata;
- struct device_node *slave_np;
+ struct device_node *target_np;
- slave_np = spi->dev.of_node;
- if (!slave_np) {
+ target_np = spi->dev.of_node;
+ if (!target_np) {
dev_dbg(&spi->dev, "device node not found\n");
return NULL;
}
@@ -924,9 +924,9 @@ static struct tegra_spi_client_data
if (!cdata)
return NULL;
- of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
+ of_property_read_u32(target_np, "nvidia,tx-clk-tap-delay",
&cdata->tx_clk_tap_delay);
- of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
+ of_property_read_u32(target_np, "nvidia,rx-clk-tap-delay",
&cdata->rx_clk_tap_delay);
return cdata;
}
@@ -942,7 +942,7 @@ static void tegra_spi_cleanup(struct spi_device *spi)
static int tegra_spi_setup(struct spi_device *spi)
{
- struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller);
struct tegra_spi_client_data *cdata = spi->controller_data;
u32 val;
unsigned long flags;
@@ -993,7 +993,7 @@ static int tegra_spi_setup(struct spi_device *spi)
static void tegra_spi_transfer_end(struct spi_device *spi)
{
- struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller);
int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
/* GPIO based chip select control */
@@ -1025,11 +1025,11 @@ static void tegra_spi_dump_regs(struct tegra_spi_data *tspi)
tegra_spi_readl(tspi, SPI_FIFO_STATUS));
}
-static int tegra_spi_transfer_one_message(struct spi_master *master,
+static int tegra_spi_transfer_one_message(struct spi_controller *host,
struct spi_message *msg)
{
bool is_first_msg = true;
- struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(host);
struct spi_transfer *xfer;
struct spi_device *spi = msg->spi;
int ret;
@@ -1078,7 +1078,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
reset_control_assert(tspi->rst);
udelay(2);
reset_control_deassert(tspi->rst);
- tspi->last_used_cs = master->num_chipselect + 1;
+ tspi->last_used_cs = host->num_chipselect + 1;
goto complete_xfer;
}
@@ -1112,7 +1112,7 @@ complete_xfer:
ret = 0;
exit:
msg->status = ret;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return ret;
}
@@ -1293,40 +1293,40 @@ MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
static int tegra_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct tegra_spi_data *tspi;
struct resource *r;
int ret, spi_irq;
int bus_num;
- master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
- if (!master) {
- dev_err(&pdev->dev, "master allocation failed\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(*tspi));
+ if (!host) {
+ dev_err(&pdev->dev, "host allocation failed\n");
return -ENOMEM;
}
- platform_set_drvdata(pdev, master);
- tspi = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, host);
+ tspi = spi_controller_get_devdata(host);
if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
- &master->max_speed_hz))
- master->max_speed_hz = 25000000; /* 25MHz */
+ &host->max_speed_hz))
+ host->max_speed_hz = 25000000; /* 25MHz */
/* the spi->mode bits understood by this driver: */
- master->use_gpio_descriptors = true;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
- SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
- master->setup = tegra_spi_setup;
- master->cleanup = tegra_spi_cleanup;
- master->transfer_one_message = tegra_spi_transfer_one_message;
- master->set_cs_timing = tegra_spi_set_hw_cs_timing;
- master->num_chipselect = MAX_CHIP_SELECT;
- master->auto_runtime_pm = true;
+ host->use_gpio_descriptors = true;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
+ SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ host->setup = tegra_spi_setup;
+ host->cleanup = tegra_spi_cleanup;
+ host->transfer_one_message = tegra_spi_transfer_one_message;
+ host->set_cs_timing = tegra_spi_set_hw_cs_timing;
+ host->num_chipselect = MAX_CHIP_SELECT;
+ host->auto_runtime_pm = true;
bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
if (bus_num >= 0)
- master->bus_num = bus_num;
+ host->bus_num = bus_num;
- tspi->master = master;
+ tspi->host = host;
tspi->dev = &pdev->dev;
spin_lock_init(&tspi->lock);
@@ -1334,20 +1334,20 @@ static int tegra_spi_probe(struct platform_device *pdev)
if (!tspi->soc_data) {
dev_err(&pdev->dev, "unsupported tegra\n");
ret = -ENODEV;
- goto exit_free_master;
+ goto exit_free_host;
}
tspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(tspi->base)) {
ret = PTR_ERR(tspi->base);
- goto exit_free_master;
+ goto exit_free_host;
}
tspi->phys = r->start;
spi_irq = platform_get_irq(pdev, 0);
if (spi_irq < 0) {
ret = spi_irq;
- goto exit_free_master;
+ goto exit_free_host;
}
tspi->irq = spi_irq;
@@ -1355,14 +1355,14 @@ static int tegra_spi_probe(struct platform_device *pdev)
if (IS_ERR(tspi->clk)) {
dev_err(&pdev->dev, "can not get clock\n");
ret = PTR_ERR(tspi->clk);
- goto exit_free_master;
+ goto exit_free_host;
}
tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
if (IS_ERR(tspi->rst)) {
dev_err(&pdev->dev, "can not get reset\n");
ret = PTR_ERR(tspi->rst);
- goto exit_free_master;
+ goto exit_free_host;
}
tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
@@ -1370,7 +1370,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
ret = tegra_spi_init_dma_param(tspi, true);
if (ret < 0)
- goto exit_free_master;
+ goto exit_free_host;
ret = tegra_spi_init_dma_param(tspi, false);
if (ret < 0)
goto exit_rx_dma_free;
@@ -1401,7 +1401,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
tspi->spi_cs_timing1 = tegra_spi_readl(tspi, SPI_CS_TIMING1);
tspi->spi_cs_timing2 = tegra_spi_readl(tspi, SPI_CS_TIMING2);
tspi->def_command2_reg = tegra_spi_readl(tspi, SPI_COMMAND2);
- tspi->last_used_cs = master->num_chipselect + 1;
+ tspi->last_used_cs = host->num_chipselect + 1;
pm_runtime_put(&pdev->dev);
ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
tegra_spi_isr_thread, IRQF_ONESHOT,
@@ -1412,10 +1412,10 @@ static int tegra_spi_probe(struct platform_device *pdev)
goto exit_pm_disable;
}
- master->dev.of_node = pdev->dev.of_node;
- ret = devm_spi_register_master(&pdev->dev, master);
+ host->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret < 0) {
- dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ dev_err(&pdev->dev, "can not register to host err %d\n", ret);
goto exit_free_irq;
}
return ret;
@@ -1429,15 +1429,15 @@ exit_pm_disable:
tegra_spi_deinit_dma_param(tspi, false);
exit_rx_dma_free:
tegra_spi_deinit_dma_param(tspi, true);
-exit_free_master:
- spi_master_put(master);
+exit_free_host:
+ spi_controller_put(host);
return ret;
}
static void tegra_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(host);
free_irq(tspi->irq, tspi);
@@ -1455,15 +1455,15 @@ static void tegra_spi_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int tegra_spi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
- return spi_master_suspend(master);
+ return spi_controller_suspend(host);
}
static int tegra_spi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(host);
int ret;
ret = pm_runtime_resume_and_get(dev);
@@ -1473,17 +1473,17 @@ static int tegra_spi_resume(struct device *dev)
}
tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
tegra_spi_writel(tspi, tspi->def_command2_reg, SPI_COMMAND2);
- tspi->last_used_cs = master->num_chipselect + 1;
+ tspi->last_used_cs = host->num_chipselect + 1;
pm_runtime_put(dev);
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
#endif
static int tegra_spi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(host);
/* Flush all write which are in PPSB queue by reading back */
tegra_spi_readl(tspi, SPI_COMMAND1);
@@ -1494,8 +1494,8 @@ static int tegra_spi_runtime_suspend(struct device *dev)
static int tegra_spi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(tspi->clk);
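[Editor's note, not part of the patch: the spi-tegra114.c hunks above, like the ones that follow for the other controllers, are a mechanical rename from the master/slave API to the host/target one: spi_alloc_master() becomes spi_alloc_host(), spi_master_get_devdata() becomes spi_controller_get_devdata(), registration and teardown move to the spi_*_controller() helpers, and labels and messages are renamed to match. As a reference point only, and with hypothetical foo_* names rather than symbols from this patch, a probe following the converted shape looks roughly like this:

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    /* hypothetical private data; mirrors the tspi/tsd structures above */
    struct foo_spi {
            struct device *dev;
            struct spi_controller *host;    /* was: struct spi_master *master */
    };

    static int foo_spi_probe(struct platform_device *pdev)
    {
            struct spi_controller *host;
            struct foo_spi *fs;
            int ret;

            /* spi_alloc_host() is the host-role spelling of spi_alloc_master() */
            host = spi_alloc_host(&pdev->dev, sizeof(*fs));
            if (!host)
                    return -ENOMEM;

            host->mode_bits = SPI_CPOL | SPI_CPHA;
            host->num_chipselect = 1;
            host->dev.of_node = pdev->dev.of_node;

            fs = spi_controller_get_devdata(host);  /* was spi_master_get_devdata() */
            fs->host = host;
            fs->dev = &pdev->dev;
            platform_set_drvdata(pdev, host);

            ret = devm_spi_register_controller(&pdev->dev, host);
            if (ret) {
                    spi_controller_put(host);       /* was spi_master_put() */
                    return ret;
            }

            return 0;
    }
]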
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 0c5507473f97..9f6b9f89be5b 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -102,7 +102,7 @@
struct tegra_sflash_data {
struct device *dev;
- struct spi_master *master;
+ struct spi_controller *host;
spinlock_t lock;
struct clk *clk;
@@ -251,7 +251,7 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
struct spi_transfer *t, bool is_first_of_msg,
bool is_single_xfer)
{
- struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
+ struct tegra_sflash_data *tsd = spi_controller_get_devdata(spi->controller);
u32 speed;
u32 command;
@@ -303,12 +303,12 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
return tegra_sflash_start_cpu_based_transfer(tsd, t);
}
-static int tegra_sflash_transfer_one_message(struct spi_master *master,
+static int tegra_sflash_transfer_one_message(struct spi_controller *host,
struct spi_message *msg)
{
bool is_first_msg = true;
int single_xfer;
- struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct tegra_sflash_data *tsd = spi_controller_get_devdata(host);
struct spi_transfer *xfer;
struct spi_device *spi = msg->spi;
int ret;
@@ -351,7 +351,7 @@ static int tegra_sflash_transfer_one_message(struct spi_master *master,
exit:
tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
msg->status = ret;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return ret;
}
@@ -416,7 +416,7 @@ MODULE_DEVICE_TABLE(of, tegra_sflash_of_match);
static int tegra_sflash_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct tegra_sflash_data *tsd;
int ret;
const struct of_device_id *match;
@@ -427,37 +427,37 @@ static int tegra_sflash_probe(struct platform_device *pdev)
return -ENODEV;
}
- master = spi_alloc_master(&pdev->dev, sizeof(*tsd));
- if (!master) {
- dev_err(&pdev->dev, "master allocation failed\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(*tsd));
+ if (!host) {
+ dev_err(&pdev->dev, "host allocation failed\n");
return -ENOMEM;
}
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA;
- master->transfer_one_message = tegra_sflash_transfer_one_message;
- master->auto_runtime_pm = true;
- master->num_chipselect = MAX_CHIP_SELECT;
-
- platform_set_drvdata(pdev, master);
- tsd = spi_master_get_devdata(master);
- tsd->master = master;
+ host->mode_bits = SPI_CPOL | SPI_CPHA;
+ host->transfer_one_message = tegra_sflash_transfer_one_message;
+ host->auto_runtime_pm = true;
+ host->num_chipselect = MAX_CHIP_SELECT;
+
+ platform_set_drvdata(pdev, host);
+ tsd = spi_controller_get_devdata(host);
+ tsd->host = host;
tsd->dev = &pdev->dev;
spin_lock_init(&tsd->lock);
if (of_property_read_u32(tsd->dev->of_node, "spi-max-frequency",
- &master->max_speed_hz))
- master->max_speed_hz = 25000000; /* 25MHz */
+ &host->max_speed_hz))
+ host->max_speed_hz = 25000000; /* 25MHz */
tsd->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tsd->base)) {
ret = PTR_ERR(tsd->base);
- goto exit_free_master;
+ goto exit_free_host;
}
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto exit_free_master;
+ goto exit_free_host;
tsd->irq = ret;
ret = request_irq(tsd->irq, tegra_sflash_isr, 0,
@@ -465,7 +465,7 @@ static int tegra_sflash_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
tsd->irq);
- goto exit_free_master;
+ goto exit_free_host;
}
tsd->clk = devm_clk_get(&pdev->dev, NULL);
@@ -505,10 +505,10 @@ static int tegra_sflash_probe(struct platform_device *pdev)
tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
pm_runtime_put(&pdev->dev);
- master->dev.of_node = pdev->dev.of_node;
- ret = devm_spi_register_master(&pdev->dev, master);
+ host->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret < 0) {
- dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ dev_err(&pdev->dev, "can not register to host err %d\n", ret);
goto exit_pm_disable;
}
return ret;
@@ -519,15 +519,15 @@ exit_pm_disable:
tegra_sflash_runtime_suspend(&pdev->dev);
exit_free_irq:
free_irq(tsd->irq, tsd);
-exit_free_master:
- spi_master_put(master);
+exit_free_host:
+ spi_controller_put(host);
return ret;
}
static void tegra_sflash_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct tegra_sflash_data *tsd = spi_controller_get_devdata(host);
free_irq(tsd->irq, tsd);
@@ -539,15 +539,15 @@ static void tegra_sflash_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int tegra_sflash_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
- return spi_master_suspend(master);
+ return spi_controller_suspend(host);
}
static int tegra_sflash_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_controller_get_devdata(host);
int ret;
ret = pm_runtime_resume_and_get(dev);
@@ -558,14 +558,14 @@ static int tegra_sflash_resume(struct device *dev)
tegra_sflash_writel(tsd, tsd->command_reg, SPI_COMMAND);
pm_runtime_put(dev);
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
#endif
static int tegra_sflash_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_controller_get_devdata(host);
/* Flush all write which are in PPSB queue by reading back */
tegra_sflash_readl(tsd, SPI_COMMAND);
@@ -576,8 +576,8 @@ static int tegra_sflash_runtime_suspend(struct device *dev)
static int tegra_sflash_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(tsd->clk);
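[Editor's note: the system-sleep callbacks change in the same one-for-one way; dev_get_drvdata() still returns whatever platform_set_drvdata() stored, here the controller. A minimal sketch, reusing the hypothetical struct foo_spi from the earlier sketch:

    #include <linux/pm_runtime.h>

    static int foo_spi_suspend(struct device *dev)
    {
            struct spi_controller *host = dev_get_drvdata(dev);

            /* spi_controller_suspend() replaces spi_master_suspend() */
            return spi_controller_suspend(host);
    }

    static int foo_spi_resume(struct device *dev)
    {
            struct spi_controller *host = dev_get_drvdata(dev);
            int ret;

            ret = pm_runtime_resume_and_get(dev);
            if (ret < 0)
                    return ret;
            /* a real driver would restore cached registers via
             * spi_controller_get_devdata(host) at this point */
            pm_runtime_put(dev);

            return spi_controller_resume(host);     /* was spi_master_resume() */
    }
]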
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index f5cd365c913a..ed1393d159ae 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -152,7 +152,7 @@ struct tegra_slink_chip_data {
struct tegra_slink_data {
struct device *dev;
- struct spi_master *master;
+ struct spi_controller *host;
const struct tegra_slink_chip_data *chip_data;
spinlock_t lock;
@@ -671,7 +671,7 @@ static void tegra_slink_deinit_dma_param(struct tegra_slink_data *tspi,
static int tegra_slink_start_transfer_one(struct spi_device *spi,
struct spi_transfer *t)
{
- struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(spi->controller);
u32 speed;
u8 bits_per_word;
unsigned total_fifo_words;
@@ -737,7 +737,7 @@ static int tegra_slink_setup(struct spi_device *spi)
SLINK_CS_POLARITY3,
};
- struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(spi->controller);
u32 val;
unsigned long flags;
int ret;
@@ -768,10 +768,10 @@ static int tegra_slink_setup(struct spi_device *spi)
return 0;
}
-static int tegra_slink_prepare_message(struct spi_master *master,
+static int tegra_slink_prepare_message(struct spi_controller *host,
struct spi_message *msg)
{
- struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(host);
struct spi_device *spi = msg->spi;
tegra_slink_clear_status(tspi);
@@ -794,11 +794,11 @@ static int tegra_slink_prepare_message(struct spi_master *master,
return 0;
}
-static int tegra_slink_transfer_one(struct spi_master *master,
+static int tegra_slink_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(host);
int ret;
reinit_completion(&tspi->xfer_completion);
@@ -825,10 +825,10 @@ static int tegra_slink_transfer_one(struct spi_master *master,
return 0;
}
-static int tegra_slink_unprepare_message(struct spi_master *master,
+static int tegra_slink_unprepare_message(struct spi_controller *host,
struct spi_message *msg)
{
- struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(host);
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
@@ -999,7 +999,7 @@ MODULE_DEVICE_TABLE(of, tegra_slink_of_match);
static int tegra_slink_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct tegra_slink_data *tspi;
struct resource *r;
int ret, spi_irq;
@@ -1007,36 +1007,36 @@ static int tegra_slink_probe(struct platform_device *pdev)
cdata = of_device_get_match_data(&pdev->dev);
- master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
- if (!master) {
- dev_err(&pdev->dev, "master allocation failed\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(*tspi));
+ if (!host) {
+ dev_err(&pdev->dev, "host allocation failed\n");
return -ENOMEM;
}
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- master->setup = tegra_slink_setup;
- master->prepare_message = tegra_slink_prepare_message;
- master->transfer_one = tegra_slink_transfer_one;
- master->unprepare_message = tegra_slink_unprepare_message;
- master->auto_runtime_pm = true;
- master->num_chipselect = MAX_CHIP_SELECT;
-
- platform_set_drvdata(pdev, master);
- tspi = spi_master_get_devdata(master);
- tspi->master = master;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ host->setup = tegra_slink_setup;
+ host->prepare_message = tegra_slink_prepare_message;
+ host->transfer_one = tegra_slink_transfer_one;
+ host->unprepare_message = tegra_slink_unprepare_message;
+ host->auto_runtime_pm = true;
+ host->num_chipselect = MAX_CHIP_SELECT;
+
+ platform_set_drvdata(pdev, host);
+ tspi = spi_controller_get_devdata(host);
+ tspi->host = host;
tspi->dev = &pdev->dev;
tspi->chip_data = cdata;
spin_lock_init(&tspi->lock);
if (of_property_read_u32(tspi->dev->of_node, "spi-max-frequency",
- &master->max_speed_hz))
- master->max_speed_hz = 25000000; /* 25MHz */
+ &host->max_speed_hz))
+ host->max_speed_hz = 25000000; /* 25MHz */
tspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(tspi->base)) {
ret = PTR_ERR(tspi->base);
- goto exit_free_master;
+ goto exit_free_host;
}
tspi->phys = r->start;
@@ -1045,26 +1045,26 @@ static int tegra_slink_probe(struct platform_device *pdev)
if (IS_ERR(tspi->clk)) {
ret = PTR_ERR(tspi->clk);
dev_err(&pdev->dev, "Can not get clock %d\n", ret);
- goto exit_free_master;
+ goto exit_free_host;
}
tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
if (IS_ERR(tspi->rst)) {
dev_err(&pdev->dev, "can not get reset\n");
ret = PTR_ERR(tspi->rst);
- goto exit_free_master;
+ goto exit_free_host;
}
ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (ret)
- goto exit_free_master;
+ goto exit_free_host;
tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
ret = tegra_slink_init_dma_param(tspi, true);
if (ret < 0)
- goto exit_free_master;
+ goto exit_free_host;
ret = tegra_slink_init_dma_param(tspi, false);
if (ret < 0)
goto exit_rx_dma_free;
@@ -1103,10 +1103,10 @@ static int tegra_slink_probe(struct platform_device *pdev)
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
- master->dev.of_node = pdev->dev.of_node;
- ret = spi_register_master(master);
+ host->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_controller(host);
if (ret < 0) {
- dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ dev_err(&pdev->dev, "can not register to host err %d\n", ret);
goto exit_free_irq;
}
@@ -1124,17 +1124,17 @@ exit_pm_disable:
tegra_slink_deinit_dma_param(tspi, false);
exit_rx_dma_free:
tegra_slink_deinit_dma_param(tspi, true);
-exit_free_master:
- spi_master_put(master);
+exit_free_host:
+ spi_controller_put(host);
return ret;
}
static void tegra_slink_remove(struct platform_device *pdev)
{
- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
- struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = spi_controller_get(platform_get_drvdata(pdev));
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(host);
- spi_unregister_master(master);
+ spi_unregister_controller(host);
free_irq(tspi->irq, tspi);
@@ -1146,21 +1146,21 @@ static void tegra_slink_remove(struct platform_device *pdev)
if (tspi->rx_dma_chan)
tegra_slink_deinit_dma_param(tspi, true);
- spi_master_put(master);
+ spi_controller_put(host);
}
#ifdef CONFIG_PM_SLEEP
static int tegra_slink_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
- return spi_master_suspend(master);
+ return spi_controller_suspend(host);
}
static int tegra_slink_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(host);
int ret;
ret = pm_runtime_resume_and_get(dev);
@@ -1172,14 +1172,14 @@ static int tegra_slink_resume(struct device *dev)
tegra_slink_writel(tspi, tspi->command2_reg, SLINK_COMMAND2);
pm_runtime_put(dev);
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
#endif
static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(host);
/* Flush all write which are in PPSB queue by reading back */
tegra_slink_readl(tspi, SLINK_MAS_DATA);
@@ -1190,8 +1190,8 @@ static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(tspi->clk);
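[Editor's note: the spi-tegra20-slink.c hunks also show the hook-signature side of the change: prepare_message, transfer_one and unprepare_message now receive a struct spi_controller *, and per-device code reaches the controller through spi->controller rather than the old spi->master alias. A hedged sketch of those two shapes, foo_* names still hypothetical:

    static int foo_spi_setup(struct spi_device *spi)
    {
            /* spi->controller replaces the spi->master alias */
            struct foo_spi *fs = spi_controller_get_devdata(spi->controller);

            dev_dbg(fs->dev, "per-device setup\n");
            return 0;
    }

    static int foo_spi_transfer_one(struct spi_controller *host,
                                    struct spi_device *spi,
                                    struct spi_transfer *xfer)
    {
            struct foo_spi *fs = spi_controller_get_devdata(host);

            /* start the transfer using 'fs'; return 1 if it will complete
             * asynchronously (finalized via spi_finalize_current_transfer()),
             * or 0 if it already finished */
            dev_dbg(fs->dev, "%u byte transfer\n", xfer->len);
            return 0;
    }
]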
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index e9ad9b0b598b..afbd64a217eb 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -175,7 +175,7 @@ struct tegra_qspi_client_data {
struct tegra_qspi {
struct device *dev;
- struct spi_master *master;
+ struct spi_controller *host;
/* lock to protect data accessed by irq */
spinlock_t lock;
@@ -809,7 +809,7 @@ err_out:
static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
bool is_first_of_msg)
{
- struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
struct tegra_qspi_client_data *cdata = spi->controller_data;
u32 command1, command2, speed = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
@@ -870,7 +870,7 @@ static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_tran
static int tegra_qspi_start_transfer_one(struct spi_device *spi,
struct spi_transfer *t, u32 command1)
{
- struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
unsigned int total_fifo_words;
u8 bus_width = 0;
int ret;
@@ -925,7 +925,7 @@ static int tegra_qspi_start_transfer_one(struct spi_device *spi,
static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
{
struct tegra_qspi_client_data *cdata;
- struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
if (!cdata)
@@ -941,7 +941,7 @@ static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_devic
static int tegra_qspi_setup(struct spi_device *spi)
{
- struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
struct tegra_qspi_client_data *cdata = spi->controller_data;
unsigned long flags;
u32 val;
@@ -1005,7 +1005,7 @@ static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
static void tegra_qspi_transfer_end(struct spi_device *spi)
{
- struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
if (cs_val)
@@ -1316,10 +1316,10 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
return true;
}
-static int tegra_qspi_transfer_one_message(struct spi_master *master,
+static int tegra_qspi_transfer_one_message(struct spi_controller *host,
struct spi_message *msg)
{
- struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
int ret;
if (tegra_qspi_validate_cmb_seq(tqspi, msg))
@@ -1327,7 +1327,7 @@ static int tegra_qspi_transfer_one_message(struct spi_master *master,
else
ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return ret;
}
@@ -1533,38 +1533,38 @@ MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
static int tegra_qspi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct tegra_qspi *tqspi;
struct resource *r;
int ret, qspi_irq;
int bus_num;
- master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi));
- if (!master)
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(*tqspi));
+ if (!host)
return -ENOMEM;
- platform_set_drvdata(pdev, master);
- tqspi = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, host);
+ tqspi = spi_controller_get_devdata(host);
- master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
- SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
- master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
- master->flags = SPI_CONTROLLER_HALF_DUPLEX;
- master->setup = tegra_qspi_setup;
- master->transfer_one_message = tegra_qspi_transfer_one_message;
- master->num_chipselect = 1;
- master->auto_runtime_pm = true;
+ host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
+ SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
+ host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+ host->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ host->setup = tegra_qspi_setup;
+ host->transfer_one_message = tegra_qspi_transfer_one_message;
+ host->num_chipselect = 1;
+ host->auto_runtime_pm = true;
bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
if (bus_num >= 0)
- master->bus_num = bus_num;
+ host->bus_num = bus_num;
- tqspi->master = master;
+ tqspi->host = host;
tqspi->dev = &pdev->dev;
spin_lock_init(&tqspi->lock);
tqspi->soc_data = device_get_match_data(&pdev->dev);
- master->num_chipselect = tqspi->soc_data->cs_count;
+ host->num_chipselect = tqspi->soc_data->cs_count;
tqspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(tqspi->base))
return PTR_ERR(tqspi->base);
@@ -1625,10 +1625,10 @@ static int tegra_qspi_probe(struct platform_device *pdev)
goto exit_pm_disable;
}
- master->dev.of_node = pdev->dev.of_node;
- ret = spi_register_master(master);
+ host->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_controller(host);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to register master: %d\n", ret);
+ dev_err(&pdev->dev, "failed to register host: %d\n", ret);
goto exit_free_irq;
}
@@ -1644,10 +1644,10 @@ exit_pm_disable:
static void tegra_qspi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
- spi_unregister_master(master);
+ spi_unregister_controller(host);
free_irq(tqspi->irq, tqspi);
pm_runtime_force_suspend(&pdev->dev);
tegra_qspi_deinit_dma(tqspi);
@@ -1655,15 +1655,15 @@ static void tegra_qspi_remove(struct platform_device *pdev)
static int __maybe_unused tegra_qspi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
- return spi_master_suspend(master);
+ return spi_controller_suspend(host);
}
static int __maybe_unused tegra_qspi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
int ret;
ret = pm_runtime_resume_and_get(dev);
@@ -1676,13 +1676,13 @@ static int __maybe_unused tegra_qspi_resume(struct device *dev)
tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
pm_runtime_put(dev);
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
/* Runtime pm disabled with ACPI */
if (has_acpi_companion(tqspi->dev))
@@ -1697,8 +1697,8 @@ static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
int ret;
/* Runtime pm disabled with ACPI */
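[Editor's note: spi-tegra210-quad.c uses the device-managed allocator, which is why its error paths need no explicit put. A sketch of that variant under the same hypothetical names:

    static int foo_qspi_probe(struct platform_device *pdev)
    {
            struct spi_controller *host;
            struct foo_spi *fs;

            /* devm_spi_alloc_host() replaces devm_spi_alloc_master(); the
             * reference is released automatically on probe failure or unbind,
             * so no spi_controller_put() appears in the error paths */
            host = devm_spi_alloc_host(&pdev->dev, sizeof(*fs));
            if (!host)
                    return -ENOMEM;

            fs = spi_controller_get_devdata(host);
            fs->host = host;
            fs->dev = &pdev->dev;
            platform_set_drvdata(pdev, host);

            return devm_spi_register_controller(&pdev->dev, host);
    }
]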
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 4c81516b67db..0fe6899e78dd 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -40,7 +40,7 @@ struct ti_qspi {
/* list synchronization */
struct mutex list_lock;
- struct spi_master *master;
+ struct spi_controller *host;
void __iomem *base;
void __iomem *mmap_base;
size_t mmap_size;
@@ -137,20 +137,20 @@ static inline void ti_qspi_write(struct ti_qspi *qspi,
static int ti_qspi_setup(struct spi_device *spi)
{
- struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller);
int ret;
- if (spi->master->busy) {
- dev_dbg(qspi->dev, "master busy doing other transfers\n");
+ if (spi->controller->busy) {
+ dev_dbg(qspi->dev, "host busy doing other transfers\n");
return -EBUSY;
}
- if (!qspi->master->max_speed_hz) {
+ if (!qspi->host->max_speed_hz) {
dev_err(qspi->dev, "spi max frequency not defined\n");
return -EINVAL;
}
- spi->max_speed_hz = min(spi->max_speed_hz, qspi->master->max_speed_hz);
+ spi->max_speed_hz = min(spi->max_speed_hz, qspi->host->max_speed_hz);
ret = pm_runtime_resume_and_get(qspi->dev);
if (ret < 0) {
@@ -526,7 +526,7 @@ static int ti_qspi_dma_xfer_sg(struct ti_qspi *qspi, struct sg_table rx_sg,
static void ti_qspi_enable_memory_map(struct spi_device *spi)
{
- struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller);
ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
if (qspi->ctrl_base) {
@@ -540,7 +540,7 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi)
static void ti_qspi_disable_memory_map(struct spi_device *spi)
{
- struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller);
ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
if (qspi->ctrl_base)
@@ -554,7 +554,7 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
u8 data_nbits, u8 addr_width,
u8 dummy_bytes)
{
- struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller);
u32 memval = opcode;
switch (data_nbits) {
@@ -576,7 +576,7 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
- struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->controller);
size_t max_len;
if (op->data.dir == SPI_MEM_DATA_IN) {
@@ -606,19 +606,19 @@ static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
static int ti_qspi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct ti_qspi *qspi = spi_master_get_devdata(mem->spi->master);
+ struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->controller);
u32 from = 0;
int ret = 0;
/* Only optimize read path. */
if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
!op->addr.nbytes || op->addr.nbytes > 4)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* Address exceeds MMIO window size, fall back to regular mode. */
from = op->addr.val;
if (from + op->data.nbytes > qspi->mmap_size)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
mutex_lock(&qspi->list_lock);
@@ -633,10 +633,10 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,
struct sg_table sgt;
if (virt_addr_valid(op->data.buf.in) &&
- !spi_controller_dma_map_mem_op_data(mem->spi->master, op,
+ !spi_controller_dma_map_mem_op_data(mem->spi->controller, op,
&sgt)) {
ret = ti_qspi_dma_xfer_sg(qspi, sgt, from);
- spi_controller_dma_unmap_mem_op_data(mem->spi->master,
+ spi_controller_dma_unmap_mem_op_data(mem->spi->controller,
op, &sgt);
} else {
ret = ti_qspi_dma_bounce_buffer(qspi, from,
@@ -658,10 +658,10 @@ static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
.adjust_op_size = ti_qspi_adjust_op_size,
};
-static int ti_qspi_start_transfer_one(struct spi_master *master,
+static int ti_qspi_start_transfer_one(struct spi_controller *host,
struct spi_message *m)
{
- struct ti_qspi *qspi = spi_master_get_devdata(master);
+ struct ti_qspi *qspi = spi_controller_get_devdata(host);
struct spi_device *spi = m->spi;
struct spi_transfer *t;
int status = 0, ret;
@@ -720,7 +720,7 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
m->status = status;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return status;
}
@@ -756,33 +756,33 @@ MODULE_DEVICE_TABLE(of, ti_qspi_match);
static int ti_qspi_probe(struct platform_device *pdev)
{
struct ti_qspi *qspi;
- struct spi_master *master;
+ struct spi_controller *host;
struct resource *r, *res_mmap;
struct device_node *np = pdev->dev.of_node;
u32 max_freq;
int ret = 0, num_cs, irq;
dma_cap_mask_t mask;
- master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*qspi));
+ if (!host)
return -ENOMEM;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
- master->flags = SPI_CONTROLLER_HALF_DUPLEX;
- master->setup = ti_qspi_setup;
- master->auto_runtime_pm = true;
- master->transfer_one_message = ti_qspi_start_transfer_one;
- master->dev.of_node = pdev->dev.of_node;
- master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
- SPI_BPW_MASK(8);
- master->mem_ops = &ti_qspi_mem_ops;
+ host->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ host->setup = ti_qspi_setup;
+ host->auto_runtime_pm = true;
+ host->transfer_one_message = ti_qspi_start_transfer_one;
+ host->dev.of_node = pdev->dev.of_node;
+ host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
+ host->mem_ops = &ti_qspi_mem_ops;
if (!of_property_read_u32(np, "num-cs", &num_cs))
- master->num_chipselect = num_cs;
+ host->num_chipselect = num_cs;
- qspi = spi_master_get_devdata(master);
- qspi->master = master;
+ qspi = spi_controller_get_devdata(host);
+ qspi->host = host;
qspi->dev = &pdev->dev;
platform_set_drvdata(pdev, qspi);
@@ -792,7 +792,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
if (r == NULL) {
dev_err(&pdev->dev, "missing platform data\n");
ret = -ENODEV;
- goto free_master;
+ goto free_host;
}
}
@@ -812,7 +812,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- goto free_master;
+ goto free_host;
}
mutex_init(&qspi->list_lock);
@@ -820,7 +820,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
qspi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(qspi->base)) {
ret = PTR_ERR(qspi->base);
- goto free_master;
+ goto free_host;
}
@@ -830,7 +830,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
"syscon-chipselects");
if (IS_ERR(qspi->ctrl_base)) {
ret = PTR_ERR(qspi->ctrl_base);
- goto free_master;
+ goto free_host;
}
ret = of_property_read_u32_index(np,
"syscon-chipselects",
@@ -838,7 +838,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"couldn't get ctrl_mod reg index\n");
- goto free_master;
+ goto free_host;
}
}
@@ -853,7 +853,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
- master->max_speed_hz = max_freq;
+ host->max_speed_hz = max_freq;
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
@@ -876,7 +876,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
dma_release_channel(qspi->rx_chan);
goto no_dma;
}
- master->dma_rx = qspi->rx_chan;
+ host->dma_rx = qspi->rx_chan;
init_completion(&qspi->transfer_complete);
if (res_mmap)
qspi->mmap_phys_base = (dma_addr_t)res_mmap->start;
@@ -889,39 +889,40 @@ no_dma:
"mmap failed with error %ld using PIO mode\n",
PTR_ERR(qspi->mmap_base));
qspi->mmap_base = NULL;
- master->mem_ops = NULL;
+ host->mem_ops = NULL;
}
}
qspi->mmap_enabled = false;
qspi->current_cs = -1;
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (!ret)
return 0;
ti_qspi_dma_cleanup(qspi);
pm_runtime_disable(&pdev->dev);
-free_master:
- spi_master_put(master);
+free_host:
+ spi_controller_put(host);
return ret;
}
-static int ti_qspi_remove(struct platform_device *pdev)
+static void ti_qspi_remove(struct platform_device *pdev)
{
struct ti_qspi *qspi = platform_get_drvdata(pdev);
int rc;
- rc = spi_master_suspend(qspi->master);
- if (rc)
- return rc;
+ rc = spi_controller_suspend(qspi->host);
+ if (rc) {
+ dev_alert(&pdev->dev, "spi_controller_suspend() failed (%pe)\n",
+ ERR_PTR(rc));
+ return;
+ }
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
ti_qspi_dma_cleanup(qspi);
-
- return 0;
}
static const struct dev_pm_ops ti_qspi_pm_ops = {
@@ -930,7 +931,7 @@ static const struct dev_pm_ops ti_qspi_pm_ops = {
static struct platform_driver ti_qspi_driver = {
.probe = ti_qspi_probe,
- .remove = ti_qspi_remove,
+ .remove_new = ti_qspi_remove,
.driver = {
.name = "ti-qspi",
.pm = &ti_qspi_pm_ops,
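[Editor's note: besides the rename, the spi-ti-qspi.c hunks carry two further cleanups: ti_qspi_exec_mem_op() now returns -EOPNOTSUPP instead of the in-kernel-only -ENOTSUPP, and the platform remove callback is converted to the void-returning .remove_new style, so a failed spi_controller_suspend() can only be reported, not propagated. Roughly, and still with placeholder names:

    #include <linux/err.h>

    static void foo_qspi_remove(struct platform_device *pdev)
    {
            struct spi_controller *host = platform_get_drvdata(pdev);
            int ret;

            ret = spi_controller_suspend(host);
            if (ret)
                    dev_alert(&pdev->dev, "suspend on remove failed (%pe)\n",
                              ERR_PTR(ret));

            pm_runtime_disable(&pdev->dev);
    }

    static struct platform_driver foo_qspi_driver = {
            .probe = foo_qspi_probe,
            .remove_new = foo_qspi_remove,  /* void remove, no error return */
            .driver = {
                    .name = "foo-qspi",
            },
    };
]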
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index af5846cfe5e9..271f3e7f834b 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -124,7 +124,7 @@ struct pch_spi_dma_ctrl {
* struct pch_spi_data - Holds the SPI channel specific details
* @io_remap_addr: The remapped PCI base address
* @io_base_addr: Base address
- * @master: Pointer to the SPI master structure
+ * @host: Pointer to the SPI controller structure
* @work: Reference to work queue handler
* @wait: Wait queue for waking up upon receiving an
* interrupt.
@@ -161,7 +161,7 @@ struct pch_spi_dma_ctrl {
struct pch_spi_data {
void __iomem *io_remap_addr;
unsigned long io_base_addr;
- struct spi_master *master;
+ struct spi_controller *host;
struct work_struct work;
wait_queue_head_t wait;
u8 transfer_complete;
@@ -216,48 +216,48 @@ static const struct pci_device_id pch_spi_pcidev_id[] = {
/**
* pch_spi_writereg() - Performs register writes
- * @master: Pointer to struct spi_master.
+ * @host: Pointer to struct spi_controller.
* @idx: Register offset.
* @val: Value to be written to register.
*/
-static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
+static inline void pch_spi_writereg(struct spi_controller *host, int idx, u32 val)
{
- struct pch_spi_data *data = spi_master_get_devdata(master);
+ struct pch_spi_data *data = spi_controller_get_devdata(host);
iowrite32(val, (data->io_remap_addr + idx));
}
/**
* pch_spi_readreg() - Performs register reads
- * @master: Pointer to struct spi_master.
+ * @host: Pointer to struct spi_controller.
* @idx: Register offset.
*/
-static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
+static inline u32 pch_spi_readreg(struct spi_controller *host, int idx)
{
- struct pch_spi_data *data = spi_master_get_devdata(master);
+ struct pch_spi_data *data = spi_controller_get_devdata(host);
return ioread32(data->io_remap_addr + idx);
}
-static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
+static inline void pch_spi_setclr_reg(struct spi_controller *host, int idx,
u32 set, u32 clr)
{
- u32 tmp = pch_spi_readreg(master, idx);
+ u32 tmp = pch_spi_readreg(host, idx);
tmp = (tmp & ~clr) | set;
- pch_spi_writereg(master, idx, tmp);
+ pch_spi_writereg(host, idx, tmp);
}
-static void pch_spi_set_master_mode(struct spi_master *master)
+static void pch_spi_set_host_mode(struct spi_controller *host)
{
- pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
+ pch_spi_setclr_reg(host, PCH_SPCR, SPCR_MSTR_BIT, 0);
}
/**
* pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs
- * @master: Pointer to struct spi_master.
+ * @host: Pointer to struct spi_controller.
*/
-static void pch_spi_clear_fifo(struct spi_master *master)
+static void pch_spi_clear_fifo(struct spi_controller *host)
{
- pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
- pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
+ pch_spi_setclr_reg(host, PCH_SPCR, SPCR_FICLR_BIT, 0);
+ pch_spi_setclr_reg(host, PCH_SPCR, 0, SPCR_FICLR_BIT);
}
static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
@@ -312,7 +312,7 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
if (reg_spsr_val & SPSR_FI_BIT) {
if ((tx_index == bpw_len) && (rx_index == tx_index)) {
/* disable interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
+ pch_spi_setclr_reg(data->host, PCH_SPCR, 0,
PCH_ALL);
/* transfer is completed;
@@ -321,7 +321,7 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
data->transfer_active = false;
wake_up(&data->wait);
} else {
- dev_vdbg(&data->master->dev,
+ dev_vdbg(&data->host->dev,
"%s : Transfer is not completed",
__func__);
}
@@ -383,10 +383,10 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id)
/**
* pch_spi_set_baud_rate() - Sets SPBR field in SPBRR
- * @master: Pointer to struct spi_master.
+ * @host: Pointer to struct spi_controller.
* @speed_hz: Baud rate.
*/
-static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
+static void pch_spi_set_baud_rate(struct spi_controller *host, u32 speed_hz)
{
u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);
@@ -394,21 +394,21 @@ static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
if (n_spbr > PCH_MAX_SPBR)
n_spbr = PCH_MAX_SPBR;
- pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
+ pch_spi_setclr_reg(host, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
}
/**
* pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR
- * @master: Pointer to struct spi_master.
+ * @host: Pointer to struct spi_controller.
* @bits_per_word: Bits per word for SPI transfer.
*/
-static void pch_spi_set_bits_per_word(struct spi_master *master,
+static void pch_spi_set_bits_per_word(struct spi_controller *host,
u8 bits_per_word)
{
if (bits_per_word == 8)
- pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
+ pch_spi_setclr_reg(host, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
else
- pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
+ pch_spi_setclr_reg(host, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
}
/**
@@ -420,12 +420,12 @@ static void pch_spi_setup_transfer(struct spi_device *spi)
u32 flags = 0;
dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
- __func__, pch_spi_readreg(spi->master, PCH_SPBRR),
+ __func__, pch_spi_readreg(spi->controller, PCH_SPBRR),
spi->max_speed_hz);
- pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);
+ pch_spi_set_baud_rate(spi->controller, spi->max_speed_hz);
/* set bits per word */
- pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);
+ pch_spi_set_bits_per_word(spi->controller, spi->bits_per_word);
if (!(spi->mode & SPI_LSB_FIRST))
flags |= SPCR_LSBF_BIT;
@@ -433,29 +433,29 @@ static void pch_spi_setup_transfer(struct spi_device *spi)
flags |= SPCR_CPOL_BIT;
if (spi->mode & SPI_CPHA)
flags |= SPCR_CPHA_BIT;
- pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
+ pch_spi_setclr_reg(spi->controller, PCH_SPCR, flags,
(SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));
/* Clear the FIFO by toggling FICLR to 1 and back to 0 */
- pch_spi_clear_fifo(spi->master);
+ pch_spi_clear_fifo(spi->controller);
}
/**
* pch_spi_reset() - Clears SPI registers
- * @master: Pointer to struct spi_master.
+ * @host: Pointer to struct spi_controller.
*/
-static void pch_spi_reset(struct spi_master *master)
+static void pch_spi_reset(struct spi_controller *host)
{
/* write 1 to reset SPI */
- pch_spi_writereg(master, PCH_SRST, 0x1);
+ pch_spi_writereg(host, PCH_SRST, 0x1);
/* clear reset */
- pch_spi_writereg(master, PCH_SRST, 0x0);
+ pch_spi_writereg(host, PCH_SRST, 0x0);
}
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{
- struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
+ struct pch_spi_data *data = spi_controller_get_devdata(pspi->controller);
int retval;
unsigned long flags;
@@ -524,15 +524,15 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
/* set baud rate if needed */
if (data->cur_trans->speed_hz) {
- dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
- pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
+ dev_dbg(&data->host->dev, "%s:setting baud rate\n", __func__);
+ pch_spi_set_baud_rate(data->host, data->cur_trans->speed_hz);
}
/* set bits per word if needed */
if (data->cur_trans->bits_per_word &&
(data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) {
- dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
- pch_spi_set_bits_per_word(data->master,
+ dev_dbg(&data->host->dev, "%s:set bits per word\n", __func__);
+ pch_spi_set_bits_per_word(data->host,
data->cur_trans->bits_per_word);
*bpw = data->cur_trans->bits_per_word;
} else {
@@ -590,13 +590,13 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
if (n_writes > PCH_MAX_FIFO_DEPTH)
n_writes = PCH_MAX_FIFO_DEPTH;
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"\n%s:Pulling down SSN low - writing 0x2 to SSNXCR\n",
__func__);
- pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
+ pch_spi_writereg(data->host, PCH_SSNXCR, SSN_LOW);
for (j = 0; j < n_writes; j++)
- pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);
+ pch_spi_writereg(data->host, PCH_SPDWR, data->pkt_tx_buff[j]);
/* update tx_index */
data->tx_index = j;
@@ -609,13 +609,13 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
static void pch_spi_nomore_transfer(struct pch_spi_data *data)
{
struct spi_message *pmsg, *tmp;
- dev_dbg(&data->master->dev, "%s called\n", __func__);
+ dev_dbg(&data->host->dev, "%s called\n", __func__);
/* Invoke complete callback
* [To the spi core..indicating end of transfer] */
data->current_msg->status = 0;
if (data->current_msg->complete) {
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s:Invoking callback of SPI core\n", __func__);
data->current_msg->complete(data->current_msg->context);
}
@@ -623,7 +623,7 @@ static void pch_spi_nomore_transfer(struct pch_spi_data *data)
/* update status in global variable */
data->bcurrent_msg_processing = false;
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s:data->bcurrent_msg_processing = false\n", __func__);
data->current_msg = NULL;
@@ -638,11 +638,11 @@ static void pch_spi_nomore_transfer(struct pch_spi_data *data)
 * transfer requests in the current message or there are
 * more messages)
*/
- dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
+ dev_dbg(&data->host->dev, "%s:Invoke queue_work\n", __func__);
schedule_work(&data->work);
} else if (data->board_dat->suspend_sts ||
data->status == STATUS_EXITING) {
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s suspend/remove initiated, flushing queue\n",
__func__);
list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
@@ -662,14 +662,14 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
/* enable interrupts, set threshold, enable SPI */
if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
/* set receive threshold to PCH_RX_THOLD */
- pch_spi_setclr_reg(data->master, PCH_SPCR,
+ pch_spi_setclr_reg(data->host, PCH_SPCR,
PCH_RX_THOLD << SPCR_RFIC_FIELD |
SPCR_FIE_BIT | SPCR_RFIE_BIT |
SPCR_ORIE_BIT | SPCR_SPE_BIT,
MASK_RFIC_SPCR_BITS | PCH_ALL);
else
/* set receive threshold to maximum */
- pch_spi_setclr_reg(data->master, PCH_SPCR,
+ pch_spi_setclr_reg(data->host, PCH_SPCR,
PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
SPCR_FIE_BIT | SPCR_ORIE_BIT |
SPCR_SPE_BIT,
@@ -677,18 +677,18 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
/* Wait until the transfer completes; go to sleep after
initiating the transfer. */
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s:waiting for transfer to get over\n", __func__);
wait_event_interruptible(data->wait, data->transfer_complete);
/* clear all interrupts */
- pch_spi_writereg(data->master, PCH_SPSR,
- pch_spi_readreg(data->master, PCH_SPSR));
+ pch_spi_writereg(data->host, PCH_SPSR,
+ pch_spi_readreg(data->host, PCH_SPSR));
/* Disable interrupts and SPI transfer */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
+ pch_spi_setclr_reg(data->host, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
/* clear FIFO */
- pch_spi_clear_fifo(data->master);
+ pch_spi_clear_fifo(data->host);
}
static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
@@ -750,25 +750,25 @@ static int pch_spi_start_transfer(struct pch_spi_data *data)
spin_lock_irqsave(&data->lock, flags);
/* disable interrupts, SPI set enable */
- pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
+ pch_spi_setclr_reg(data->host, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
spin_unlock_irqrestore(&data->lock, flags);
/* Wait until the transfer completes; go to sleep after
initiating the transfer. */
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s:waiting for transfer to get over\n", __func__);
rtn = wait_event_interruptible_timeout(data->wait,
data->transfer_complete,
msecs_to_jiffies(2 * HZ));
if (!rtn)
- dev_err(&data->master->dev,
+ dev_err(&data->host->dev,
"%s wait-event timeout\n", __func__);
- dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
+ dma_sync_sg_for_cpu(&data->host->dev, dma->sg_rx_p, dma->nent,
DMA_FROM_DEVICE);
- dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
+ dma_sync_sg_for_cpu(&data->host->dev, dma->sg_tx_p, dma->nent,
DMA_FROM_DEVICE);
memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
@@ -780,14 +780,14 @@ static int pch_spi_start_transfer(struct pch_spi_data *data)
spin_lock_irqsave(&data->lock, flags);
/* clear fifo threshold, disable interrupts, disable SPI transfer */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
+ pch_spi_setclr_reg(data->host, PCH_SPCR, 0,
MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
SPCR_SPE_BIT);
/* clear all interrupts */
- pch_spi_writereg(data->master, PCH_SPSR,
- pch_spi_readreg(data->master, PCH_SPSR));
+ pch_spi_writereg(data->host, PCH_SPSR,
+ pch_spi_readreg(data->host, PCH_SPSR));
/* clear FIFO */
- pch_spi_clear_fifo(data->master);
+ pch_spi_clear_fifo(data->host);
spin_unlock_irqrestore(&data->lock, flags);
@@ -846,7 +846,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
if (!chan) {
- dev_err(&data->master->dev,
+ dev_err(&data->host->dev,
"ERROR: dma_request_channel FAILS(Tx)\n");
goto out;
}
@@ -860,7 +860,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
if (!chan) {
- dev_err(&data->master->dev,
+ dev_err(&data->host->dev,
"ERROR: dma_request_channel FAILS(Rx)\n");
dma_release_channel(dma->chan_tx);
dma->chan_tx = NULL;
@@ -913,9 +913,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
/* set baud rate if needed */
if (data->cur_trans->speed_hz) {
- dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
+ dev_dbg(&data->host->dev, "%s:setting baud rate\n", __func__);
spin_lock_irqsave(&data->lock, flags);
- pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
+ pch_spi_set_baud_rate(data->host, data->cur_trans->speed_hz);
spin_unlock_irqrestore(&data->lock, flags);
}
@@ -923,9 +923,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
if (data->cur_trans->bits_per_word &&
(data->current_msg->spi->bits_per_word !=
data->cur_trans->bits_per_word)) {
- dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
+ dev_dbg(&data->host->dev, "%s:set bits per word\n", __func__);
spin_lock_irqsave(&data->lock, flags);
- pch_spi_set_bits_per_word(data->master,
+ pch_spi_set_bits_per_word(data->host,
data->cur_trans->bits_per_word);
spin_unlock_irqrestore(&data->lock, flags);
*bpw = data->cur_trans->bits_per_word;
@@ -969,12 +969,12 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
size = data->bpw_len;
rem = data->bpw_len;
}
- dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
+ dev_dbg(&data->host->dev, "%s num=%d size=%d rem=%d\n",
__func__, num, size, rem);
spin_lock_irqsave(&data->lock, flags);
/* set receive fifo threshold and transmit fifo threshold */
- pch_spi_setclr_reg(data->master, PCH_SPCR,
+ pch_spi_setclr_reg(data->host, PCH_SPCR,
((size - 1) << SPCR_RFIC_FIELD) |
(PCH_TX_THOLD << SPCR_TFIC_FIELD),
MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
@@ -1016,11 +1016,11 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
num, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
- dev_err(&data->master->dev,
+ dev_err(&data->host->dev,
"%s:dmaengine_prep_slave_sg Failed\n", __func__);
return;
}
- dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
+ dma_sync_sg_for_device(&data->host->dev, sg, num, DMA_FROM_DEVICE);
desc_rx->callback = pch_dma_rx_complete;
desc_rx->callback_param = data;
dma->nent = num;
@@ -1078,20 +1078,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
sg, num, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
- dev_err(&data->master->dev,
+ dev_err(&data->host->dev,
"%s:dmaengine_prep_slave_sg Failed\n", __func__);
return;
}
- dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
+ dma_sync_sg_for_device(&data->host->dev, sg, num, DMA_TO_DEVICE);
desc_tx->callback = NULL;
desc_tx->callback_param = data;
dma->nent = num;
dma->desc_tx = desc_tx;
- dev_dbg(&data->master->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);
+ dev_dbg(&data->host->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);
spin_lock_irqsave(&data->lock, flags);
- pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
+ pch_spi_writereg(data->host, PCH_SSNXCR, SSN_LOW);
desc_rx->tx_submit(desc_rx);
desc_tx->tx_submit(desc_tx);
spin_unlock_irqrestore(&data->lock, flags);
@@ -1107,12 +1107,12 @@ static void pch_spi_process_messages(struct work_struct *pwork)
int bpw;
data = container_of(pwork, struct pch_spi_data, work);
- dev_dbg(&data->master->dev, "%s data initialized\n", __func__);
+ dev_dbg(&data->host->dev, "%s data initialized\n", __func__);
spin_lock(&data->lock);
/* check if suspend has been initiated;if yes flush queue */
if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s suspend/remove initiated, flushing queue\n", __func__);
list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
pmsg->status = -EIO;
@@ -1132,7 +1132,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
}
data->bcurrent_msg_processing = true;
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s Set data->bcurrent_msg_processing= true\n", __func__);
/* Get the message from the queue and delete it from there. */
@@ -1150,7 +1150,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
if (data->use_dma)
pch_spi_request_dma(data,
data->current_msg->spi->bits_per_word);
- pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
+ pch_spi_writereg(data->host, PCH_SSNXCR, SSN_NO_CONTROL);
do {
int cnt;
/* If we are already processing a message get the next
@@ -1161,14 +1161,14 @@ static void pch_spi_process_messages(struct work_struct *pwork)
data->cur_trans =
list_entry(data->current_msg->transfers.next,
struct spi_transfer, transfer_list);
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s :Getting 1st transfer message\n",
__func__);
} else {
data->cur_trans =
list_entry(data->cur_trans->transfer_list.next,
struct spi_transfer, transfer_list);
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s :Getting next transfer message\n",
__func__);
}
@@ -1210,7 +1210,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
data->cur_trans->len = data->save_total_len;
data->current_msg->actual_length += data->cur_trans->len;
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s:data->current_msg->actual_length=%d\n",
__func__, data->current_msg->actual_length);
@@ -1229,7 +1229,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
} while (data->cur_trans != NULL);
out:
- pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
+ pch_spi_writereg(data->host, PCH_SSNXCR, SSN_HIGH);
if (data->use_dma)
pch_spi_release_dma(data);
}
@@ -1248,7 +1248,7 @@ static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
/* reset PCH SPI h/w */
- pch_spi_reset(data->master);
+ pch_spi_reset(data->host);
dev_dbg(&board_dat->pdev->dev,
"%s pch_spi_reset invoked successfully\n", __func__);
@@ -1297,22 +1297,22 @@ static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
static int pch_spi_pd_probe(struct platform_device *plat_dev)
{
int ret;
- struct spi_master *master;
+ struct spi_controller *host;
struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
struct pch_spi_data *data;
dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);
- master = spi_alloc_master(&board_dat->pdev->dev,
+ host = spi_alloc_host(&board_dat->pdev->dev,
sizeof(struct pch_spi_data));
- if (!master) {
- dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
+ if (!host) {
+ dev_err(&plat_dev->dev, "spi_alloc_host[%d] failed.\n",
plat_dev->id);
return -ENOMEM;
}
- data = spi_master_get_devdata(master);
- data->master = master;
+ data = spi_controller_get_devdata(host);
+ data->host = host;
platform_set_drvdata(plat_dev, data);
@@ -1330,13 +1330,13 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
plat_dev->id, data->io_remap_addr);
- /* initialize members of SPI master */
- master->num_chipselect = PCH_MAX_CS;
- master->transfer = pch_spi_transfer;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
- master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
- master->max_speed_hz = PCH_MAX_BAUDRATE;
- master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+ /* initialize members of SPI host */
+ host->num_chipselect = PCH_MAX_CS;
+ host->transfer = pch_spi_transfer;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ host->max_speed_hz = PCH_MAX_BAUDRATE;
+ host->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
data->board_dat = board_dat;
data->plat_dev = plat_dev;
@@ -1365,25 +1365,25 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
}
data->irq_reg_sts = true;
- pch_spi_set_master_mode(master);
+ pch_spi_set_host_mode(host);
if (use_dma) {
dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
ret = pch_alloc_dma_buf(board_dat, data);
if (ret)
- goto err_spi_register_master;
+ goto err_spi_register_controller;
}
- ret = spi_register_master(master);
+ ret = spi_register_controller(host);
if (ret != 0) {
dev_err(&plat_dev->dev,
- "%s spi_register_master FAILED\n", __func__);
- goto err_spi_register_master;
+ "%s spi_register_controller FAILED\n", __func__);
+ goto err_spi_register_controller;
}
return 0;
-err_spi_register_master:
+err_spi_register_controller:
pch_free_dma_buf(board_dat, data);
free_irq(board_dat->pdev->irq, data);
err_request_irq:
@@ -1391,7 +1391,7 @@ err_request_irq:
err_spi_get_resources:
pci_iounmap(board_dat->pdev, data->io_remap_addr);
err_pci_iomap:
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
@@ -1427,13 +1427,13 @@ static void pch_spi_pd_remove(struct platform_device *plat_dev)
/* disable interrupts & free IRQ */
if (data->irq_reg_sts) {
/* disable interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+ pch_spi_setclr_reg(data->host, PCH_SPCR, 0, PCH_ALL);
data->irq_reg_sts = false;
free_irq(board_dat->pdev->irq, data);
}
pci_iounmap(board_dat->pdev, data->io_remap_addr);
- spi_unregister_master(data->master);
+ spi_unregister_controller(data->host);
}
#ifdef CONFIG_PM
static int pch_spi_pd_suspend(struct platform_device *pd_dev,
@@ -1463,8 +1463,8 @@ static int pch_spi_pd_suspend(struct platform_device *pd_dev,
/* Free IRQ */
if (data->irq_reg_sts) {
/* disable all interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
- pch_spi_reset(data->master);
+ pch_spi_setclr_reg(data->host, PCH_SPCR, 0, PCH_ALL);
+ pch_spi_reset(data->host);
free_irq(board_dat->pdev->irq, data);
data->irq_reg_sts = false;
@@ -1498,8 +1498,8 @@ static int pch_spi_pd_resume(struct platform_device *pd_dev)
}
/* reset PCH SPI h/w */
- pch_spi_reset(data->master);
- pch_spi_set_master_mode(data->master);
+ pch_spi_reset(data->host);
+ pch_spi_set_host_mode(data->host);
data->irq_reg_sts = true;
}
return 0;
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index f5344527af0b..4a18cf896194 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -26,7 +26,7 @@ struct uniphier_spi_priv {
void __iomem *base;
dma_addr_t base_dma_addr;
struct clk *clk;
- struct spi_master *master;
+ struct spi_controller *host;
struct completion xfer_done;
int error;
@@ -127,7 +127,7 @@ static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
static void uniphier_spi_set_mode(struct spi_device *spi)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller);
u32 val1, val2;
/*
@@ -180,7 +180,7 @@ static void uniphier_spi_set_mode(struct spi_device *spi)
static void uniphier_spi_set_transfer_size(struct spi_device *spi, int size)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller);
u32 val;
val = readl(priv->base + SSI_TXWDS);
@@ -198,7 +198,7 @@ static void uniphier_spi_set_transfer_size(struct spi_device *spi, int size)
static void uniphier_spi_set_baudrate(struct spi_device *spi,
unsigned int speed)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller);
u32 val, ckdiv;
/*
@@ -217,7 +217,7 @@ static void uniphier_spi_set_baudrate(struct spi_device *spi,
static void uniphier_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller);
u32 val;
priv->error = 0;
@@ -333,7 +333,7 @@ static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller);
u32 val;
val = readl(priv->base + SSI_FPS);
@@ -346,16 +346,16 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
writel(val, priv->base + SSI_FPS);
}
-static bool uniphier_spi_can_dma(struct spi_master *master,
+static bool uniphier_spi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
unsigned int bpw = bytes_per_word(priv->bits_per_word);
- if ((!master->dma_tx && !master->dma_rx)
- || (!master->dma_tx && t->tx_buf)
- || (!master->dma_rx && t->rx_buf))
+ if ((!host->dma_tx && !host->dma_rx)
+ || (!host->dma_tx && t->tx_buf)
+ || (!host->dma_rx && t->rx_buf))
return false;
return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
@@ -363,33 +363,33 @@ static bool uniphier_spi_can_dma(struct spi_master *master,
static void uniphier_spi_dma_rxcb(void *data)
{
- struct spi_master *master = data;
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct spi_controller *host = data;
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
uniphier_spi_irq_disable(priv, SSI_IE_RXRE);
if (!(state & SSI_DMA_TX_BUSY))
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
}
static void uniphier_spi_dma_txcb(void *data)
{
- struct spi_master *master = data;
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct spi_controller *host = data;
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
uniphier_spi_irq_disable(priv, SSI_IE_TXRE);
if (!(state & SSI_DMA_RX_BUSY))
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
}
-static int uniphier_spi_transfer_one_dma(struct spi_master *master,
+static int uniphier_spi_transfer_one_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
int buswidth;
@@ -412,23 +412,23 @@ static int uniphier_spi_transfer_one_dma(struct spi_master *master,
.src_maxburst = SSI_FIFO_BURST_NUM,
};
- dmaengine_slave_config(master->dma_rx, &rxconf);
+ dmaengine_slave_config(host->dma_rx, &rxconf);
rxdesc = dmaengine_prep_slave_sg(
- master->dma_rx,
+ host->dma_rx,
t->rx_sg.sgl, t->rx_sg.nents,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxdesc)
goto out_err_prep;
rxdesc->callback = uniphier_spi_dma_rxcb;
- rxdesc->callback_param = master;
+ rxdesc->callback_param = host;
uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);
dmaengine_submit(rxdesc);
- dma_async_issue_pending(master->dma_rx);
+ dma_async_issue_pending(host->dma_rx);
}
if (priv->tx_buf) {
@@ -439,23 +439,23 @@ static int uniphier_spi_transfer_one_dma(struct spi_master *master,
.dst_maxburst = SSI_FIFO_BURST_NUM,
};
- dmaengine_slave_config(master->dma_tx, &txconf);
+ dmaengine_slave_config(host->dma_tx, &txconf);
txdesc = dmaengine_prep_slave_sg(
- master->dma_tx,
+ host->dma_tx,
t->tx_sg.sgl, t->tx_sg.nents,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc)
goto out_err_prep;
txdesc->callback = uniphier_spi_dma_txcb;
- txdesc->callback_param = master;
+ txdesc->callback_param = host;
uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);
dmaengine_submit(txdesc);
- dma_async_issue_pending(master->dma_tx);
+ dma_async_issue_pending(host->dma_tx);
}
/* signal that we need to wait for completion */
@@ -463,17 +463,17 @@ static int uniphier_spi_transfer_one_dma(struct spi_master *master,
out_err_prep:
if (rxdesc)
- dmaengine_terminate_sync(master->dma_rx);
+ dmaengine_terminate_sync(host->dma_rx);
return -EINVAL;
}
-static int uniphier_spi_transfer_one_irq(struct spi_master *master,
+static int uniphier_spi_transfer_one_irq(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
- struct device *dev = master->dev.parent;
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
+ struct device *dev = host->dev.parent;
unsigned long time_left;
reinit_completion(&priv->xfer_done);
@@ -495,11 +495,11 @@ static int uniphier_spi_transfer_one_irq(struct spi_master *master,
return priv->error;
}
-static int uniphier_spi_transfer_one_poll(struct spi_master *master,
+static int uniphier_spi_transfer_one_poll(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
int loop = SSI_POLL_TIMEOUT_US * 10;
while (priv->tx_bytes) {
@@ -520,14 +520,14 @@ static int uniphier_spi_transfer_one_poll(struct spi_master *master,
return 0;
irq_transfer:
- return uniphier_spi_transfer_one_irq(master, spi, t);
+ return uniphier_spi_transfer_one_irq(host, spi, t);
}
-static int uniphier_spi_transfer_one(struct spi_master *master,
+static int uniphier_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
unsigned long threshold;
bool use_dma;
@@ -537,9 +537,9 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
uniphier_spi_setup_transfer(spi, t);
- use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
+ use_dma = host->can_dma ? host->can_dma(host, spi, t) : false;
if (use_dma)
- return uniphier_spi_transfer_one_dma(master, spi, t);
+ return uniphier_spi_transfer_one_dma(host, spi, t);
/*
* If the transfer operation will take longer than
@@ -548,33 +548,33 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * priv->speed_hz,
USEC_PER_SEC * BITS_PER_BYTE);
if (t->len > threshold)
- return uniphier_spi_transfer_one_irq(master, spi, t);
+ return uniphier_spi_transfer_one_irq(host, spi, t);
else
- return uniphier_spi_transfer_one_poll(master, spi, t);
+ return uniphier_spi_transfer_one_poll(host, spi, t);
}
-static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
+static int uniphier_spi_prepare_transfer_hardware(struct spi_controller *host)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
writel(SSI_CTL_EN, priv->base + SSI_CTL);
return 0;
}
-static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
+static int uniphier_spi_unprepare_transfer_hardware(struct spi_controller *host)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
writel(0, priv->base + SSI_CTL);
return 0;
}
-static void uniphier_spi_handle_err(struct spi_master *master,
+static void uniphier_spi_handle_err(struct spi_controller *host,
struct spi_message *msg)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
u32 val;
/* stop running spi transfer */
@@ -587,12 +587,12 @@ static void uniphier_spi_handle_err(struct spi_master *master,
uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);
if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
- dmaengine_terminate_async(master->dma_tx);
+ dmaengine_terminate_async(host->dma_tx);
atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
}
if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
- dmaengine_terminate_async(master->dma_rx);
+ dmaengine_terminate_async(host->dma_rx);
atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
}
}
@@ -641,7 +641,7 @@ done:
static int uniphier_spi_probe(struct platform_device *pdev)
{
struct uniphier_spi_priv *priv;
- struct spi_master *master;
+ struct spi_controller *host;
struct resource *res;
struct dma_slave_caps caps;
u32 dma_tx_burst = 0, dma_rx_burst = 0;
@@ -649,20 +649,20 @@ static int uniphier_spi_probe(struct platform_device *pdev)
int irq;
int ret;
- master = spi_alloc_master(&pdev->dev, sizeof(*priv));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*priv));
+ if (!host)
return -ENOMEM;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- priv = spi_master_get_devdata(master);
- priv->master = master;
+ priv = spi_controller_get_devdata(host);
+ priv->host = host;
priv->is_save_param = false;
priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
- goto out_master_put;
+ goto out_host_put;
}
priv->base_dma_addr = res->start;
@@ -670,12 +670,12 @@ static int uniphier_spi_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
ret = PTR_ERR(priv->clk);
- goto out_master_put;
+ goto out_host_put;
}
ret = clk_prepare_enable(priv->clk);
if (ret)
- goto out_master_put;
+ goto out_host_put;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -694,35 +694,35 @@ static int uniphier_spi_probe(struct platform_device *pdev)
clk_rate = clk_get_rate(priv->clk);
- master->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER);
- master->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER);
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
- master->dev.of_node = pdev->dev.of_node;
- master->bus_num = pdev->id;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ host->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER);
+ host->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER);
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ host->dev.of_node = pdev->dev.of_node;
+ host->bus_num = pdev->id;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- master->set_cs = uniphier_spi_set_cs;
- master->transfer_one = uniphier_spi_transfer_one;
- master->prepare_transfer_hardware
+ host->set_cs = uniphier_spi_set_cs;
+ host->transfer_one = uniphier_spi_transfer_one;
+ host->prepare_transfer_hardware
= uniphier_spi_prepare_transfer_hardware;
- master->unprepare_transfer_hardware
+ host->unprepare_transfer_hardware
= uniphier_spi_unprepare_transfer_hardware;
- master->handle_err = uniphier_spi_handle_err;
- master->can_dma = uniphier_spi_can_dma;
+ host->handle_err = uniphier_spi_handle_err;
+ host->can_dma = uniphier_spi_can_dma;
- master->num_chipselect = 1;
- master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+ host->num_chipselect = 1;
+ host->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
- master->dma_tx = dma_request_chan(&pdev->dev, "tx");
- if (IS_ERR_OR_NULL(master->dma_tx)) {
- if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
+ host->dma_tx = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR_OR_NULL(host->dma_tx)) {
+ if (PTR_ERR(host->dma_tx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto out_disable_clk;
}
- master->dma_tx = NULL;
+ host->dma_tx = NULL;
dma_tx_burst = INT_MAX;
} else {
- ret = dma_get_slave_caps(master->dma_tx, &caps);
+ ret = dma_get_slave_caps(host->dma_tx, &caps);
if (ret) {
dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
ret);
@@ -731,16 +731,16 @@ static int uniphier_spi_probe(struct platform_device *pdev)
dma_tx_burst = caps.max_burst;
}
- master->dma_rx = dma_request_chan(&pdev->dev, "rx");
- if (IS_ERR_OR_NULL(master->dma_rx)) {
- if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
+ host->dma_rx = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR_OR_NULL(host->dma_rx)) {
+ if (PTR_ERR(host->dma_rx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto out_release_dma;
}
- master->dma_rx = NULL;
+ host->dma_rx = NULL;
dma_rx_burst = INT_MAX;
} else {
- ret = dma_get_slave_caps(master->dma_rx, &caps);
+ ret = dma_get_slave_caps(host->dma_rx, &caps);
if (ret) {
dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
ret);
@@ -749,41 +749,41 @@ static int uniphier_spi_probe(struct platform_device *pdev)
dma_rx_burst = caps.max_burst;
}
- master->max_dma_len = min(dma_tx_burst, dma_rx_burst);
+ host->max_dma_len = min(dma_tx_burst, dma_rx_burst);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret)
goto out_release_dma;
return 0;
out_release_dma:
- if (!IS_ERR_OR_NULL(master->dma_rx)) {
- dma_release_channel(master->dma_rx);
- master->dma_rx = NULL;
+ if (!IS_ERR_OR_NULL(host->dma_rx)) {
+ dma_release_channel(host->dma_rx);
+ host->dma_rx = NULL;
}
- if (!IS_ERR_OR_NULL(master->dma_tx)) {
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ if (!IS_ERR_OR_NULL(host->dma_tx)) {
+ dma_release_channel(host->dma_tx);
+ host->dma_tx = NULL;
}
out_disable_clk:
clk_disable_unprepare(priv->clk);
-out_master_put:
- spi_master_put(master);
+out_host_put:
+ spi_controller_put(host);
return ret;
}
static void uniphier_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
- if (master->dma_tx)
- dma_release_channel(master->dma_tx);
- if (master->dma_rx)
- dma_release_channel(master->dma_rx);
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
clk_disable_unprepare(priv->clk);
}
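
The unchanged dispatch logic in uniphier_spi_transfer_one() picks DMA when can_dma() allows it and otherwise uses the IRQ path only when the transfer would outlast the polling budget: the threshold converts SSI_POLL_TIMEOUT_US at the current bus clock into a byte count. A minimal standalone sketch of that arithmetic, assuming an illustrative 200 us budget and 10 MHz clock (not values taken from the driver):

/* Hedged sketch: how many bytes fit into the polling window at a given clock. */
#include <stdio.h>

#define SSI_POLL_TIMEOUT_US  200UL        /* assumed polling budget */
#define USEC_PER_SEC         1000000UL
#define BITS_PER_BYTE        8UL
#define DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long speed_hz = 10000000UL;  /* 10 MHz, illustrative */
	unsigned long threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * speed_hz,
					       USEC_PER_SEC * BITS_PER_BYTE);

	/* Transfers longer than this many bytes take the IRQ path instead of polling. */
	printf("poll/irq threshold: %lu bytes\n", threshold);
	return 0;
}
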
diff --git a/drivers/spi/spi-wpcm-fiu.c b/drivers/spi/spi-wpcm-fiu.c
index 852ffe013d32..6b16a22cc3a4 100644
--- a/drivers/spi/spi-wpcm-fiu.c
+++ b/drivers/spi/spi-wpcm-fiu.c
@@ -361,7 +361,7 @@ static int wpcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
wpcm_fiu_stall_host(fiu, false);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int wpcm_fiu_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
@@ -441,7 +441,7 @@ static int wpcm_fiu_probe(struct platform_device *pdev)
struct wpcm_fiu_spi *fiu;
struct resource *res;
- ctrl = devm_spi_alloc_master(dev, sizeof(*fiu));
+ ctrl = devm_spi_alloc_host(dev, sizeof(*fiu));
if (!ctrl)
return -ENOMEM;
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index a3d57554f5ba..63354dd3110f 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -132,10 +132,10 @@ static int spi_xcomm_txrx_bufs(struct spi_xcomm *spi_xcomm,
return t->len;
}
-static int spi_xcomm_transfer_one(struct spi_master *master,
+static int spi_xcomm_transfer_one(struct spi_controller *host,
struct spi_message *msg)
{
- struct spi_xcomm *spi_xcomm = spi_master_get_devdata(master);
+ struct spi_xcomm *spi_xcomm = spi_controller_get_devdata(host);
unsigned int settings = spi_xcomm->settings;
struct spi_device *spi = msg->spi;
unsigned cs_change = 0;
@@ -197,7 +197,7 @@ static int spi_xcomm_transfer_one(struct spi_master *master,
spi_xcomm_chipselect(spi_xcomm, spi, false);
msg->status = status;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return status;
}
@@ -205,27 +205,27 @@ static int spi_xcomm_transfer_one(struct spi_master *master,
static int spi_xcomm_probe(struct i2c_client *i2c)
{
struct spi_xcomm *spi_xcomm;
- struct spi_master *master;
+ struct spi_controller *host;
int ret;
- master = spi_alloc_master(&i2c->dev, sizeof(*spi_xcomm));
- if (!master)
+ host = spi_alloc_host(&i2c->dev, sizeof(*spi_xcomm));
+ if (!host)
return -ENOMEM;
- spi_xcomm = spi_master_get_devdata(master);
+ spi_xcomm = spi_controller_get_devdata(host);
spi_xcomm->i2c = i2c;
- master->num_chipselect = 16;
- master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_3WIRE;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->flags = SPI_CONTROLLER_HALF_DUPLEX;
- master->transfer_one_message = spi_xcomm_transfer_one;
- master->dev.of_node = i2c->dev.of_node;
- i2c_set_clientdata(i2c, master);
+ host->num_chipselect = 16;
+ host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_3WIRE;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ host->transfer_one_message = spi_xcomm_transfer_one;
+ host->dev.of_node = i2c->dev.of_node;
+ i2c_set_clientdata(i2c, host);
- ret = devm_spi_register_master(&i2c->dev, master);
+ ret = devm_spi_register_controller(&i2c->dev, host);
if (ret < 0)
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 8e6e3876aa9a..12355957be97 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Xilinx SPI controller driver (master mode only)
+ * Xilinx SPI controller driver (host mode only)
*
* Author: MontaVista Software, Inc.
* source@mvista.com
@@ -83,7 +83,7 @@ struct xilinx_spi {
void __iomem *regs; /* virt. address of the control registers */
int irq;
- bool force_irq; /* force irq to setup master inhibit */
+ bool force_irq; /* force irq to setup host inhibit */
u8 *rx_ptr; /* pointer in the Rx buffer */
const u8 *tx_ptr; /* pointer in the Tx buffer */
u8 bytes_per_word;
@@ -174,10 +174,10 @@ static void xspi_init_hw(struct xilinx_spi *xspi)
regs_base + XIPIF_V123B_IIER_OFFSET);
/* Disable the global IPIF interrupt */
xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
- /* Deselect the slave on the SPI bus */
+ /* Deselect the target on the SPI bus */
xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
- /* Disable the transmitter, enable Manual Slave Select Assertion,
- * put SPI controller into master mode, and enable it */
+ /* Disable the transmitter, enable Manual Target Select Assertion,
+ * put SPI controller into host mode, and enable it */
xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE |
XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET,
regs_base + XSPI_CR_OFFSET);
@@ -185,12 +185,12 @@ static void xspi_init_hw(struct xilinx_spi *xspi)
static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ struct xilinx_spi *xspi = spi_controller_get_devdata(spi->controller);
u16 cr;
u32 cs;
if (is_on == BITBANG_CS_INACTIVE) {
- /* Deselect the slave on the SPI bus */
+ /* Deselect the target on the SPI bus */
xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET);
return;
}
@@ -225,7 +225,7 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
static int xilinx_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ struct xilinx_spi *xspi = spi_controller_get_devdata(spi->controller);
if (spi->mode & SPI_CS_HIGH)
xspi->cs_inactive &= ~BIT(spi_get_chipselect(spi, 0));
@@ -237,7 +237,7 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ struct xilinx_spi *xspi = spi_controller_get_devdata(spi->controller);
int remaining_words; /* the number of words left to transfer */
bool use_irq = false;
u16 cr = 0;
@@ -335,9 +335,9 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
}
-/* This driver supports single master mode only. Hence Tx FIFO Empty
+/* This driver supports single host mode only. Hence Tx FIFO Empty
* is the only interrupt we care about.
- * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
+ * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Target Mode
* Fault are not expected to happen.
*/
static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
@@ -393,7 +393,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
struct xspi_platform_data *pdata;
struct resource *res;
int ret, num_cs = 0, bits_per_word;
- struct spi_master *master;
+ struct spi_controller *host;
bool force_irq = false;
u32 tmp;
u8 i;
@@ -415,26 +415,26 @@ static int xilinx_spi_probe(struct platform_device *pdev)
if (!num_cs) {
dev_err(&pdev->dev,
- "Missing slave select configuration data\n");
+ "Missing target select configuration data\n");
return -EINVAL;
}
if (num_cs > XILINX_SPI_MAX_CS) {
- dev_err(&pdev->dev, "Invalid number of spi slaves\n");
+ dev_err(&pdev->dev, "Invalid number of spi targets\n");
return -EINVAL;
}
- master = devm_spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
- if (!master)
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(struct xilinx_spi));
+ if (!host)
return -ENODEV;
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
- SPI_CS_HIGH;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
+ SPI_CS_HIGH;
- xspi = spi_master_get_devdata(master);
+ xspi = spi_controller_get_devdata(host);
xspi->cs_inactive = 0xffffffff;
- xspi->bitbang.master = master;
+ xspi->bitbang.master = host;
xspi->bitbang.chipselect = xilinx_spi_chipselect;
xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
@@ -444,9 +444,9 @@ static int xilinx_spi_probe(struct platform_device *pdev)
if (IS_ERR(xspi->regs))
return PTR_ERR(xspi->regs);
- master->bus_num = pdev->id;
- master->num_chipselect = num_cs;
- master->dev.of_node = pdev->dev.of_node;
+ host->bus_num = pdev->id;
+ host->num_chipselect = num_cs;
+ host->dev.of_node = pdev->dev.of_node;
/*
* Detect endianness on the IP via loop bit in CR. Detection
@@ -466,7 +466,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
xspi->write_fn = xspi_write32_be;
}
- master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
+ host->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
xspi->bytes_per_word = bits_per_word / 8;
xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
@@ -496,17 +496,17 @@ static int xilinx_spi_probe(struct platform_device *pdev)
if (pdata) {
for (i = 0; i < pdata->num_devices; i++)
- spi_new_device(master, pdata->devices + i);
+ spi_new_device(host, pdata->devices + i);
}
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
return 0;
}
static void xilinx_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct xilinx_spi *xspi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct xilinx_spi *xspi = spi_controller_get_devdata(host);
void __iomem *regs_base = xspi->regs;
spi_bitbang_stop(&xspi->bitbang);
@@ -516,7 +516,7 @@ static void xilinx_spi_remove(struct platform_device *pdev)
/* Disable the global IPIF interrupt */
xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
- spi_master_put(xspi->bitbang.master);
+ spi_controller_put(xspi->bitbang.master);
}
/* work with hotplug and coldplug */
diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c
index 3b91cdd5ae21..49302364b7bd 100644
--- a/drivers/spi/spi-xlp.c
+++ b/drivers/spi/spi-xlp.c
@@ -95,7 +95,7 @@ struct xlp_spi_priv {
int rx_len; /* rx xfer length */
int txerrors; /* TXFIFO underflow count */
int rxerrors; /* RXFIFO overflow count */
- int cs; /* slave device chip select */
+ int cs; /* target device chip select */
u32 spi_clk; /* spi clock frequency */
bool cmd_cont; /* cs active */
struct completion done; /* completion notification */
@@ -138,7 +138,7 @@ static int xlp_spi_setup(struct spi_device *spi)
u32 fdiv, cfg;
int cs;
- xspi = spi_master_get_devdata(spi->master);
+ xspi = spi_controller_get_devdata(spi->controller);
cs = spi_get_chipselect(spi, 0);
/*
* The value of fdiv must be between 4 and 65535.
@@ -343,17 +343,17 @@ static int xlp_spi_txrx_bufs(struct xlp_spi_priv *xs, struct spi_transfer *t)
return bytesleft;
}
-static int xlp_spi_transfer_one(struct spi_master *master,
+static int xlp_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct xlp_spi_priv *xspi = spi_master_get_devdata(master);
+ struct xlp_spi_priv *xspi = spi_controller_get_devdata(host);
int ret = 0;
xspi->cs = spi_get_chipselect(spi, 0);
xspi->dev = spi->dev;
- if (spi_transfer_is_last(master, t))
+ if (spi_transfer_is_last(host, t))
xspi->cmd_cont = 0;
else
xspi->cmd_cont = 1;
@@ -361,13 +361,13 @@ static int xlp_spi_transfer_one(struct spi_master *master,
if (xlp_spi_txrx_bufs(xspi, t))
ret = -EIO;
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
return ret;
}
static int xlp_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct xlp_spi_priv *xspi;
struct clk *clk;
int irq, err;
@@ -398,28 +398,28 @@ static int xlp_spi_probe(struct platform_device *pdev)
xspi->spi_clk = clk_get_rate(clk);
- master = spi_alloc_master(&pdev->dev, 0);
- if (!master) {
- dev_err(&pdev->dev, "could not alloc master\n");
+ host = spi_alloc_host(&pdev->dev, 0);
+ if (!host) {
+ dev_err(&pdev->dev, "could not alloc host\n");
return -ENOMEM;
}
- master->bus_num = 0;
- master->num_chipselect = XLP_SPI_MAX_CS;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- master->setup = xlp_spi_setup;
- master->transfer_one = xlp_spi_transfer_one;
- master->dev.of_node = pdev->dev.of_node;
+ host->bus_num = 0;
+ host->num_chipselect = XLP_SPI_MAX_CS;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ host->setup = xlp_spi_setup;
+ host->transfer_one = xlp_spi_transfer_one;
+ host->dev.of_node = pdev->dev.of_node;
init_completion(&xspi->done);
- spi_master_set_devdata(master, xspi);
+ spi_controller_set_devdata(host, xspi);
xlp_spi_sysctl_setup(xspi);
/* register spi controller */
- err = devm_spi_register_master(&pdev->dev, master);
+ err = devm_spi_register_controller(&pdev->dev, host);
if (err) {
- dev_err(&pdev->dev, "spi register master failed!\n");
- spi_master_put(master);
+ dev_err(&pdev->dev, "spi register host failed!\n");
+ spi_controller_put(host);
return err;
}
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index dbd85d7a1526..3c7721894376 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -53,7 +53,7 @@ static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
u32 v, u8 bits, unsigned flags)
{
- struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
+ struct xtfpga_spi *xspi = spi_controller_get_devdata(spi->controller);
xspi->data = (xspi->data << bits) | (v & GENMASK(bits - 1, 0));
xspi->data_sz += bits;
@@ -71,7 +71,7 @@ static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
static void xtfpga_spi_chipselect(struct spi_device *spi, int is_on)
{
- struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
+ struct xtfpga_spi *xspi = spi_controller_get_devdata(spi->controller);
WARN_ON(xspi->data_sz != 0);
xspi->data_sz = 0;
@@ -81,19 +81,19 @@ static int xtfpga_spi_probe(struct platform_device *pdev)
{
struct xtfpga_spi *xspi;
int ret;
- struct spi_master *master;
+ struct spi_controller *host;
- master = devm_spi_alloc_master(&pdev->dev, sizeof(struct xtfpga_spi));
- if (!master)
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(struct xtfpga_spi));
+ if (!host)
return -ENOMEM;
- master->flags = SPI_CONTROLLER_NO_RX;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
- master->bus_num = pdev->dev.id;
- master->dev.of_node = pdev->dev.of_node;
+ host->flags = SPI_CONTROLLER_NO_RX;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ host->bus_num = pdev->dev.id;
+ host->dev.of_node = pdev->dev.of_node;
- xspi = spi_master_get_devdata(master);
- xspi->bitbang.master = master;
+ xspi = spi_controller_get_devdata(host);
+ xspi->bitbang.master = host;
xspi->bitbang.chipselect = xtfpga_spi_chipselect;
xspi->bitbang.txrx_word[SPI_MODE_0] = xtfpga_spi_txrx_word;
xspi->regs = devm_platform_ioremap_resource(pdev, 0);
@@ -113,17 +113,17 @@ static int xtfpga_spi_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
return 0;
}
static void xtfpga_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct xtfpga_spi *xspi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct xtfpga_spi *xspi = spi_controller_get_devdata(host);
spi_bitbang_stop(&xspi->bitbang);
- spi_master_put(master);
+ spi_controller_put(host);
}
MODULE_ALIAS("platform:" XTFPGA_SPI_NAME);
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index 0db69a2a72ff..d6325c6be3d4 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -54,10 +54,10 @@
#define ZYNQ_QSPI_CONFIG_MSTREN_MASK BIT(0) /* Master Mode */
/*
- * QSPI Configuration Register - Baud rate and slave select
+ * QSPI Configuration Register - Baud rate and target select
*
* These are the values used in the calculation of baud rate divisor and
- * setting the slave select.
+ * setting the target select.
*/
#define ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX GENMASK(2, 0) /* Baud rate maximum */
#define ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift */
@@ -164,14 +164,14 @@ static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset,
*
* The default settings of the QSPI controller's configurable parameters on
* reset are
- * - Master mode
+ * - Host mode
* - Baud rate divisor is set to 2
* - Tx threshold set to 1, Rx threshold set to 32
* - Flash memory interface mode enabled
* - Size of the word to be transferred as 8 bit
* This function performs the following actions
* - Disable and clear all the interrupts
- * - Enable manual slave select
+ * - Enable manual target select
* - Enable manual start
* - Deselect all the chip select lines
* - Set the size of the word to be transferred as 32 bit
@@ -289,7 +289,7 @@ static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
*/
static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
{
- struct spi_controller *ctlr = spi->master;
+ struct spi_controller *ctlr = spi->controller;
struct zynq_qspi *xqspi = spi_controller_get_devdata(ctlr);
u32 config_reg;
@@ -377,7 +377,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
*/
static int zynq_qspi_setup_op(struct spi_device *spi)
{
- struct spi_controller *ctlr = spi->master;
+ struct spi_controller *ctlr = spi->controller;
struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr);
if (ctlr->busy)
@@ -525,7 +525,7 @@ static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
+ struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->controller);
int err = 0, i;
u8 *tmpbuf;
@@ -637,7 +637,7 @@ static int zynq_qspi_probe(struct platform_device *pdev)
struct zynq_qspi *xqspi;
u32 num_cs;
- ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
+ ctlr = spi_alloc_host(&pdev->dev, sizeof(*xqspi));
if (!ctlr)
return -ENOMEM;
@@ -647,14 +647,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
- goto remove_master;
+ goto remove_ctlr;
}
xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(xqspi->pclk)) {
dev_err(&pdev->dev, "pclk clock not found.\n");
ret = PTR_ERR(xqspi->pclk);
- goto remove_master;
+ goto remove_ctlr;
}
init_completion(&xqspi->data_completion);
@@ -663,13 +663,13 @@ static int zynq_qspi_probe(struct platform_device *pdev)
if (IS_ERR(xqspi->refclk)) {
dev_err(&pdev->dev, "ref_clk clock not found.\n");
ret = PTR_ERR(xqspi->refclk);
- goto remove_master;
+ goto remove_ctlr;
}
ret = clk_prepare_enable(xqspi->pclk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable APB clock.\n");
- goto remove_master;
+ goto remove_ctlr;
}
ret = clk_prepare_enable(xqspi->refclk);
@@ -715,7 +715,7 @@ static int zynq_qspi_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret) {
- dev_err(&pdev->dev, "spi_register_master failed\n");
+ dev_err(&pdev->dev, "devm_spi_register_controller failed\n");
goto clk_dis_all;
}
@@ -725,7 +725,7 @@ clk_dis_all:
clk_disable_unprepare(xqspi->refclk);
clk_dis_pclk:
clk_disable_unprepare(xqspi->pclk);
-remove_master:
+remove_ctlr:
spi_controller_put(ctlr);
return ret;
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 9a46b2478f4e..99524a3c9f38 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Xilinx Zynq UltraScale+ MPSoC Quad-SPI (QSPI) controller driver
- * (master mode only)
+ * (host mode only)
*
* Copyright (C) 2009 - 2015 Xilinx, Inc.
*/
@@ -235,21 +235,21 @@ static inline void zynqmp_gqspi_write(struct zynqmp_qspi *xqspi, u32 offset,
}
/**
- * zynqmp_gqspi_selectslave - For selection of slave device
+ * zynqmp_gqspi_selecttarget - For selection of target device
* @instanceptr: Pointer to the zynqmp_qspi structure
- * @slavecs: For chip select
- * @slavebus: To check which bus is selected- upper or lower
+ * @targetcs: For chip select
+ * @targetbus: To check which bus is selected- upper or lower
*/
-static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
- u8 slavecs, u8 slavebus)
+static void zynqmp_gqspi_selecttarget(struct zynqmp_qspi *instanceptr,
+ u8 targetcs, u8 targetbus)
{
/*
* Bus and CS lines selected here will be updated in the instance and
* used for subsequent GENFIFO entries during transfer.
*/
- /* Choose slave select line */
- switch (slavecs) {
+ /* Choose target select line */
+ switch (targetcs) {
case GQSPI_SELECT_FLASH_CS_BOTH:
instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
GQSPI_GENFIFO_CS_UPPER;
@@ -261,11 +261,11 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER;
break;
default:
- dev_warn(instanceptr->dev, "Invalid slave select\n");
+ dev_warn(instanceptr->dev, "Invalid target select\n");
}
/* Choose the bus */
- switch (slavebus) {
+ switch (targetbus) {
case GQSPI_SELECT_FLASH_BUS_BOTH:
instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER |
GQSPI_GENFIFO_BUS_UPPER;
@@ -277,7 +277,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER;
break;
default:
- dev_warn(instanceptr->dev, "Invalid slave bus\n");
+ dev_warn(instanceptr->dev, "Invalid target bus\n");
}
}
@@ -337,13 +337,13 @@ static void zynqmp_qspi_set_tapdelay(struct zynqmp_qspi *xqspi, u32 baudrateval)
*
* The default settings of the QSPI controller's configurable parameters on
* reset are
- * - Master mode
+ * - Host mode
* - TX threshold set to 1
* - RX threshold set to 1
* - Flash memory interface mode enabled
* This function performs the following actions
* - Disable and clear all the interrupts
- * - Enable manual slave select
+ * - Enable manual target select
* - Enable manual start
* - Deselect all the chip select lines
* - Set the little endian mode of TX FIFO
@@ -426,9 +426,9 @@ static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
GQSPI_RX_FIFO_THRESHOLD);
zynqmp_gqspi_write(xqspi, GQSPI_GF_THRESHOLD_OFST,
GQSPI_GEN_FIFO_THRESHOLD_RESET_VAL);
- zynqmp_gqspi_selectslave(xqspi,
- GQSPI_SELECT_FLASH_CS_LOWER,
- GQSPI_SELECT_FLASH_BUS_LOWER);
+ zynqmp_gqspi_selecttarget(xqspi,
+ GQSPI_SELECT_FLASH_CS_LOWER,
+ GQSPI_SELECT_FLASH_BUS_LOWER);
/* Initialize DMA */
zynqmp_gqspi_write(xqspi,
GQSPI_QSPIDMA_DST_CTRL_OFST,
@@ -459,7 +459,7 @@ static void zynqmp_qspi_copy_read_data(struct zynqmp_qspi *xqspi,
*/
static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
{
- struct zynqmp_qspi *xqspi = spi_master_get_devdata(qspi->master);
+ struct zynqmp_qspi *xqspi = spi_controller_get_devdata(qspi->controller);
ulong timeout;
u32 genfifoentry = 0, statusreg;
@@ -594,7 +594,7 @@ static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi,
*/
static int zynqmp_qspi_setup_op(struct spi_device *qspi)
{
- struct spi_controller *ctlr = qspi->master;
+ struct spi_controller *ctlr = qspi->controller;
struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
if (ctlr->busy)
@@ -1048,7 +1048,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct zynqmp_qspi *xqspi = spi_controller_get_devdata
- (mem->spi->master);
+ (mem->spi->controller);
int err = 0, i;
u32 genfifoentry = 0;
u16 opcode = op->cmd.opcode;
@@ -1224,7 +1224,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
u32 num_cs;
const struct qspi_platform_data *p_data;
- ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
+ ctlr = spi_alloc_host(&pdev->dev, sizeof(*xqspi));
if (!ctlr)
return -ENOMEM;
@@ -1240,27 +1240,27 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
- goto remove_master;
+ goto remove_ctlr;
}
xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(xqspi->pclk)) {
dev_err(dev, "pclk clock not found.\n");
ret = PTR_ERR(xqspi->pclk);
- goto remove_master;
+ goto remove_ctlr;
}
xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
if (IS_ERR(xqspi->refclk)) {
dev_err(dev, "ref_clk clock not found.\n");
ret = PTR_ERR(xqspi->refclk);
- goto remove_master;
+ goto remove_ctlr;
}
ret = clk_prepare_enable(xqspi->pclk);
if (ret) {
dev_err(dev, "Unable to enable APB clock.\n");
- goto remove_master;
+ goto remove_ctlr;
}
ret = clk_prepare_enable(xqspi->refclk);
@@ -1346,7 +1346,7 @@ clk_dis_all:
clk_disable_unprepare(xqspi->refclk);
clk_dis_pclk:
clk_disable_unprepare(xqspi->pclk);
-remove_master:
+remove_ctlr:
spi_controller_put(ctlr);
return ret;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8ead7acb99f3..7477a11e12be 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -612,10 +612,21 @@ static int spi_dev_check(struct device *dev, void *data)
{
struct spi_device *spi = to_spi_device(dev);
struct spi_device *new_spi = data;
-
- if (spi->controller == new_spi->controller &&
- spi_get_chipselect(spi, 0) == spi_get_chipselect(new_spi, 0))
- return -EBUSY;
+ int idx, nw_idx;
+ u8 cs, cs_nw;
+
+ if (spi->controller == new_spi->controller) {
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ cs = spi_get_chipselect(spi, idx);
+ for (nw_idx = 0; nw_idx < SPI_CS_CNT_MAX; nw_idx++) {
+ cs_nw = spi_get_chipselect(new_spi, nw_idx);
+ if (cs != 0xFF && cs_nw != 0xFF && cs == cs_nw) {
+ dev_err(dev, "chipselect %d already in use\n", cs_nw);
+ return -EBUSY;
+ }
+ }
+ }
+ }
return 0;
}
@@ -629,13 +640,32 @@ static int __spi_add_device(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
- int status;
+ int status, idx, nw_idx;
+ u8 cs, nw_cs;
+
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ /* Chipselects are numbered 0..max; validate. */
+ cs = spi_get_chipselect(spi, idx);
+ if (cs != 0xFF && cs >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
+ ctlr->num_chipselect);
+ return -EINVAL;
+ }
+ }
- /* Chipselects are numbered 0..max; validate. */
- if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
- dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
- ctlr->num_chipselect);
- return -EINVAL;
+ /*
+ * Make sure that multiple logical CS don't map to the same physical CS.
+ * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
+ */
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ cs = spi_get_chipselect(spi, idx);
+ for (nw_idx = idx + 1; nw_idx < SPI_CS_CNT_MAX; nw_idx++) {
+ nw_cs = spi_get_chipselect(spi, nw_idx);
+ if (cs != 0xFF && nw_cs != 0xFF && cs == nw_cs) {
+ dev_err(dev, "chipselect %d already in use\n", nw_cs);
+ return -EBUSY;
+ }
+ }
}
/* Set the bus ID string */
@@ -647,11 +677,8 @@ static int __spi_add_device(struct spi_device *spi)
* its configuration.
*/
status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
- if (status) {
- dev_err(dev, "chipselect %d already in use\n",
- spi_get_chipselect(spi, 0));
+ if (status)
return status;
- }
/* Controller may unregister concurrently */
if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
@@ -659,8 +686,15 @@ static int __spi_add_device(struct spi_device *spi)
return -ENODEV;
}
- if (ctlr->cs_gpiods)
- spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]);
+ if (ctlr->cs_gpiods) {
+ u8 cs;
+
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ cs = spi_get_chipselect(spi, idx);
+ if (cs != 0xFF)
+ spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
+ }
+ }
/*
* Drivers may modify this initial i/o setup, but will
@@ -701,6 +735,9 @@ int spi_add_device(struct spi_device *spi)
struct spi_controller *ctlr = spi->controller;
int status;
+ /* Set the bus ID string */
+ spi_dev_set_name(spi);
+
mutex_lock(&ctlr->add_lock);
status = __spi_add_device(spi);
mutex_unlock(&ctlr->add_lock);
@@ -727,6 +764,7 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
{
struct spi_device *proxy;
int status;
+ u8 idx;
/*
* NOTE: caller did any chip->bus_num checks necessary.
@@ -742,6 +780,18 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
+ /*
+ * Zero (0) is a valid physical CS value and can be located at any
+ * logical CS in spi->chip_select[]. If every entry were initialized
+ * to 0, a valid physical CS 0 could not be told apart from an unused
+ * logical CS whose physical CS happens to be 0. To avoid this,
+ * initialize all entries to 0xFF; unused logical CS then carry the
+ * 0xFF sentinel and can be ignored when performing physical CS
+ * validity checks.
+ */
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ spi_set_chipselect(proxy, idx, 0xFF);
+
spi_set_chipselect(proxy, 0, chip->chip_select);
proxy->max_speed_hz = chip->max_speed_hz;
proxy->mode = chip->mode;
@@ -750,6 +800,15 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
proxy->dev.platform_data = (void *) chip->platform_data;
proxy->controller_data = chip->controller_data;
proxy->controller_state = NULL;
+ /*
+ * spi->chip_select[i] gives the physical CS for logical CS i. A
+ * logical CS is selected by setting the ith bit in spi->cs_index_mask;
+ * for example, if spi->cs_index_mask = 0x01, logical CS 0 is active
+ * and spi->chip_select[0] supplies the physical CS. By default the
+ * physical CS number lives in spi->chip_select[0], so set
+ * spi->cs_index_mask to 0x01.
+ */
+ proxy->cs_index_mask = 0x01;
if (chip->swnode) {
status = device_add_software_node(&proxy->dev, chip->swnode);
@@ -942,32 +1001,51 @@ static void spi_res_release(struct spi_controller *ctlr, struct spi_message *mes
}
/*-------------------------------------------------------------------------*/
+static inline bool spi_is_last_cs(struct spi_device *spi)
+{
+ u8 idx;
+ bool last = false;
+
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ if ((spi->cs_index_mask >> idx) & 0x01) {
+ if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
+ last = true;
+ }
+ }
+ return last;
+}
+
static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
bool activate = enable;
+ u8 idx;
/*
* Avoid calling into the driver (or doing delays) if the chip select
* isn't actually changing from the last time this was called.
*/
- if (!force && ((enable && spi->controller->last_cs == spi_get_chipselect(spi, 0)) ||
- (!enable && spi->controller->last_cs != spi_get_chipselect(spi, 0))) &&
+ if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
+ spi_is_last_cs(spi)) ||
+ (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
+ !spi_is_last_cs(spi))) &&
(spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
return;
trace_spi_set_cs(spi, activate);
- spi->controller->last_cs = enable ? spi_get_chipselect(spi, 0) : -1;
+ spi->controller->last_cs_index_mask = spi->cs_index_mask;
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : -1;
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
- if ((spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) && !activate)
- spi_delay_exec(&spi->cs_hold, NULL);
-
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (spi_get_csgpiod(spi, 0)) {
+ if (spi_is_csgpiod(spi)) {
+ if (!spi->controller->set_cs_timing && !activate)
+ spi_delay_exec(&spi->cs_hold, NULL);
+
if (!(spi->mode & SPI_NO_CS)) {
/*
* Historically ACPI has no means of the GPIO polarity and
@@ -979,26 +1057,38 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
* ambiguity. That's why we use enable, that takes SPI_CS_HIGH
* into account.
*/
- if (has_acpi_companion(&spi->dev))
- gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), !enable);
- else
- /* Polarity handled by GPIO library */
- gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate);
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ if (((spi->cs_index_mask >> idx) & 0x01) &&
+ spi_get_csgpiod(spi, idx)) {
+ if (has_acpi_companion(&spi->dev))
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
+ !enable);
+ else
+ /* Polarity handled by GPIO library */
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
+ activate);
+
+ if (activate)
+ spi_delay_exec(&spi->cs_setup, NULL);
+ else
+ spi_delay_exec(&spi->cs_inactive, NULL);
+ }
+ }
}
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
spi->controller->set_cs)
spi->controller->set_cs(spi, !enable);
+
+ if (!spi->controller->set_cs_timing) {
+ if (activate)
+ spi_delay_exec(&spi->cs_setup, NULL);
+ else
+ spi_delay_exec(&spi->cs_inactive, NULL);
+ }
} else if (spi->controller->set_cs) {
spi->controller->set_cs(spi, !enable);
}
-
- if (spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) {
- if (activate)
- spi_delay_exec(&spi->cs_setup, NULL);
- else
- spi_delay_exec(&spi->cs_inactive, NULL);
- }
}
#ifdef CONFIG_HAS_DMA
@@ -1361,6 +1451,9 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
"SPI transfer timed out\n");
return -ETIMEDOUT;
}
+
+ if (xfer->error & SPI_TRANS_FAIL_IO)
+ return -EIO;
}
return 0;
@@ -2222,8 +2315,8 @@ static void of_spi_parse_dt_cs_delay(struct device_node *nc,
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
struct device_node *nc)
{
- u32 value;
- int rc;
+ u32 value, cs[SPI_CS_CNT_MAX];
+ int rc, idx;
/* Mode (clock phase/polarity/etc.) */
if (of_property_read_bool(nc, "spi-cpha"))
@@ -2295,14 +2388,53 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
return 0;
}
+ if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
+ dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Zero (0) is a valid physical CS value and can be located at any
+ * logical CS in spi->chip_select[]. If every entry were initialized
+ * to 0, a valid physical CS 0 could not be told apart from an unused
+ * logical CS whose physical CS happens to be 0. To avoid this,
+ * initialize all entries to 0xFF; unused logical CS then carry the
+ * 0xFF sentinel and can be ignored when performing physical CS
+ * validity checks.
+ */
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ spi_set_chipselect(spi, idx, 0xFF);
+
/* Device address */
- rc = of_property_read_u32(nc, "reg", &value);
- if (rc) {
+ rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
+ SPI_CS_CNT_MAX);
+ if (rc < 0) {
dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
nc, rc);
return rc;
}
- spi_set_chipselect(spi, 0, value);
+ if (rc > ctlr->num_chipselect) {
+ dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
+ nc, rc);
+ return rc;
+ }
+ if ((of_property_read_bool(nc, "parallel-memories")) &&
+ (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
+ dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
+ return -EINVAL;
+ }
+ for (idx = 0; idx < rc; idx++)
+ spi_set_chipselect(spi, idx, cs[idx]);
+
+ /*
+ * spi->chip_select[i] gives the physical CS for logical CS i. A
+ * logical CS is selected by setting the ith bit in spi->cs_index_mask;
+ * for example, if spi->cs_index_mask = 0x01, logical CS 0 is active
+ * and spi->chip_select[0] supplies the physical CS. By default the
+ * physical CS number lives in spi->chip_select[0], so set
+ * spi->cs_index_mask to 0x01.
+ */
+ spi->cs_index_mask = 0x01;
/* Device speed */
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
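
The hunk above reads a variable-length "reg" property so that one device node can claim several physical chip selects, then rejects nodes that ask for more than the controller supports. A self-contained sketch of the resulting mapping, using a mock array in place of of_property_read_variable_u32_array() and illustrative values throughout:

/* Hedged sketch of the multi-entry "reg" to chip_select[] mapping. */
#include <stdio.h>
#include <stdint.h>

#define SPI_CS_CNT_MAX 4   /* illustrative width */

int main(void)
{
	uint32_t reg[] = { 1, 2 };              /* e.g. a DT node with reg = <1 2>; */
	int rc = sizeof(reg) / sizeof(reg[0]);  /* entries "read" from the property */
	uint8_t chip_select[SPI_CS_CNT_MAX];
	unsigned int num_chipselect = 4;        /* assumed controller limit */

	if (rc > (int)num_chipselect) {
		fprintf(stderr, "more CS entries than the controller supports\n");
		return 1;
	}

	/* Unused slots keep the 0xFF sentinel, used slots take the reg values. */
	for (int idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		chip_select[idx] = 0xFF;
	for (int idx = 0; idx < rc; idx++)
		chip_select[idx] = (uint8_t)reg[idx];

	for (int idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		printf("chip_select[%d] = 0x%02X\n", idx, chip_select[idx]);
	return 0;
}
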
@@ -2408,6 +2540,7 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
struct spi_controller *ctlr = spi->controller;
struct spi_device *ancillary;
int rc = 0;
+ u8 idx;
/* Alloc an spi_device */
ancillary = spi_alloc_device(ctlr);
@@ -2418,12 +2551,33 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
+ /*
+ * Zero (0) is a valid physical CS value and can be located at any
+ * logical CS in spi->chip_select[]. If every entry were initialized
+ * to 0, a valid physical CS 0 could not be told apart from an unused
+ * logical CS whose physical CS happens to be 0. To avoid this,
+ * initialize all entries to 0xFF; unused logical CS then carry the
+ * 0xFF sentinel and can be ignored when performing physical CS
+ * validity checks.
+ */
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ spi_set_chipselect(ancillary, idx, 0xFF);
+
/* Use provided chip-select for ancillary device */
spi_set_chipselect(ancillary, 0, chip_select);
/* Take over SPI mode/speed from SPI main device */
ancillary->max_speed_hz = spi->max_speed_hz;
ancillary->mode = spi->mode;
+ /*
+ * spi->chip_select[i] gives the physical CS for logical CS i. A
+ * logical CS is selected by setting the ith bit in spi->cs_index_mask;
+ * for example, if spi->cs_index_mask = 0x01, logical CS 0 is active
+ * and spi->chip_select[0] supplies the physical CS. By default the
+ * physical CS number lives in spi->chip_select[0], so set
+ * spi->cs_index_mask to 0x01.
+ */
+ ancillary->cs_index_mask = 0x01;
WARN_ON(!mutex_is_locked(&ctlr->add_lock));
@@ -2626,6 +2780,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
struct acpi_spi_lookup lookup = {};
struct spi_device *spi;
int ret;
+ u8 idx;
if (!ctlr && index == -1)
return ERR_PTR(-EINVAL);
@@ -2661,12 +2816,33 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
return ERR_PTR(-ENOMEM);
}
+ /*
+ * Zero (0) is a valid physical CS value and can be located at any
+ * logical CS in spi->chip_select[]. If every entry were initialized
+ * to 0, a valid physical CS 0 could not be told apart from an unused
+ * logical CS whose physical CS happens to be 0. To avoid this,
+ * initialize all entries to 0xFF; unused logical CS then carry the
+ * 0xFF sentinel and can be ignored when performing physical CS
+ * validity checks.
+ */
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ spi_set_chipselect(spi, idx, 0xFF);
+
ACPI_COMPANION_SET(&spi->dev, adev);
spi->max_speed_hz = lookup.max_speed_hz;
spi->mode |= lookup.mode;
spi->irq = lookup.irq;
spi->bits_per_word = lookup.bits_per_word;
spi_set_chipselect(spi, 0, lookup.chip_select);
+ /*
+ * spi->chip_select[i] gives the physical CS for logical CS i. A
+ * logical CS is selected by setting the ith bit in spi->cs_index_mask;
+ * for example, if spi->cs_index_mask = 0x01, logical CS 0 is active
+ * and spi->chip_select[0] supplies the physical CS. By default the
+ * physical CS number lives in spi->chip_select[0], so set
+ * spi->cs_index_mask to 0x01.
+ */
+ spi->cs_index_mask = 0x01;
return spi;
}
@@ -3100,6 +3276,7 @@ int spi_register_controller(struct spi_controller *ctlr)
struct boardinfo *bi;
int first_dynamic;
int status;
+ int idx;
if (!dev)
return -ENODEV;
@@ -3164,7 +3341,8 @@ int spi_register_controller(struct spi_controller *ctlr)
}
/* Setting last_cs to -1 means no chip selected */
- ctlr->last_cs = -1;
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ ctlr->last_cs[idx] = -1;
status = device_add(&ctlr->dev);
if (status < 0)
@@ -3889,7 +4067,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
* cs_change is set for each transfer.
*/
if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
- spi_get_csgpiod(spi, 0))) {
+ spi_is_csgpiod(spi))) {
size_t maxsize = BITS_TO_BYTES(spi->bits_per_word);
int ret;
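
Across the spi.c hunks above, spi->cs_index_mask decides which chip_select[] slots take part in a transaction, and spi_set_cs() walks only those slots. A hedged sketch of that selection loop; the structure name, SPI_CS_CNT_MAX width, and values are stand-ins for illustration, not the in-tree definitions:

/* Hedged sketch: iterate only the active logical CS slots of a device. */
#include <stdint.h>
#include <stdio.h>

#define SPI_CS_CNT_MAX  4      /* illustrative width */
#define SPI_CS_UNUSED   0xFF   /* sentinel used by the hunks above */

struct demo_spi_device {
	uint8_t  chip_select[SPI_CS_CNT_MAX]; /* logical -> physical CS map */
	uint32_t cs_index_mask;               /* bit i set => logical CS i is active */
};

/* Walk only the active logical CS slots, skipping unused 0xFF entries. */
static void demo_assert_cs(const struct demo_spi_device *spi, int activate)
{
	for (int idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
		if (!((spi->cs_index_mask >> idx) & 0x01))
			continue;
		if (spi->chip_select[idx] == SPI_CS_UNUSED)
			continue;
		printf("%s physical CS %u (logical %d)\n",
		       activate ? "assert" : "deassert",
		       spi->chip_select[idx], idx);
	}
}

int main(void)
{
	struct demo_spi_device spi = {
		.chip_select   = { 2, SPI_CS_UNUSED, SPI_CS_UNUSED, SPI_CS_UNUSED },
		.cs_index_mask = 0x01, /* default: only logical CS 0 in use */
	};

	demo_assert_cs(&spi, 1);
	demo_assert_cs(&spi, 0);
	return 0;
}
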
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index c81a00fbca7d..59883502eff4 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -76,10 +76,6 @@ config THERMAL_OF
Say 'Y' here if you need to build thermal infrastructure
based on device tree.
-config THERMAL_ACPI
- depends on ACPI
- bool
-
config THERMAL_WRITABLE_TRIPS
bool "Enable writable trip points"
help
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index c934cab309ae..a8318d671036 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -13,7 +13,6 @@ thermal_sys-$(CONFIG_THERMAL_NETLINK) += thermal_netlink.o
# interface to/from other layers providing sensors
thermal_sys-$(CONFIG_THERMAL_HWMON) += thermal_hwmon.o
thermal_sys-$(CONFIG_THERMAL_OF) += thermal_of.o
-thermal_sys-$(CONFIG_THERMAL_ACPI) += thermal_acpi.o
# governors
CFLAGS_gov_power_allocator.o := -I$(src)
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
index 5877cde25b79..df7a5ed55385 100644
--- a/drivers/thermal/amlogic_thermal.c
+++ b/drivers/thermal/amlogic_thermal.c
@@ -167,13 +167,11 @@ static int amlogic_thermal_enable(struct amlogic_thermal *data)
return 0;
}
-static int amlogic_thermal_disable(struct amlogic_thermal *data)
+static void amlogic_thermal_disable(struct amlogic_thermal *data)
{
regmap_update_bits(data->regmap, TSENSOR_CFG_REG1,
TSENSOR_CFG_REG1_ENABLE, 0);
clk_disable_unprepare(data->clk);
-
- return 0;
}
static int amlogic_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
@@ -298,27 +296,30 @@ static void amlogic_thermal_remove(struct platform_device *pdev)
amlogic_thermal_disable(data);
}
-static int __maybe_unused amlogic_thermal_suspend(struct device *dev)
+static int amlogic_thermal_suspend(struct device *dev)
{
struct amlogic_thermal *data = dev_get_drvdata(dev);
- return amlogic_thermal_disable(data);
+ amlogic_thermal_disable(data);
+
+ return 0;
}
-static int __maybe_unused amlogic_thermal_resume(struct device *dev)
+static int amlogic_thermal_resume(struct device *dev)
{
struct amlogic_thermal *data = dev_get_drvdata(dev);
return amlogic_thermal_enable(data);
}
-static SIMPLE_DEV_PM_OPS(amlogic_thermal_pm_ops,
- amlogic_thermal_suspend, amlogic_thermal_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(amlogic_thermal_pm_ops,
+ amlogic_thermal_suspend,
+ amlogic_thermal_resume);
static struct platform_driver amlogic_thermal_driver = {
.driver = {
.name = "amlogic_thermal",
- .pm = &amlogic_thermal_pm_ops,
+ .pm = pm_ptr(&amlogic_thermal_pm_ops),
.of_match_table = of_amlogic_thermal_match,
},
.probe = amlogic_thermal_probe,
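The amlogic hunks above switch from SIMPLE_DEV_PM_OPS with __maybe_unused callbacks to DEFINE_SIMPLE_DEV_PM_OPS plus pm_ptr(). A minimal sketch of that pattern for a hypothetical "foo" platform driver (the foo_* names are made up, only the macros are real): with pm_ptr() the ops pointer becomes NULL when CONFIG_PM is disabled, and the then-unreferenced callbacks are discarded by the compiler without needing __maybe_unused.

/* Sketch of the DEFINE_SIMPLE_DEV_PM_OPS + pm_ptr() pattern used above;
 * "foo" is a hypothetical driver, not part of the patch.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* bring the hardware back up */
	return 0;
}

/* No __maybe_unused needed: with CONFIG_PM=n the callbacks drop out
 * together with the ops structure.
 */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo",
		.pm = pm_ptr(&foo_pm_ops),	/* NULL when CONFIG_PM=n */
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");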
diff --git a/drivers/thermal/cpuidle_cooling.c b/drivers/thermal/cpuidle_cooling.c
index 69f4c0a8dfcc..f678c1281862 100644
--- a/drivers/thermal/cpuidle_cooling.c
+++ b/drivers/thermal/cpuidle_cooling.c
@@ -66,7 +66,7 @@ static unsigned int cpuidle_cooling_runtime(unsigned int idle_duration_us,
* @state : a pointer to the state variable to be filled
*
* The function always returns 100 as the injection ratio. It is
- * percentile based for consistency accross different platforms.
+ * percentile based for consistency across different platforms.
*
* Return: The function can not fail, it is always zero
*/
@@ -146,7 +146,7 @@ static int cpuidle_cooling_set_cur_state(struct thermal_cooling_device *cdev,
return 0;
}
-/**
+/*
* cpuidle_cooling_ops - thermal cooling device ops
*/
static struct thermal_cooling_device_ops cpuidle_cooling_ops = {
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 83d4f451b1a9..7b6aa265ff6a 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -47,6 +47,22 @@ static inline s64 div_frac(s64 x, s64 y)
}
/**
+ * struct power_actor - internal power information for power actor
+ * @req_power: requested power value (not weighted)
+ * @max_power: max allocatable power for this actor
+ * @granted_power: granted power for this actor
+ * @extra_actor_power: extra power that this actor can receive
+ * @weighted_req_power: weighted requested power as input to IPA
+ */
+struct power_actor {
+ u32 req_power;
+ u32 max_power;
+ u32 granted_power;
+ u32 extra_actor_power;
+ u32 weighted_req_power;
+};
+
+/**
* struct power_allocator_params - parameters for the power allocator governor
* @allocated_tzp: whether we have allocated tzp for this thermal zone and
* it needs to be freed on unbind
@@ -59,9 +75,12 @@ static inline s64 div_frac(s64 x, s64 y)
* governor switches on when this trip point is crossed.
* If the thermal zone only has one passive trip point,
* @trip_switch_on should be NULL.
- * @trip_max_desired_temperature: last passive trip point of the thermal
- * zone. The temperature we are
- * controlling for.
+ * @trip_max: last passive trip point of the thermal zone. The
+ * temperature we are controlling for.
+ * @total_weight: Sum of all thermal instances weights
+ * @num_actors: number of cooling devices supporting IPA callbacks
+ * @buffer_size: internal buffer size, to avoid runtime re-calculation
+ * @power: buffer for all power actors internal power information
*/
struct power_allocator_params {
bool allocated_tzp;
@@ -69,9 +88,20 @@ struct power_allocator_params {
s32 prev_err;
u32 sustainable_power;
const struct thermal_trip *trip_switch_on;
- const struct thermal_trip *trip_max_desired_temperature;
+ const struct thermal_trip *trip_max;
+ int total_weight;
+ unsigned int num_actors;
+ unsigned int buffer_size;
+ struct power_actor *power;
};
+static bool power_actor_is_valid(struct power_allocator_params *params,
+ struct thermal_instance *instance)
+{
+ return (instance->trip == params->trip_max &&
+ cdev_is_power_actor(instance->cdev));
+}
+
/**
* estimate_sustainable_power() - Estimate the sustainable power of a thermal zone
* @tz: thermal zone we are operating in
@@ -85,20 +115,17 @@ struct power_allocator_params {
*/
static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
{
- u32 sustainable_power = 0;
- struct thermal_instance *instance;
struct power_allocator_params *params = tz->governor_data;
+ struct thermal_cooling_device *cdev;
+ struct thermal_instance *instance;
+ u32 sustainable_power = 0;
+ u32 min_power;
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- struct thermal_cooling_device *cdev = instance->cdev;
- u32 min_power;
-
- if (instance->trip != params->trip_max_desired_temperature)
- continue;
-
- if (!cdev_is_power_actor(cdev))
+ if (!power_actor_is_valid(params, instance))
continue;
+ cdev = instance->cdev;
if (cdev->ops->state2power(cdev, instance->upper, &min_power))
continue;
@@ -212,10 +239,10 @@ static u32 pid_controller(struct thermal_zone_device *tz,
int control_temp,
u32 max_allocatable_power)
{
+ struct power_allocator_params *params = tz->governor_data;
s64 p, i, d, power_range;
s32 err, max_power_frac;
u32 sustainable_power;
- struct power_allocator_params *params = tz->governor_data;
max_power_frac = int_to_frac(max_allocatable_power);
@@ -303,15 +330,10 @@ power_actor_set_power(struct thermal_cooling_device *cdev,
/**
* divvy_up_power() - divvy the allocated power between the actors
- * @req_power: each actor's requested power
- * @max_power: each actor's maximum available power
- * @num_actors: size of the @req_power, @max_power and @granted_power's array
- * @total_req_power: sum of @req_power
+ * @power: buffer for all power actors internal power information
+ * @num_actors: number of power actors in this thermal zone
+ * @total_req_power: sum of all weighted requested power for all actors
* @power_range: total allocated power
- * @granted_power: output array: each actor's granted power
- * @extra_actor_power: an appropriately sized array to be used in the
- * function as temporary storage of the extra power given
- * to the actors
*
* This function divides the total allocated power (@power_range)
* fairly between the actors. It first tries to give each actor a
@@ -324,15 +346,12 @@ power_actor_set_power(struct thermal_cooling_device *cdev,
* If any actor received more than their maximum power, then that
* surplus is re-divvied among the actors based on how far they are
* from their respective maximums.
- *
- * Granted power for each actor is written to @granted_power, which
- * should've been allocated by the calling function.
*/
-static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
- u32 total_req_power, u32 power_range,
- u32 *granted_power, u32 *extra_actor_power)
+static void divvy_up_power(struct power_actor *power, int num_actors,
+ u32 total_req_power, u32 power_range)
{
- u32 extra_power, capped_extra_power;
+ u32 capped_extra_power = 0;
+ u32 extra_power = 0;
int i;
/*
@@ -341,24 +360,23 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
if (!total_req_power)
total_req_power = 1;
- capped_extra_power = 0;
- extra_power = 0;
for (i = 0; i < num_actors; i++) {
- u64 req_range = (u64)req_power[i] * power_range;
+ struct power_actor *pa = &power[i];
+ u64 req_range = (u64)pa->req_power * power_range;
- granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
- total_req_power);
+ pa->granted_power = DIV_ROUND_CLOSEST_ULL(req_range,
+ total_req_power);
- if (granted_power[i] > max_power[i]) {
- extra_power += granted_power[i] - max_power[i];
- granted_power[i] = max_power[i];
+ if (pa->granted_power > pa->max_power) {
+ extra_power += pa->granted_power - pa->max_power;
+ pa->granted_power = pa->max_power;
}
- extra_actor_power[i] = max_power[i] - granted_power[i];
- capped_extra_power += extra_actor_power[i];
+ pa->extra_actor_power = pa->max_power - pa->granted_power;
+ capped_extra_power += pa->extra_actor_power;
}
- if (!extra_power)
+ if (!extra_power || !capped_extra_power)
return;
/*
@@ -366,127 +384,95 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
* how far they are from the max
*/
extra_power = min(extra_power, capped_extra_power);
- if (capped_extra_power > 0)
- for (i = 0; i < num_actors; i++) {
- u64 extra_range = (u64)extra_actor_power[i] * extra_power;
- granted_power[i] += DIV_ROUND_CLOSEST_ULL(extra_range,
- capped_extra_power);
- }
+
+ for (i = 0; i < num_actors; i++) {
+ struct power_actor *pa = &power[i];
+ u64 extra_range = pa->extra_actor_power;
+
+ extra_range *= extra_power;
+ pa->granted_power += DIV_ROUND_CLOSEST_ULL(extra_range,
+ capped_extra_power);
+ }
}
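A self-contained user-space sketch of the arithmetic divvy_up_power() performs, useful for checking the fair-division step by hand. It is not kernel code: it uses plain truncating division instead of DIV_ROUND_CLOSEST_ULL, and the field names are shortened.

/* Worked example of the divvy_up_power() arithmetic, user-space sketch. */
#include <stdint.h>
#include <stdio.h>

struct actor { uint32_t req, max, granted, headroom; };

static void divvy(struct actor *a, int n, uint32_t total_req, uint32_t range)
{
	uint32_t extra = 0, capped = 0;

	if (!total_req)
		total_req = 1;

	/* First pass: grant power proportional to each request, cap at max. */
	for (int i = 0; i < n; i++) {
		a[i].granted = (uint64_t)a[i].req * range / total_req;
		if (a[i].granted > a[i].max) {
			extra += a[i].granted - a[i].max;
			a[i].granted = a[i].max;
		}
		a[i].headroom = a[i].max - a[i].granted;
		capped += a[i].headroom;
	}

	if (!extra || !capped)
		return;

	/* Second pass: redistribute the surplus in proportion to headroom. */
	if (extra > capped)
		extra = capped;
	for (int i = 0; i < n; i++)
		a[i].granted += (uint64_t)a[i].headroom * extra / capped;
}

int main(void)
{
	/* requests 3000/1000 mW, caps 1500/2000 mW, 2500 mW to hand out */
	struct actor a[] = { { 3000, 1500 }, { 1000, 2000 } };

	divvy(a, 2, 4000, 2500);
	for (int i = 0; i < 2; i++)
		printf("actor %d granted %u mW\n", i, a[i].granted);
	return 0;
}

With requests of 3000/1000 mW, caps of 1500/2000 mW and 2500 mW to distribute, the first pass grants 1875/625 mW; the 375 mW above actor 0's cap is then handed to actor 1, ending at 1500/1000 mW.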
-static int allocate_power(struct thermal_zone_device *tz,
- int control_temp)
+static int allocate_power(struct thermal_zone_device *tz, int control_temp)
{
- struct thermal_instance *instance;
struct power_allocator_params *params = tz->governor_data;
- const struct thermal_trip *trip_max_desired_temperature =
- params->trip_max_desired_temperature;
- u32 *req_power, *max_power, *granted_power, *extra_actor_power;
- u32 *weighted_req_power;
- u32 total_req_power, max_allocatable_power, total_weighted_req_power;
- u32 total_granted_power, power_range;
- int i, num_actors, total_weight, ret = 0;
-
- num_actors = 0;
- total_weight = 0;
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if ((instance->trip == trip_max_desired_temperature) &&
- cdev_is_power_actor(instance->cdev)) {
- num_actors++;
- total_weight += instance->weight;
- }
- }
+ unsigned int num_actors = params->num_actors;
+ struct power_actor *power = params->power;
+ struct thermal_cooling_device *cdev;
+ struct thermal_instance *instance;
+ u32 total_weighted_req_power = 0;
+ u32 max_allocatable_power = 0;
+ u32 total_granted_power = 0;
+ u32 total_req_power = 0;
+ u32 power_range, weight;
+ int i = 0, ret;
if (!num_actors)
return -ENODEV;
- /*
- * We need to allocate five arrays of the same size:
- * req_power, max_power, granted_power, extra_actor_power and
- * weighted_req_power. They are going to be needed until this
- * function returns. Allocate them all in one go to simplify
- * the allocation and deallocation logic.
- */
- BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
- BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
- BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
- BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
- req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL);
- if (!req_power)
- return -ENOMEM;
-
- max_power = &req_power[num_actors];
- granted_power = &req_power[2 * num_actors];
- extra_actor_power = &req_power[3 * num_actors];
- weighted_req_power = &req_power[4 * num_actors];
-
- i = 0;
- total_weighted_req_power = 0;
- total_req_power = 0;
- max_allocatable_power = 0;
+ /* Clean all buffers for new power estimations */
+ memset(power, 0, params->buffer_size);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- int weight;
- struct thermal_cooling_device *cdev = instance->cdev;
+ struct power_actor *pa = &power[i];
- if (instance->trip != trip_max_desired_temperature)
+ if (!power_actor_is_valid(params, instance))
continue;
- if (!cdev_is_power_actor(cdev))
- continue;
+ cdev = instance->cdev;
- if (cdev->ops->get_requested_power(cdev, &req_power[i]))
+ ret = cdev->ops->get_requested_power(cdev, &pa->req_power);
+ if (ret)
continue;
- if (!total_weight)
+ if (!params->total_weight)
weight = 1 << FRAC_BITS;
else
weight = instance->weight;
- weighted_req_power[i] = frac_to_int(weight * req_power[i]);
+ pa->weighted_req_power = frac_to_int(weight * pa->req_power);
- if (cdev->ops->state2power(cdev, instance->lower,
- &max_power[i]))
+ ret = cdev->ops->state2power(cdev, instance->lower,
+ &pa->max_power);
+ if (ret)
continue;
- total_req_power += req_power[i];
- max_allocatable_power += max_power[i];
- total_weighted_req_power += weighted_req_power[i];
+ total_req_power += pa->req_power;
+ max_allocatable_power += pa->max_power;
+ total_weighted_req_power += pa->weighted_req_power;
i++;
}
power_range = pid_controller(tz, control_temp, max_allocatable_power);
- divvy_up_power(weighted_req_power, max_power, num_actors,
- total_weighted_req_power, power_range, granted_power,
- extra_actor_power);
+ divvy_up_power(power, num_actors, total_weighted_req_power,
+ power_range);
- total_granted_power = 0;
i = 0;
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip != trip_max_desired_temperature)
- continue;
+ struct power_actor *pa = &power[i];
- if (!cdev_is_power_actor(instance->cdev))
+ if (!power_actor_is_valid(params, instance))
continue;
power_actor_set_power(instance->cdev, instance,
- granted_power[i]);
- total_granted_power += granted_power[i];
+ pa->granted_power);
+ total_granted_power += pa->granted_power;
+ trace_thermal_power_actor(tz, i, pa->req_power,
+ pa->granted_power);
i++;
}
- trace_thermal_power_allocator(tz, req_power, total_req_power,
- granted_power, total_granted_power,
+ trace_thermal_power_allocator(tz, total_req_power, total_granted_power,
num_actors, power_range,
max_allocatable_power, tz->temperature,
control_temp - tz->temperature);
- kfree(req_power);
-
- return ret;
+ return 0;
}
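The weighting in allocate_power() is fixed-point: instance->weight is a fraction scaled by 1 << FRAC_BITS and frac_to_int() shifts the product back down. A small sketch of that arithmetic, assuming FRAC_BITS is 10 purely for illustration (the real value is defined elsewhere in this file):

/* Sketch of the fixed-point weighting used in allocate_power();
 * the FRAC_BITS value of 10 is an assumption for this illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 10
#define int_to_frac(x)  ((x) << FRAC_BITS)
#define frac_to_int(x)  ((x) >> FRAC_BITS)

int main(void)
{
	uint32_t req_power = 2000;                /* mW requested by the actor */
	uint32_t weight = int_to_frac(1) / 2;     /* weight of 0.5 -> 512 */
	uint32_t weighted = frac_to_int(weight * req_power);

	/* (512 * 2000) >> 10 == 1000: the actor competes with half its request */
	printf("weighted request = %u mW\n", weighted);
	return 0;
}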
/**
@@ -531,13 +517,13 @@ static void get_governor_trips(struct thermal_zone_device *tz,
if (last_passive) {
params->trip_switch_on = first_passive;
- params->trip_max_desired_temperature = last_passive;
+ params->trip_max = last_passive;
} else if (first_passive) {
params->trip_switch_on = NULL;
- params->trip_max_desired_temperature = first_passive;
+ params->trip_max = first_passive;
} else {
params->trip_switch_on = NULL;
- params->trip_max_desired_temperature = last_active;
+ params->trip_max = last_active;
}
}
@@ -549,19 +535,19 @@ static void reset_pid_controller(struct power_allocator_params *params)
static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
{
- struct thermal_instance *instance;
struct power_allocator_params *params = tz->governor_data;
+ struct thermal_cooling_device *cdev;
+ struct thermal_instance *instance;
u32 req_power;
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- struct thermal_cooling_device *cdev = instance->cdev;
-
- if (instance->trip != params->trip_max_desired_temperature ||
- (!cdev_is_power_actor(instance->cdev)))
+ if (!power_actor_is_valid(params, instance))
continue;
+ cdev = instance->cdev;
+
instance->target = 0;
- mutex_lock(&instance->cdev->lock);
+ mutex_lock(&cdev->lock);
/*
* Call for updating the cooling devices local stats and avoid
* periods of dozen of seconds when those have not been
@@ -570,9 +556,9 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
cdev->ops->get_requested_power(cdev, &req_power);
if (update)
- __thermal_cdev_update(instance->cdev);
+ __thermal_cdev_update(cdev);
- mutex_unlock(&instance->cdev->lock);
+ mutex_unlock(&cdev->lock);
}
}
@@ -580,30 +566,99 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
* check_power_actors() - Check all cooling devices and warn when they are
* not power actors
* @tz: thermal zone to operate on
+ * @params: power allocator private data
*
* Check all cooling devices in the @tz and warn every time they are missing
* power actor API. The warning should help to investigate the issue, which
* could be e.g. lack of Energy Model for a given device.
*
- * Return: 0 on success, -EINVAL if any cooling device does not implement
- * the power actor API.
+ * If all of the cooling devices currently attached to @tz implement the power
+ * actor API, return the number of them (which may be 0, because some cooling
+ * devices may be attached later). Otherwise, return -EINVAL.
*/
-static int check_power_actors(struct thermal_zone_device *tz)
+static int check_power_actors(struct thermal_zone_device *tz,
+ struct power_allocator_params *params)
{
struct thermal_instance *instance;
int ret = 0;
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (instance->trip != params->trip_max)
+ continue;
+
if (!cdev_is_power_actor(instance->cdev)) {
dev_warn(&tz->device, "power_allocator: %s is not a power actor\n",
instance->cdev->type);
- ret = -EINVAL;
+ return -EINVAL;
}
+ ret++;
}
return ret;
}
+static int allocate_actors_buffer(struct power_allocator_params *params,
+ int num_actors)
+{
+ int ret;
+
+ kfree(params->power);
+
+ /* There might be no cooling devices yet. */
+ if (!num_actors) {
+ ret = -EINVAL;
+ goto clean_state;
+ }
+
+ params->power = kcalloc(num_actors, sizeof(struct power_actor),
+ GFP_KERNEL);
+ if (!params->power) {
+ ret = -ENOMEM;
+ goto clean_state;
+ }
+
+ params->num_actors = num_actors;
+ params->buffer_size = num_actors * sizeof(struct power_actor);
+
+ return 0;
+
+clean_state:
+ params->num_actors = 0;
+ params->buffer_size = 0;
+ params->power = NULL;
+ return ret;
+}
+
+static void power_allocator_update_tz(struct thermal_zone_device *tz,
+ enum thermal_notify_event reason)
+{
+ struct power_allocator_params *params = tz->governor_data;
+ struct thermal_instance *instance;
+ int num_actors = 0;
+
+ switch (reason) {
+ case THERMAL_TZ_BIND_CDEV:
+ case THERMAL_TZ_UNBIND_CDEV:
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node)
+ if (power_actor_is_valid(params, instance))
+ num_actors++;
+
+ if (num_actors == params->num_actors)
+ return;
+
+ allocate_actors_buffer(params, num_actors);
+ break;
+ case THERMAL_INSTANCE_WEIGHT_CHANGED:
+ params->total_weight = 0;
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node)
+ if (power_actor_is_valid(params, instance))
+ params->total_weight += instance->weight;
+ break;
+ default:
+ break;
+ }
+}
+
/**
* power_allocator_bind() - bind the power_allocator governor to a thermal zone
* @tz: thermal zone to bind it to
@@ -616,17 +671,34 @@ static int check_power_actors(struct thermal_zone_device *tz)
*/
static int power_allocator_bind(struct thermal_zone_device *tz)
{
- int ret;
struct power_allocator_params *params;
-
- ret = check_power_actors(tz);
- if (ret)
- return ret;
+ int ret;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
+ get_governor_trips(tz, params);
+ if (!params->trip_max) {
+ dev_warn(&tz->device, "power_allocator: missing trip_max\n");
+ kfree(params);
+ return -EINVAL;
+ }
+
+ ret = check_power_actors(tz, params);
+ if (ret < 0) {
+ dev_warn(&tz->device, "power_allocator: binding failed\n");
+ kfree(params);
+ return ret;
+ }
+
+ ret = allocate_actors_buffer(params, ret);
+ if (ret) {
+ dev_warn(&tz->device, "power_allocator: allocation failed\n");
+ kfree(params);
+ return ret;
+ }
+
if (!tz->tzp) {
tz->tzp = kzalloc(sizeof(*tz->tzp), GFP_KERNEL);
if (!tz->tzp) {
@@ -640,14 +712,9 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
if (!tz->tzp->sustainable_power)
dev_warn(&tz->device, "power_allocator: sustainable_power will be estimated\n");
- get_governor_trips(tz, params);
-
- if (params->trip_max_desired_temperature) {
- int temp = params->trip_max_desired_temperature->temperature;
-
- estimate_pid_constants(tz, tz->tzp->sustainable_power,
- params->trip_switch_on, temp);
- }
+ estimate_pid_constants(tz, tz->tzp->sustainable_power,
+ params->trip_switch_on,
+ params->trip_max->temperature);
reset_pid_controller(params);
@@ -656,6 +723,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return 0;
free_params:
+ kfree(params->power);
kfree(params);
return ret;
@@ -672,6 +740,7 @@ static void power_allocator_unbind(struct thermal_zone_device *tz)
tz->tzp = NULL;
}
+ kfree(params->power);
kfree(tz->governor_data);
tz->governor_data = NULL;
}
@@ -688,7 +757,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz,
* We get called for every trip point but we only need to do
* our calculations once
*/
- if (trip != params->trip_max_desired_temperature)
+ if (trip != params->trip_max)
return 0;
trip = params->trip_switch_on;
@@ -702,7 +771,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz,
tz->passive = 1;
- return allocate_power(tz, params->trip_max_desired_temperature->temperature);
+ return allocate_power(tz, params->trip_max->temperature);
}
static struct thermal_governor thermal_gov_power_allocator = {
@@ -710,5 +779,6 @@ static struct thermal_governor thermal_gov_power_allocator = {
.bind_to_tz = power_allocator_bind,
.unbind_from_tz = power_allocator_unbind,
.throttle = power_allocator_throttle,
+ .update_tz = power_allocator_update_tz,
};
THERMAL_GOVERNOR_DECLARE(thermal_gov_power_allocator);
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index ecd7e07eece0..b43953b5539f 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -85,7 +85,7 @@ config INTEL_BXT_PMIC_THERMAL
config INTEL_PCH_THERMAL
tristate "Intel PCH Thermal Reporting Driver"
depends on X86 && PCI
- select THERMAL_ACPI if ACPI
+ select ACPI_THERMAL_LIB if ACPI
help
Enable this to support thermal reporting on certain intel PCHs.
Thermal reporting device will provide temperature reading,
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig
index 300ea53e9b33..e76b13e44d03 100644
--- a/drivers/thermal/intel/int340x_thermal/Kconfig
+++ b/drivers/thermal/intel/int340x_thermal/Kconfig
@@ -9,7 +9,7 @@ config INT340X_THERMAL
select THERMAL_GOV_USER_SPACE
select ACPI_THERMAL_REL
select ACPI_FAN
- select THERMAL_ACPI
+ select ACPI_THERMAL_LIB
select INTEL_SOC_DTS_IOSF_CORE
select INTEL_TCC
select PROC_THERMAL_MMIO_RAPL if POWERCAP
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index a03b67579dd9..3e4bfe817fac 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -225,7 +225,8 @@ EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
static int int340x_update_one_trip(struct thermal_trip *trip, void *arg)
{
- struct acpi_device *zone_adev = arg;
+ struct int34x_thermal_zone *int34x_zone = arg;
+ struct acpi_device *zone_adev = int34x_zone->adev;
int temp, err;
switch (trip->type) {
@@ -249,14 +250,15 @@ static int int340x_update_one_trip(struct thermal_trip *trip, void *arg)
if (err)
temp = THERMAL_TEMP_INVALID;
- trip->temperature = temp;
+ thermal_zone_set_trip_temp(int34x_zone->zone, trip, temp);
+
return 0;
}
void int340x_thermal_update_trips(struct int34x_thermal_zone *int34x_zone)
{
thermal_zone_for_each_trip(int34x_zone->zone, int340x_update_one_trip,
- int34x_zone->adev);
+ int34x_zone);
}
EXPORT_SYMBOL_GPL(int340x_thermal_update_trips);
diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
index c69db6c90869..22445403b520 100644
--- a/drivers/thermal/intel/intel_hfi.c
+++ b/drivers/thermal/intel/intel_hfi.c
@@ -24,6 +24,7 @@
#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/cpumask.h>
+#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -347,6 +348,52 @@ static void init_hfi_instance(struct hfi_instance *hfi_instance)
hfi_instance->data = hfi_instance->hdr + hfi_features.hdr_size;
}
+/* Caller must hold hfi_instance_lock. */
+static void hfi_enable(void)
+{
+ u64 msr_val;
+
+ rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+ msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
+ wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+}
+
+static void hfi_set_hw_table(struct hfi_instance *hfi_instance)
+{
+ phys_addr_t hw_table_pa;
+ u64 msr_val;
+
+ hw_table_pa = virt_to_phys(hfi_instance->hw_table);
+ msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
+ wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
+}
+
+/* Caller must hold hfi_instance_lock. */
+static void hfi_disable(void)
+{
+ u64 msr_val;
+ int i;
+
+ rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+ msr_val &= ~HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
+ wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+
+ /*
+ * Wait for hardware to acknowledge the disabling of HFI. Some
+ * processors may not do it. Wait for ~2ms. This is a reasonable
+ * time for hardware to complete any pending actions on the HFI
+ * memory.
+ */
+ for (i = 0; i < 2000; i++) {
+ rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
+ if (msr_val & PACKAGE_THERM_STATUS_HFI_UPDATED)
+ break;
+
+ udelay(1);
+ cpu_relax();
+ }
+}
+
/**
* intel_hfi_online() - Enable HFI on @cpu
* @cpu: CPU in which the HFI will be enabled
@@ -364,8 +411,6 @@ void intel_hfi_online(unsigned int cpu)
{
struct hfi_instance *hfi_instance;
struct hfi_cpu_info *info;
- phys_addr_t hw_table_pa;
- u64 msr_val;
u16 die_id;
/* Nothing to do if hfi_instances are missing. */
@@ -392,25 +437,26 @@ void intel_hfi_online(unsigned int cpu)
/*
* Now check if the HFI instance of the package/die of @cpu has been
* initialized (by checking its header). In such case, all we have to
- * do is to add @cpu to this instance's cpumask.
+ * do is to add @cpu to this instance's cpumask and enable the instance
+ * if needed.
*/
mutex_lock(&hfi_instance_lock);
- if (hfi_instance->hdr) {
- cpumask_set_cpu(cpu, hfi_instance->cpus);
- goto unlock;
- }
+ if (hfi_instance->hdr)
+ goto enable;
/*
* Hardware is programmed with the physical address of the first page
* frame of the table. Hence, the allocated memory must be page-aligned.
+ *
+ * Some processors do not forget the initial address of the HFI table
+ * even after having been reprogrammed. Keep using the same pages. Do
+ * not free them.
*/
hfi_instance->hw_table = alloc_pages_exact(hfi_features.nr_table_pages,
GFP_KERNEL | __GFP_ZERO);
if (!hfi_instance->hw_table)
goto unlock;
- hw_table_pa = virt_to_phys(hfi_instance->hw_table);
-
/*
* Allocate memory to keep a local copy of the table that
* hardware generates.
@@ -420,31 +466,20 @@ void intel_hfi_online(unsigned int cpu)
if (!hfi_instance->local_table)
goto free_hw_table;
- /*
- * Program the address of the feedback table of this die/package. On
- * some processors, hardware remembers the old address of the HFI table
- * even after having been reprogrammed and re-enabled. Thus, do not free
- * the pages allocated for the table or reprogram the hardware with a
- * new base address. Namely, program the hardware only once.
- */
- msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
- wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
-
init_hfi_instance(hfi_instance);
INIT_DELAYED_WORK(&hfi_instance->update_work, hfi_update_work_fn);
raw_spin_lock_init(&hfi_instance->table_lock);
raw_spin_lock_init(&hfi_instance->event_lock);
+enable:
cpumask_set_cpu(cpu, hfi_instance->cpus);
- /*
- * Enable the hardware feedback interface and never disable it. See
- * comment on programming the address of the table.
- */
- rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
- msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
- wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+ /* Enable this HFI instance if this is its first online CPU. */
+ if (cpumask_weight(hfi_instance->cpus) == 1) {
+ hfi_set_hw_table(hfi_instance);
+ hfi_enable();
+ }
unlock:
mutex_unlock(&hfi_instance_lock);
@@ -484,6 +519,10 @@ void intel_hfi_offline(unsigned int cpu)
mutex_lock(&hfi_instance_lock);
cpumask_clear_cpu(cpu, hfi_instance->cpus);
+
+ if (!cpumask_weight(hfi_instance->cpus))
+ hfi_disable();
+
mutex_unlock(&hfi_instance_lock);
}
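The HFI hunks above only program and enable an instance when its first CPU comes online, and disable it again when the last CPU of the package/die goes offline. A sketch of that first-online/last-offline gating pattern; the foo_*() names and the cpumask are hypothetical stand-ins, not the HFI code itself.

/* Sketch of the first-online / last-offline gating used above. */
#include <linux/cpumask.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_lock);
static struct cpumask foo_cpus;

static void foo_hw_enable(void)  { /* program and enable the hardware */ }
static void foo_hw_disable(void) { /* quiesce the hardware */ }

void foo_cpu_online(unsigned int cpu)
{
	mutex_lock(&foo_lock);
	cpumask_set_cpu(cpu, &foo_cpus);
	/* Only the first CPU of the domain programs and enables the hardware. */
	if (cpumask_weight(&foo_cpus) == 1)
		foo_hw_enable();
	mutex_unlock(&foo_lock);
}

void foo_cpu_offline(unsigned int cpu)
{
	mutex_lock(&foo_lock);
	cpumask_clear_cpu(cpu, &foo_cpus);
	/* The last CPU leaving the domain turns the hardware off again. */
	if (cpumask_empty(&foo_cpus))
		foo_hw_disable();
	mutex_unlock(&foo_lock);
}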
diff --git a/drivers/thermal/loongson2_thermal.c b/drivers/thermal/loongson2_thermal.c
index 133098dc0854..99ca0c7bc41c 100644
--- a/drivers/thermal/loongson2_thermal.c
+++ b/drivers/thermal/loongson2_thermal.c
@@ -127,7 +127,7 @@ static int loongson2_thermal_probe(struct platform_device *pdev)
if (!IS_ERR(tzd))
break;
- if (PTR_ERR(tzd) != ENODEV)
+ if (PTR_ERR(tzd) != -ENODEV)
continue;
return dev_err_probe(dev, PTR_ERR(tzd), "failed to register");
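The one-character loongson2 fix above comes down to the ERR_PTR sign convention: devm_thermal_of_zone_register() returns ERR_PTR(-errno) on failure, so PTR_ERR() yields a negative value and a comparison against the positive ENODEV constant can never match. Illustration only:

/* Why the sign matters: ERR_PTR() stores a negative errno in the pointer,
 * and PTR_ERR() gives it back unchanged.
 */
#include <linux/err.h>
#include <linux/errno.h>

static int check_sign_convention(void)
{
	void *p = ERR_PTR(-ENODEV);

	if (PTR_ERR(p) == ENODEV)	/* never true: -19 != 19 */
		return 1;
	if (PTR_ERR(p) == -ENODEV)	/* this is the comparison that matches */
		return 0;
	return -1;
}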
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 123ec81e1943..6482513bfe66 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -138,12 +138,10 @@ enum soc_type {
/**
* struct exynos_tmu_data : A structure to hold the private data of the TMU
* driver
- * @id: identifier of the one instance of the TMU controller.
* @base: base address of the single instance of the TMU controller.
* @base_second: base address of the common registers of the TMU controller.
* @irq: irq number of the TMU controller.
* @soc: id of the SOC type.
- * @irq_work: pointer to the irq work structure.
* @lock: lock to implement synchronization.
* @clk: pointer to the clock structure.
* @clk_sec: pointer to the clock structure for accessing the base_second.
@@ -159,13 +157,13 @@ enum soc_type {
* @reference_voltage: reference voltage of amplifier
* in the positive-TC generator block
* 0 < reference_voltage <= 31
- * @regulator: pointer to the TMU regulator structure.
- * @reg_conf: pointer to structure to register with core thermal.
* @tzd: pointer to thermal_zone_device structure
- * @ntrip: number of supported trip points.
* @enabled: current status of TMU device
- * @tmu_set_trip_temp: SoC specific method to set trip (rising threshold)
- * @tmu_set_trip_hyst: SoC specific to set hysteresis (falling threshold)
+ * @tmu_set_low_temp: SoC specific method to set trip (falling threshold)
+ * @tmu_set_high_temp: SoC specific method to set trip (rising threshold)
+ * @tmu_set_crit_temp: SoC specific method to set critical temperature
+ * @tmu_disable_low: SoC specific method to disable an interrupt (falling threshold)
+ * @tmu_disable_high: SoC specific method to disable an interrupt (rising threshold)
* @tmu_initialize: SoC specific TMU initialization method
* @tmu_control: SoC specific TMU control method
* @tmu_read: SoC specific TMU temperature read method
@@ -173,12 +171,10 @@ enum soc_type {
* @tmu_clear_irqs: SoC specific TMU interrupts clearing method
*/
struct exynos_tmu_data {
- int id;
void __iomem *base;
void __iomem *base_second;
int irq;
enum soc_type soc;
- struct work_struct irq_work;
struct mutex lock;
struct clk *clk, *clk_sec, *sclk;
u32 cal_type;
@@ -188,15 +184,14 @@ struct exynos_tmu_data {
u16 temp_error1, temp_error2;
u8 gain;
u8 reference_voltage;
- struct regulator *regulator;
struct thermal_zone_device *tzd;
- unsigned int ntrip;
bool enabled;
- void (*tmu_set_trip_temp)(struct exynos_tmu_data *data, int trip,
- u8 temp);
- void (*tmu_set_trip_hyst)(struct exynos_tmu_data *data, int trip,
- u8 temp, u8 hyst);
+ void (*tmu_set_low_temp)(struct exynos_tmu_data *data, u8 temp);
+ void (*tmu_set_high_temp)(struct exynos_tmu_data *data, u8 temp);
+ void (*tmu_set_crit_temp)(struct exynos_tmu_data *data, u8 temp);
+ void (*tmu_disable_low)(struct exynos_tmu_data *data);
+ void (*tmu_disable_high)(struct exynos_tmu_data *data);
void (*tmu_initialize)(struct platform_device *pdev);
void (*tmu_control)(struct platform_device *pdev, bool on);
int (*tmu_read)(struct exynos_tmu_data *data);
@@ -258,25 +253,8 @@ static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
static int exynos_tmu_initialize(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- struct thermal_zone_device *tzd = data->tzd;
- int num_trips = thermal_zone_get_num_trips(tzd);
unsigned int status;
- int ret = 0, temp;
-
- ret = thermal_zone_get_crit_temp(tzd, &temp);
- if (ret && data->soc != SOC_ARCH_EXYNOS5433) { /* FIXME */
- dev_err(&pdev->dev,
- "No CRITICAL trip point defined in device tree!\n");
- goto out;
- }
-
- if (num_trips > data->ntrip) {
- dev_info(&pdev->dev,
- "More trip points than supported by this TMU.\n");
- dev_info(&pdev->dev,
- "%d trip points should be configured in polling mode.\n",
- num_trips - data->ntrip);
- }
+ int ret = 0;
mutex_lock(&data->lock);
clk_enable(data->clk);
@@ -287,34 +265,44 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
if (!status) {
ret = -EBUSY;
} else {
- int i, ntrips =
- min_t(int, num_trips, data->ntrip);
-
data->tmu_initialize(pdev);
+ data->tmu_clear_irqs(data);
+ }
- /* Write temperature code for rising and falling threshold */
- for (i = 0; i < ntrips; i++) {
+ if (!IS_ERR(data->clk_sec))
+ clk_disable(data->clk_sec);
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
- struct thermal_trip trip;
+ return ret;
+}
- ret = thermal_zone_get_trip(tzd, i, &trip);
- if (ret)
- goto err;
+static int exynos_thermal_zone_configure(struct platform_device *pdev)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ struct thermal_zone_device *tzd = data->tzd;
+ int ret, temp;
- data->tmu_set_trip_temp(data, i, trip.temperature / MCELSIUS);
- data->tmu_set_trip_hyst(data, i, trip.temperature / MCELSIUS,
- trip.hysteresis / MCELSIUS);
- }
+ ret = thermal_zone_get_crit_temp(tzd, &temp);
+ if (ret) {
+ /* FIXME: Remove this special case */
+ if (data->soc == SOC_ARCH_EXYNOS5433)
+ return 0;
- data->tmu_clear_irqs(data);
+ dev_err(&pdev->dev,
+ "No CRITICAL trip point defined in device tree!\n");
+ return ret;
}
-err:
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ data->tmu_set_crit_temp(data, temp / MCELSIUS);
+
clk_disable(data->clk);
mutex_unlock(&data->lock);
- if (!IS_ERR(data->clk_sec))
- clk_disable(data->clk_sec);
-out:
- return ret;
+
+ return 0;
}
static u32 get_con_reg(struct exynos_tmu_data *data, u32 con)
@@ -347,30 +335,74 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
mutex_unlock(&data->lock);
}
-static void exynos4210_tmu_set_trip_temp(struct exynos_tmu_data *data,
- int trip_id, u8 temp)
+static void exynos_tmu_update_bit(struct exynos_tmu_data *data, int reg_off,
+ int bit_off, bool enable)
+{
+ u32 interrupt_en;
+
+ interrupt_en = readl(data->base + reg_off);
+ if (enable)
+ interrupt_en |= BIT(bit_off);
+ else
+ interrupt_en &= ~BIT(bit_off);
+ writel(interrupt_en, data->base + reg_off);
+}
+
+static void exynos_tmu_update_temp(struct exynos_tmu_data *data, int reg_off,
+ int bit_off, u8 temp)
{
- struct thermal_trip trip;
- u8 ref, th_code;
+ u16 tmu_temp_mask;
+ u32 th;
- if (thermal_zone_get_trip(data->tzd, 0, &trip))
- return;
+ tmu_temp_mask =
+ (data->soc == SOC_ARCH_EXYNOS7) ? EXYNOS7_TMU_TEMP_MASK
+ : EXYNOS_TMU_TEMP_MASK;
- ref = trip.temperature / MCELSIUS;
+ th = readl(data->base + reg_off);
+ th &= ~(tmu_temp_mask << bit_off);
+ th |= temp_to_code(data, temp) << bit_off;
+ writel(th, data->base + reg_off);
+}
- if (trip_id == 0) {
- th_code = temp_to_code(data, ref);
- writeb(th_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
- }
+static void exynos4210_tmu_set_low_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ /*
+ * Failing thresholds are not supported on Exynos 4210.
+ * We use polling instead.
+ */
+}
+
+static void exynos4210_tmu_set_high_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ temp = temp_to_code(data, temp);
+ writeb(temp, data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + 4);
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_RISE0_SHIFT + 4, true);
+}
- temp -= ref;
- writeb(temp, data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + trip_id * 4);
+static void exynos4210_tmu_disable_low(struct exynos_tmu_data *data)
+{
+ /* Again, this is handled by polling. */
}
-/* failing thresholds are not supported on Exynos4210 */
-static void exynos4210_tmu_set_trip_hyst(struct exynos_tmu_data *data,
- int trip, u8 temp, u8 hyst)
+static void exynos4210_tmu_disable_high(struct exynos_tmu_data *data)
{
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_RISE0_SHIFT + 4, false);
+}
+
+static void exynos4210_tmu_set_crit_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ /*
+ * Hardware critical temperature handling is not supported on Exynos 4210.
+ * We still set the critical temperature threshold, but this is only to
+ * make sure it is handled as soon as possible. It is just a normal interrupt.
+ */
+
+ temp = temp_to_code(data, temp);
+ writeb(temp, data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + 12);
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_RISE0_SHIFT + 12, true);
}
static void exynos4210_tmu_initialize(struct platform_device *pdev)
@@ -378,35 +410,35 @@ static void exynos4210_tmu_initialize(struct platform_device *pdev)
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));
+
+ writeb(0, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
}
-static void exynos4412_tmu_set_trip_temp(struct exynos_tmu_data *data,
- int trip, u8 temp)
+static void exynos4412_tmu_set_low_temp(struct exynos_tmu_data *data, u8 temp)
{
- u32 th, con;
-
- th = readl(data->base + EXYNOS_THD_TEMP_RISE);
- th &= ~(0xff << 8 * trip);
- th |= temp_to_code(data, temp) << 8 * trip;
- writel(th, data->base + EXYNOS_THD_TEMP_RISE);
+ exynos_tmu_update_temp(data, EXYNOS_THD_TEMP_FALL, 0, temp);
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_FALL0_SHIFT, true);
+}
- if (trip == 3) {
- con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
- con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
- writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
- }
+static void exynos4412_tmu_set_high_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ exynos_tmu_update_temp(data, EXYNOS_THD_TEMP_RISE, 8, temp);
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_RISE0_SHIFT + 4, true);
}
-static void exynos4412_tmu_set_trip_hyst(struct exynos_tmu_data *data,
- int trip, u8 temp, u8 hyst)
+static void exynos4412_tmu_disable_low(struct exynos_tmu_data *data)
{
- u32 th;
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_FALL0_SHIFT, false);
+}
- th = readl(data->base + EXYNOS_THD_TEMP_FALL);
- th &= ~(0xff << 8 * trip);
- if (hyst)
- th |= temp_to_code(data, temp - hyst) << 8 * trip;
- writel(th, data->base + EXYNOS_THD_TEMP_FALL);
+static void exynos4412_tmu_set_crit_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ exynos_tmu_update_temp(data, EXYNOS_THD_TEMP_RISE, 24, temp);
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_CONTROL,
+ EXYNOS_TMU_THERM_TRIP_EN_SHIFT, true);
}
static void exynos4412_tmu_initialize(struct platform_device *pdev)
@@ -436,44 +468,39 @@ static void exynos4412_tmu_initialize(struct platform_device *pdev)
sanitize_temp_error(data, trim_info);
}
-static void exynos5433_tmu_set_trip_temp(struct exynos_tmu_data *data,
- int trip, u8 temp)
+static void exynos5433_tmu_set_low_temp(struct exynos_tmu_data *data, u8 temp)
{
- unsigned int reg_off, j;
- u32 th;
-
- if (trip > 3) {
- reg_off = EXYNOS5433_THD_TEMP_RISE7_4;
- j = trip - 4;
- } else {
- reg_off = EXYNOS5433_THD_TEMP_RISE3_0;
- j = trip;
- }
+ exynos_tmu_update_temp(data, EXYNOS5433_THD_TEMP_FALL3_0, 0, temp);
+ exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_FALL0_SHIFT, true);
+}
- th = readl(data->base + reg_off);
- th &= ~(0xff << j * 8);
- th |= (temp_to_code(data, temp) << j * 8);
- writel(th, data->base + reg_off);
+static void exynos5433_tmu_set_high_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ exynos_tmu_update_temp(data, EXYNOS5433_THD_TEMP_RISE3_0, 8, temp);
+ exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN,
+ EXYNOS7_TMU_INTEN_RISE0_SHIFT + 1, true);
}
-static void exynos5433_tmu_set_trip_hyst(struct exynos_tmu_data *data,
- int trip, u8 temp, u8 hyst)
+static void exynos5433_tmu_disable_low(struct exynos_tmu_data *data)
{
- unsigned int reg_off, j;
- u32 th;
+ exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_FALL0_SHIFT, false);
+}
- if (trip > 3) {
- reg_off = EXYNOS5433_THD_TEMP_FALL7_4;
- j = trip - 4;
- } else {
- reg_off = EXYNOS5433_THD_TEMP_FALL3_0;
- j = trip;
- }
+static void exynos5433_tmu_disable_high(struct exynos_tmu_data *data)
+{
+ exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN,
+ EXYNOS7_TMU_INTEN_RISE0_SHIFT + 1, false);
+}
- th = readl(data->base + reg_off);
- th &= ~(0xff << j * 8);
- th |= (temp_to_code(data, temp - hyst) << j * 8);
- writel(th, data->base + reg_off);
+static void exynos5433_tmu_set_crit_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ exynos_tmu_update_temp(data, EXYNOS5433_THD_TEMP_RISE7_4, 24, temp);
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_CONTROL,
+ EXYNOS_TMU_THERM_TRIP_EN_SHIFT, true);
+ exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN,
+ EXYNOS7_TMU_INTEN_RISE0_SHIFT + 7, true);
}
static void exynos5433_tmu_initialize(struct platform_device *pdev)
@@ -509,34 +536,41 @@ static void exynos5433_tmu_initialize(struct platform_device *pdev)
cal_type ? 2 : 1);
}
-static void exynos7_tmu_set_trip_temp(struct exynos_tmu_data *data,
- int trip, u8 temp)
+static void exynos7_tmu_set_low_temp(struct exynos_tmu_data *data, u8 temp)
{
- unsigned int reg_off, bit_off;
- u32 th;
-
- reg_off = ((7 - trip) / 2) * 4;
- bit_off = ((8 - trip) % 2);
+ exynos_tmu_update_temp(data, EXYNOS7_THD_TEMP_FALL7_6 + 12, 0, temp);
+ exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_FALL0_SHIFT + 0, true);
+}
- th = readl(data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
- th &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
- th |= temp_to_code(data, temp) << (16 * bit_off);
- writel(th, data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
+static void exynos7_tmu_set_high_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ exynos_tmu_update_temp(data, EXYNOS7_THD_TEMP_RISE7_6 + 12, 16, temp);
+ exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN,
+ EXYNOS7_TMU_INTEN_RISE0_SHIFT + 1, true);
}
-static void exynos7_tmu_set_trip_hyst(struct exynos_tmu_data *data,
- int trip, u8 temp, u8 hyst)
+static void exynos7_tmu_disable_low(struct exynos_tmu_data *data)
{
- unsigned int reg_off, bit_off;
- u32 th;
+ exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_FALL0_SHIFT + 0, false);
+}
- reg_off = ((7 - trip) / 2) * 4;
- bit_off = ((8 - trip) % 2);
+static void exynos7_tmu_disable_high(struct exynos_tmu_data *data)
+{
+ exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN,
+ EXYNOS7_TMU_INTEN_RISE0_SHIFT + 1, false);
+}
- th = readl(data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
- th &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
- th |= temp_to_code(data, temp - hyst) << (16 * bit_off);
- writel(th, data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
+static void exynos7_tmu_set_crit_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ /*
+ * Like Exynos 4210, Exynos 7 does not seem to support critical temperature
+ * handling in hardware. Again, we still set a separate interrupt for it.
+ */
+ exynos_tmu_update_temp(data, EXYNOS7_THD_TEMP_RISE7_6 + 0, 16, temp);
+ exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN,
+ EXYNOS7_TMU_INTEN_RISE0_SHIFT + 7, true);
}
static void exynos7_tmu_initialize(struct platform_device *pdev)
@@ -551,95 +585,51 @@ static void exynos7_tmu_initialize(struct platform_device *pdev)
static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- struct thermal_zone_device *tz = data->tzd;
- struct thermal_trip trip;
- unsigned int con, interrupt_en = 0, i;
+ unsigned int con;
con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
- if (on) {
- for (i = 0; i < data->ntrip; i++) {
- if (thermal_zone_get_trip(tz, i, &trip))
- continue;
-
- interrupt_en |=
- (1 << (EXYNOS_TMU_INTEN_RISE0_SHIFT + i * 4));
- }
-
- if (data->soc != SOC_ARCH_EXYNOS4210)
- interrupt_en |=
- interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
-
- con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
- } else {
- con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
- }
+ if (on)
+ con |= BIT(EXYNOS_TMU_CORE_EN_SHIFT);
+ else
+ con &= ~BIT(EXYNOS_TMU_CORE_EN_SHIFT);
- writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}
static void exynos5433_tmu_control(struct platform_device *pdev, bool on)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- struct thermal_zone_device *tz = data->tzd;
- struct thermal_trip trip;
- unsigned int con, interrupt_en = 0, pd_det_en, i;
+ unsigned int con, pd_det_en;
con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
- if (on) {
- for (i = 0; i < data->ntrip; i++) {
- if (thermal_zone_get_trip(tz, i, &trip))
- continue;
-
- interrupt_en |=
- (1 << (EXYNOS7_TMU_INTEN_RISE0_SHIFT + i));
- }
-
- interrupt_en |=
- interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
-
- con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
- } else
- con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
+ if (on)
+ con |= BIT(EXYNOS_TMU_CORE_EN_SHIFT);
+ else
+ con &= ~BIT(EXYNOS_TMU_CORE_EN_SHIFT);
pd_det_en = on ? EXYNOS5433_PD_DET_EN : 0;
writel(pd_det_en, data->base + EXYNOS5433_TMU_PD_DET_EN);
- writel(interrupt_en, data->base + EXYNOS5433_TMU_REG_INTEN);
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}
static void exynos7_tmu_control(struct platform_device *pdev, bool on)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- struct thermal_zone_device *tz = data->tzd;
- struct thermal_trip trip;
- unsigned int con, interrupt_en = 0, i;
+ unsigned int con;
con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
if (on) {
- for (i = 0; i < data->ntrip; i++) {
- if (thermal_zone_get_trip(tz, i, &trip))
- continue;
-
- interrupt_en |=
- (1 << (EXYNOS7_TMU_INTEN_RISE0_SHIFT + i));
- }
-
- interrupt_en |=
- interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
-
- con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
- con |= (1 << EXYNOS7_PD_DET_EN_SHIFT);
+ con |= BIT(EXYNOS_TMU_CORE_EN_SHIFT);
+ con |= BIT(EXYNOS7_PD_DET_EN_SHIFT);
} else {
- con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
- con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT);
+ con &= ~BIT(EXYNOS_TMU_CORE_EN_SHIFT);
+ con &= ~BIT(EXYNOS7_PD_DET_EN_SHIFT);
}
- writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}
@@ -766,10 +756,9 @@ static int exynos7_tmu_read(struct exynos_tmu_data *data)
EXYNOS7_TMU_TEMP_MASK;
}
-static void exynos_tmu_work(struct work_struct *work)
+static irqreturn_t exynos_tmu_threaded_irq(int irq, void *id)
{
- struct exynos_tmu_data *data = container_of(work,
- struct exynos_tmu_data, irq_work);
+ struct exynos_tmu_data *data = id;
thermal_zone_device_update(data->tzd, THERMAL_EVENT_UNSPECIFIED);
@@ -781,7 +770,8 @@ static void exynos_tmu_work(struct work_struct *work)
clk_disable(data->clk);
mutex_unlock(&data->lock);
- enable_irq(data->irq);
+
+ return IRQ_HANDLED;
}
static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
@@ -815,16 +805,6 @@ static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
writel(val_irq, data->base + tmu_intclear);
}
-static irqreturn_t exynos_tmu_irq(int irq, void *id)
-{
- struct exynos_tmu_data *data = id;
-
- disable_irq_nosync(irq);
- schedule_work(&data->irq_work);
-
- return IRQ_HANDLED;
-}
-
static const struct of_device_id exynos_tmu_match[] = {
{
.compatible = "samsung,exynos3250-tmu",
@@ -866,10 +846,6 @@ static int exynos_map_dt_data(struct platform_device *pdev)
if (!data || !pdev->dev.of_node)
return -ENODEV;
- data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
- if (data->id < 0)
- data->id = 0;
-
data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (data->irq <= 0) {
dev_err(&pdev->dev, "failed to get IRQ\n");
@@ -891,13 +867,15 @@ static int exynos_map_dt_data(struct platform_device *pdev)
switch (data->soc) {
case SOC_ARCH_EXYNOS4210:
- data->tmu_set_trip_temp = exynos4210_tmu_set_trip_temp;
- data->tmu_set_trip_hyst = exynos4210_tmu_set_trip_hyst;
+ data->tmu_set_low_temp = exynos4210_tmu_set_low_temp;
+ data->tmu_set_high_temp = exynos4210_tmu_set_high_temp;
+ data->tmu_disable_low = exynos4210_tmu_disable_low;
+ data->tmu_disable_high = exynos4210_tmu_disable_high;
+ data->tmu_set_crit_temp = exynos4210_tmu_set_crit_temp;
data->tmu_initialize = exynos4210_tmu_initialize;
data->tmu_control = exynos4210_tmu_control;
data->tmu_read = exynos4210_tmu_read;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
- data->ntrip = 4;
data->gain = 15;
data->reference_voltage = 7;
data->efuse_value = 55;
@@ -910,14 +888,16 @@ static int exynos_map_dt_data(struct platform_device *pdev)
case SOC_ARCH_EXYNOS5260:
case SOC_ARCH_EXYNOS5420:
case SOC_ARCH_EXYNOS5420_TRIMINFO:
- data->tmu_set_trip_temp = exynos4412_tmu_set_trip_temp;
- data->tmu_set_trip_hyst = exynos4412_tmu_set_trip_hyst;
+ data->tmu_set_low_temp = exynos4412_tmu_set_low_temp;
+ data->tmu_set_high_temp = exynos4412_tmu_set_high_temp;
+ data->tmu_disable_low = exynos4412_tmu_disable_low;
+ data->tmu_disable_high = exynos4210_tmu_disable_high;
+ data->tmu_set_crit_temp = exynos4412_tmu_set_crit_temp;
data->tmu_initialize = exynos4412_tmu_initialize;
data->tmu_control = exynos4210_tmu_control;
data->tmu_read = exynos4412_tmu_read;
data->tmu_set_emulation = exynos4412_tmu_set_emulation;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
- data->ntrip = 4;
data->gain = 8;
data->reference_voltage = 16;
data->efuse_value = 55;
@@ -929,14 +909,16 @@ static int exynos_map_dt_data(struct platform_device *pdev)
data->max_efuse_value = 100;
break;
case SOC_ARCH_EXYNOS5433:
- data->tmu_set_trip_temp = exynos5433_tmu_set_trip_temp;
- data->tmu_set_trip_hyst = exynos5433_tmu_set_trip_hyst;
+ data->tmu_set_low_temp = exynos5433_tmu_set_low_temp;
+ data->tmu_set_high_temp = exynos5433_tmu_set_high_temp;
+ data->tmu_disable_low = exynos5433_tmu_disable_low;
+ data->tmu_disable_high = exynos5433_tmu_disable_high;
+ data->tmu_set_crit_temp = exynos5433_tmu_set_crit_temp;
data->tmu_initialize = exynos5433_tmu_initialize;
data->tmu_control = exynos5433_tmu_control;
data->tmu_read = exynos4412_tmu_read;
data->tmu_set_emulation = exynos4412_tmu_set_emulation;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
- data->ntrip = 8;
data->gain = 8;
if (res.start == EXYNOS5433_G3D_BASE)
data->reference_voltage = 23;
@@ -947,14 +929,16 @@ static int exynos_map_dt_data(struct platform_device *pdev)
data->max_efuse_value = 150;
break;
case SOC_ARCH_EXYNOS7:
- data->tmu_set_trip_temp = exynos7_tmu_set_trip_temp;
- data->tmu_set_trip_hyst = exynos7_tmu_set_trip_hyst;
+ data->tmu_set_low_temp = exynos7_tmu_set_low_temp;
+ data->tmu_set_high_temp = exynos7_tmu_set_high_temp;
+ data->tmu_disable_low = exynos7_tmu_disable_low;
+ data->tmu_disable_high = exynos7_tmu_disable_high;
+ data->tmu_set_crit_temp = exynos7_tmu_set_crit_temp;
data->tmu_initialize = exynos7_tmu_initialize;
data->tmu_control = exynos7_tmu_control;
data->tmu_read = exynos7_tmu_read;
data->tmu_set_emulation = exynos4412_tmu_set_emulation;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
- data->ntrip = 8;
data->gain = 9;
data->reference_voltage = 17;
data->efuse_value = 75;
@@ -990,9 +974,32 @@ static int exynos_map_dt_data(struct platform_device *pdev)
return 0;
}
+static int exynos_set_trips(struct thermal_zone_device *tz, int low, int high)
+{
+ struct exynos_tmu_data *data = thermal_zone_device_priv(tz);
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ if (low > INT_MIN)
+ data->tmu_set_low_temp(data, low / MCELSIUS);
+ else
+ data->tmu_disable_low(data);
+ if (high < INT_MAX)
+ data->tmu_set_high_temp(data, high / MCELSIUS);
+ else
+ data->tmu_disable_high(data);
+
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+
+ return 0;
+}
+
static const struct thermal_zone_device_ops exynos_sensor_ops = {
.get_temp = exynos_get_temp,
.set_emul_temp = exynos_tmu_set_emulation,
+ .set_trips = exynos_set_trips,
};
static int exynos_tmu_probe(struct platform_device *pdev)
@@ -1013,44 +1020,40 @@ static int exynos_tmu_probe(struct platform_device *pdev)
* TODO: Add regulator as an SOC feature, so that regulator enable
* is a compulsory call.
*/
- data->regulator = devm_regulator_get_optional(&pdev->dev, "vtmu");
- if (!IS_ERR(data->regulator)) {
- ret = regulator_enable(data->regulator);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable vtmu\n");
- return ret;
- }
- } else {
- if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
+ ret = devm_regulator_get_enable_optional(&pdev->dev, "vtmu");
+ switch (ret) {
+ case 0:
+ case -ENODEV:
+ break;
+ case -EPROBE_DEFER:
+ return -EPROBE_DEFER;
+ default:
+ dev_err(&pdev->dev, "Failed to get enabled regulator: %d\n",
+ ret);
+ return ret;
}
ret = exynos_map_dt_data(pdev);
if (ret)
- goto err_sensor;
-
- INIT_WORK(&data->irq_work, exynos_tmu_work);
+ return ret;
data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
if (IS_ERR(data->clk)) {
dev_err(&pdev->dev, "Failed to get clock\n");
- ret = PTR_ERR(data->clk);
- goto err_sensor;
+ return PTR_ERR(data->clk);
}
data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
if (IS_ERR(data->clk_sec)) {
if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
dev_err(&pdev->dev, "Failed to get triminfo clock\n");
- ret = PTR_ERR(data->clk_sec);
- goto err_sensor;
+ return PTR_ERR(data->clk_sec);
}
} else {
ret = clk_prepare(data->clk_sec);
if (ret) {
dev_err(&pdev->dev, "Failed to get clock\n");
- goto err_sensor;
+ return ret;
}
}
@@ -1080,10 +1083,12 @@ static int exynos_tmu_probe(struct platform_device *pdev)
break;
}
- /*
- * data->tzd must be registered before calling exynos_tmu_initialize(),
- * requesting irq and calling exynos_tmu_control().
- */
+ ret = exynos_tmu_initialize(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize TMU\n");
+ goto err_sclk;
+ }
+
data->tzd = devm_thermal_of_zone_register(&pdev->dev, 0, data,
&exynos_sensor_ops);
if (IS_ERR(data->tzd)) {
@@ -1094,14 +1099,17 @@ static int exynos_tmu_probe(struct platform_device *pdev)
goto err_sclk;
}
- ret = exynos_tmu_initialize(pdev);
+ ret = exynos_thermal_zone_configure(pdev);
if (ret) {
- dev_err(&pdev->dev, "Failed to initialize TMU\n");
+ dev_err(&pdev->dev, "Failed to configure the thermal zone\n");
goto err_sclk;
}
- ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
- IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
+ ret = devm_request_threaded_irq(&pdev->dev, data->irq, NULL,
+ exynos_tmu_threaded_irq,
+ IRQF_TRIGGER_RISING
+ | IRQF_SHARED | IRQF_ONESHOT,
+ dev_name(&pdev->dev), data);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
goto err_sclk;
@@ -1117,10 +1125,6 @@ err_clk:
err_clk_sec:
if (!IS_ERR(data->clk_sec))
clk_unprepare(data->clk_sec);
-err_sensor:
- if (!IS_ERR(data->regulator))
- regulator_disable(data->regulator);
-
return ret;
}
@@ -1134,9 +1138,6 @@ static void exynos_tmu_remove(struct platform_device *pdev)
clk_unprepare(data->clk);
if (!IS_ERR(data->clk_sec))
clk_unprepare(data->clk_sec);
-
- if (!IS_ERR(data->regulator))
- regulator_disable(data->regulator);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index f989b55a8aa8..6a8e386dbc8d 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -606,6 +606,18 @@ static const struct ths_thermal_chip sun50i_h6_ths = {
.calc_temp = sun8i_ths_calc_temp,
};
+static const struct ths_thermal_chip sun20i_d1_ths = {
+ .sensor_num = 1,
+ .has_bus_clk_reset = true,
+ .offset = 188552,
+ .scale = 673,
+ .temp_data_base = SUN50I_H6_THS_TEMP_DATA,
+ .calibrate = sun50i_h6_ths_calibrate,
+ .init = sun50i_h6_thermal_init,
+ .irq_ack = sun50i_h6_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
static const struct of_device_id of_ths_match[] = {
{ .compatible = "allwinner,sun8i-a83t-ths", .data = &sun8i_a83t_ths },
{ .compatible = "allwinner,sun8i-h3-ths", .data = &sun8i_h3_ths },
@@ -614,6 +626,7 @@ static const struct of_device_id of_ths_match[] = {
{ .compatible = "allwinner,sun50i-a100-ths", .data = &sun50i_a100_ths },
{ .compatible = "allwinner,sun50i-h5-ths", .data = &sun50i_h5_ths },
{ .compatible = "allwinner,sun50i-h6-ths", .data = &sun50i_h6_ths },
+ { .compatible = "allwinner,sun20i-d1-ths", .data = &sun20i_d1_ths },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, of_ths_match);
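The new "allwinner,sun20i-d1-ths" entry reuses the existing probe path; in drivers structured like this one the per-compatible chip data is typically fetched with of_device_get_match_data(), roughly as sketched below. The sun8i probe itself is not part of this hunk, so foo_probe() and struct foo_chip are hypothetical stand-ins.

/* Sketch of how a probe routine typically picks up the per-compatible
 * chip data added above; the driver's real probe is not shown in this diff.
 */
#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_chip { int sensor_num; };

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_chip *chip = of_device_get_match_data(&pdev->dev);

	if (!chip)
		return -ENODEV;
	/* chip->sensor_num etc. now describe the matched SoC variant */
	return 0;
}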
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 9c17d35ccbbd..fa88d8707241 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -37,8 +37,6 @@ static LIST_HEAD(thermal_governor_list);
static DEFINE_MUTEX(thermal_list_lock);
static DEFINE_MUTEX(thermal_governor_lock);
-static atomic_t in_suspend;
-
static struct thermal_governor *def_governor;
/*
@@ -203,9 +201,6 @@ int thermal_zone_device_set_policy(struct thermal_zone_device *tz,
mutex_lock(&thermal_governor_lock);
mutex_lock(&tz->lock);
- if (!device_is_registered(&tz->device))
- goto exit;
-
gov = __find_governor(strim(policy));
if (!gov)
goto exit;
@@ -314,21 +309,43 @@ static void handle_non_critical_trips(struct thermal_zone_device *tz,
def_governor->throttle(tz, trip);
}
-void thermal_zone_device_critical(struct thermal_zone_device *tz)
+void thermal_governor_update_tz(struct thermal_zone_device *tz,
+ enum thermal_notify_event reason)
+{
+ if (!tz->governor || !tz->governor->update_tz)
+ return;
+
+ tz->governor->update_tz(tz, reason);
+}
+
+static void thermal_zone_device_halt(struct thermal_zone_device *tz, bool shutdown)
{
/*
* poweroff_delay_ms must be a carefully profiled positive value.
 * It's a must for forced_emergency_poweroff_work to be scheduled.
*/
int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS;
+ const char *msg = "Temperature too high";
- dev_emerg(&tz->device, "%s: critical temperature reached, "
- "shutting down\n", tz->type);
+ dev_emerg(&tz->device, "%s: critical temperature reached\n", tz->type);
+
+ if (shutdown)
+ hw_protection_shutdown(msg, poweroff_delay_ms);
+ else
+ hw_protection_reboot(msg, poweroff_delay_ms);
+}
- hw_protection_shutdown("Temperature too high", poweroff_delay_ms);
+void thermal_zone_device_critical(struct thermal_zone_device *tz)
+{
+ thermal_zone_device_halt(tz, true);
}
EXPORT_SYMBOL(thermal_zone_device_critical);
+void thermal_zone_device_critical_reboot(struct thermal_zone_device *tz)
+{
+ thermal_zone_device_halt(tz, false);
+}
+
static void handle_critical_trips(struct thermal_zone_device *tz,
const struct thermal_trip *trip)
{
@@ -345,22 +362,51 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
}
static void handle_thermal_trip(struct thermal_zone_device *tz,
- const struct thermal_trip *trip)
+ struct thermal_trip *trip)
{
if (trip->temperature == THERMAL_TEMP_INVALID)
return;
- if (tz->last_temperature != THERMAL_TEMP_INVALID) {
- if (tz->last_temperature < trip->temperature &&
- tz->temperature >= trip->temperature)
+ if (tz->last_temperature == THERMAL_TEMP_INVALID) {
+ /* Initialization. */
+ trip->threshold = trip->temperature;
+ if (tz->temperature >= trip->threshold)
+ trip->threshold -= trip->hysteresis;
+ } else if (tz->last_temperature < trip->threshold) {
+ /*
+ * The trip threshold is equal to the trip temperature, unless
+ * the latter has changed in the meantime. In either case,
+ * the trip is crossed if the current zone temperature is at
+ * least equal to its temperature, but otherwise ensure that
+ * the threshold and the trip temperature will be equal.
+ */
+ if (tz->temperature >= trip->temperature) {
thermal_notify_tz_trip_up(tz->id,
thermal_zone_trip_id(tz, trip),
tz->temperature);
- if (tz->last_temperature >= trip->temperature &&
- tz->temperature < trip->temperature - trip->hysteresis)
+ trip->threshold = trip->temperature - trip->hysteresis;
+ } else {
+ trip->threshold = trip->temperature;
+ }
+ } else {
+ /*
+ * The previous zone temperature was above or equal to the trip
+ * threshold, which would be equal to the "low temperature" of
+ * the trip (its temperature minus its hysteresis), unless the
+ * trip temperature or hysteresis had changed. In either case,
+ * the trip is crossed if the current zone temperature is below
+ * the low temperature of the trip, but otherwise ensure that
+ * the trip threshold will be equal to the low temperature of
+ * the trip.
+ */
+ if (tz->temperature < trip->temperature - trip->hysteresis) {
thermal_notify_tz_trip_down(tz->id,
thermal_zone_trip_id(tz, trip),
tz->temperature);
+ trip->threshold = trip->temperature;
+ } else {
+ trip->threshold = trip->temperature - trip->hysteresis;
+ }
}
if (trip->type == THERMAL_TRIP_CRITICAL || trip->type == THERMAL_TRIP_HOT)
@@ -390,9 +436,20 @@ static void update_temperature(struct thermal_zone_device *tz)
thermal_genl_sampling_temp(tz->id, temp);
}
+static void thermal_zone_device_check(struct work_struct *work)
+{
+ struct thermal_zone_device *tz = container_of(work, struct
+ thermal_zone_device,
+ poll_queue.work);
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+}
+
static void thermal_zone_device_init(struct thermal_zone_device *tz)
{
struct thermal_instance *pos;
+
+ INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check);
+
tz->temperature = THERMAL_TEMP_INVALID;
tz->prev_low_trip = -INT_MAX;
tz->prev_high_trip = INT_MAX;
@@ -403,14 +460,9 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz)
void __thermal_zone_device_update(struct thermal_zone_device *tz,
enum thermal_notify_event event)
{
- const struct thermal_trip *trip;
-
- if (atomic_read(&in_suspend))
- return;
+ struct thermal_trip *trip;
- if (WARN_ONCE(!tz->ops->get_temp,
- "'%s' must not be called without 'get_temp' ops set\n",
- __func__))
+ if (tz->suspended)
return;
if (!thermal_zone_device_is_enabled(tz))
@@ -442,12 +494,6 @@ static int thermal_zone_device_set_mode(struct thermal_zone_device *tz,
return ret;
}
- if (!device_is_registered(&tz->device)) {
- mutex_unlock(&tz->lock);
-
- return -ENODEV;
- }
-
if (tz->ops->change_mode)
ret = tz->ops->change_mode(tz, mode);
@@ -485,24 +531,21 @@ int thermal_zone_device_is_enabled(struct thermal_zone_device *tz)
return tz->mode == THERMAL_DEVICE_ENABLED;
}
+static bool thermal_zone_is_present(struct thermal_zone_device *tz)
+{
+ return !list_empty(&tz->node);
+}
+
void thermal_zone_device_update(struct thermal_zone_device *tz,
enum thermal_notify_event event)
{
mutex_lock(&tz->lock);
- if (device_is_registered(&tz->device))
+ if (thermal_zone_is_present(tz))
__thermal_zone_device_update(tz, event);
mutex_unlock(&tz->lock);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_update);
-static void thermal_zone_device_check(struct work_struct *work)
-{
- struct thermal_zone_device *tz = container_of(work, struct
- thermal_zone_device,
- poll_queue.work);
- thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
-}
-
int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
void *data)
{
@@ -694,6 +737,8 @@ int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
list_add_tail(&dev->tz_node, &tz->thermal_instances);
list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
atomic_set(&tz->need_update, 1);
+
+ thermal_governor_update_tz(tz, THERMAL_TZ_BIND_CDEV);
}
mutex_unlock(&cdev->lock);
mutex_unlock(&tz->lock);
@@ -752,6 +797,9 @@ int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz,
if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
list_del(&pos->tz_node);
list_del(&pos->cdev_node);
+
+ thermal_governor_update_tz(tz, THERMAL_TZ_UNBIND_CDEV);
+
mutex_unlock(&cdev->lock);
mutex_unlock(&tz->lock);
goto unbind;
@@ -793,7 +841,7 @@ static void thermal_release(struct device *dev)
tz = to_thermal_zone(dev);
thermal_zone_destroy_device_groups(tz);
mutex_destroy(&tz->lock);
- kfree(tz);
+ complete(&tz->removal);
} else if (!strncmp(dev_name(dev), "cooling_device",
sizeof("cooling_device") - 1)) {
cdev = to_cooling_device(dev);
@@ -1260,7 +1308,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
return ERR_PTR(-EINVAL);
}
- if (!ops) {
+ if (!ops || !ops->get_temp) {
pr_err("Thermal zone device ops not defined\n");
return ERR_PTR(-EINVAL);
}
@@ -1284,8 +1332,10 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
}
INIT_LIST_HEAD(&tz->thermal_instances);
+ INIT_LIST_HEAD(&tz->node);
ida_init(&tz->ida);
mutex_init(&tz->lock);
+ init_completion(&tz->removal);
id = ida_alloc(&thermal_tz_ida, GFP_KERNEL);
if (id < 0) {
result = id;
@@ -1348,14 +1398,14 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
}
mutex_lock(&thermal_list_lock);
+ mutex_lock(&tz->lock);
list_add_tail(&tz->node, &thermal_tz_list);
+ mutex_unlock(&tz->lock);
mutex_unlock(&thermal_list_lock);
/* Bind cooling devices for this zone */
bind_tz(tz);
- INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check);
-
thermal_zone_device_init(tz);
/* Update the new thermal zone and mark it as already updated. */
if (atomic_cmpxchg(&tz->need_update, 1, 0))
@@ -1369,7 +1419,6 @@ unregister:
device_del(&tz->device);
release_device:
put_device(&tz->device);
- tz = NULL;
remove_id:
ida_free(&thermal_tz_ida, id);
free_tzp:
@@ -1439,7 +1488,10 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
mutex_unlock(&thermal_list_lock);
return;
}
+
+ mutex_lock(&tz->lock);
list_del(&tz->node);
+ mutex_unlock(&tz->lock);
/* Unbind all cdevs associated with 'this' thermal zone */
list_for_each_entry(cdev, &thermal_cdev_list, node)
@@ -1456,15 +1508,16 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
ida_free(&thermal_tz_ida, tz->id);
ida_destroy(&tz->ida);
- mutex_lock(&tz->lock);
device_del(&tz->device);
- mutex_unlock(&tz->lock);
kfree(tz->tzp);
put_device(&tz->device);
thermal_notify_tz_delete(tz_id);
+
+ wait_for_completion(&tz->removal);
+ kfree(tz);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
@@ -1506,6 +1559,22 @@ exit:
}
EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
+static void thermal_zone_device_resume(struct work_struct *work)
+{
+ struct thermal_zone_device *tz;
+
+ tz = container_of(work, struct thermal_zone_device, poll_queue.work);
+
+ mutex_lock(&tz->lock);
+
+ tz->suspended = false;
+
+ thermal_zone_device_init(tz);
+ __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+
+ mutex_unlock(&tz->lock);
+}
+
static int thermal_pm_notify(struct notifier_block *nb,
unsigned long mode, void *_unused)
{
@@ -1515,17 +1584,43 @@ static int thermal_pm_notify(struct notifier_block *nb,
case PM_HIBERNATION_PREPARE:
case PM_RESTORE_PREPARE:
case PM_SUSPEND_PREPARE:
- atomic_set(&in_suspend, 1);
+ mutex_lock(&thermal_list_lock);
+
+ list_for_each_entry(tz, &thermal_tz_list, node) {
+ mutex_lock(&tz->lock);
+
+ tz->suspended = true;
+
+ mutex_unlock(&tz->lock);
+ }
+
+ mutex_unlock(&thermal_list_lock);
break;
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
case PM_POST_SUSPEND:
- atomic_set(&in_suspend, 0);
+ mutex_lock(&thermal_list_lock);
+
list_for_each_entry(tz, &thermal_tz_list, node) {
- thermal_zone_device_init(tz);
- thermal_zone_device_update(tz,
- THERMAL_EVENT_UNSPECIFIED);
+ mutex_lock(&tz->lock);
+
+ cancel_delayed_work(&tz->poll_queue);
+
+ /*
+ * Replace the work function with the resume one, which
+ * will restore the original work function and schedule
+ * the polling work if needed.
+ */
+ INIT_DELAYED_WORK(&tz->poll_queue,
+ thermal_zone_device_resume);
+ /* Queue up the work without a delay. */
+ mod_delayed_work(system_freezable_power_efficient_wq,
+ &tz->poll_queue, 0);
+
+ mutex_unlock(&tz->lock);
}
+
+ mutex_unlock(&thermal_list_lock);
break;
default:
break;
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 0a3b3ec5120b..4e023d54fd27 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -114,16 +114,19 @@ int thermal_zone_device_set_policy(struct thermal_zone_device *, char *);
int thermal_build_list_of_policies(char *buf);
void __thermal_zone_device_update(struct thermal_zone_device *tz,
enum thermal_notify_event event);
+void thermal_zone_device_critical_reboot(struct thermal_zone_device *tz);
+void thermal_governor_update_tz(struct thermal_zone_device *tz,
+ enum thermal_notify_event reason);
/* Helpers */
#define for_each_trip(__tz, __trip) \
for (__trip = __tz->trips; __trip - __tz->trips < __tz->num_trips; __trip++)
void __thermal_zone_set_trips(struct thermal_zone_device *tz);
-int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
- struct thermal_trip *trip);
-int thermal_zone_trip_id(struct thermal_zone_device *tz,
+int thermal_zone_trip_id(const struct thermal_zone_device *tz,
const struct thermal_trip *trip);
+void thermal_zone_trip_updated(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip);
int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
/* sysfs I/F */
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index 69e8ea4aa908..c3982e0f0075 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -82,20 +82,18 @@ EXPORT_SYMBOL(get_thermal_instance);
*/
int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
{
- int ret = -EINVAL;
- int count;
+ const struct thermal_trip *trip;
int crit_temp = INT_MAX;
- struct thermal_trip trip;
+ int ret = -EINVAL;
lockdep_assert_held(&tz->lock);
ret = tz->ops->get_temp(tz, temp);
if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) {
- for (count = 0; count < tz->num_trips; count++) {
- ret = __thermal_zone_get_trip(tz, count, &trip);
- if (!ret && trip.type == THERMAL_TRIP_CRITICAL) {
- crit_temp = trip.temperature;
+ for_each_trip(tz, trip) {
+ if (trip->type == THERMAL_TRIP_CRITICAL) {
+ crit_temp = trip->temperature;
break;
}
}
@@ -139,10 +137,7 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
goto unlock;
}
- if (device_is_registered(&tz->device))
- ret = __thermal_zone_get_temp(tz, temp);
- else
- ret = -ENODEV;
+ ret = __thermal_zone_get_temp(tz, temp);
unlock:
mutex_unlock(&tz->lock);
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index c3ae44659b81..252116f1e535 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -80,10 +80,7 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
mutex_lock(&tz->lock);
- if (device_is_registered(&tz->device))
- ret = tz->ops->get_crit_temp(tz, &temperature);
- else
- ret = -ENODEV;
+ ret = tz->ops->get_crit_temp(tz, &temperature);
mutex_unlock(&tz->lock);
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index 08bc46c3ec7b..332052e24a86 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -13,9 +13,14 @@
#include "thermal_core.h"
+enum thermal_genl_multicast_groups {
+ THERMAL_GENL_SAMPLING_GROUP = 0,
+ THERMAL_GENL_EVENT_GROUP = 1,
+};
+
static const struct genl_multicast_group thermal_genl_mcgrps[] = {
- { .name = THERMAL_GENL_SAMPLING_GROUP_NAME, },
- { .name = THERMAL_GENL_EVENT_GROUP_NAME, },
+ [THERMAL_GENL_SAMPLING_GROUP] = { .name = THERMAL_GENL_SAMPLING_GROUP_NAME, },
+ [THERMAL_GENL_EVENT_GROUP] = { .name = THERMAL_GENL_EVENT_GROUP_NAME, },
};
static const struct nla_policy thermal_genl_policy[THERMAL_GENL_ATTR_MAX + 1] = {
@@ -71,6 +76,11 @@ typedef int (*cb_t)(struct param *);
static struct genl_family thermal_gnl_family;
+static int thermal_group_has_listeners(enum thermal_genl_multicast_groups group)
+{
+ return genl_has_listeners(&thermal_gnl_family, &init_net, group);
+}
+
/************************** Sampling encoding *******************************/
int thermal_genl_sampling_temp(int id, int temp)
@@ -78,6 +88,9 @@ int thermal_genl_sampling_temp(int id, int temp)
struct sk_buff *skb;
void *hdr;
+ if (!thermal_group_has_listeners(THERMAL_GENL_SAMPLING_GROUP))
+ return 0;
+
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -95,7 +108,7 @@ int thermal_genl_sampling_temp(int id, int temp)
genlmsg_end(skb, hdr);
- genlmsg_multicast(&thermal_gnl_family, skb, 0, 0, GFP_KERNEL);
+ genlmsg_multicast(&thermal_gnl_family, skb, 0, THERMAL_GENL_SAMPLING_GROUP, GFP_KERNEL);
return 0;
out_cancel:
@@ -275,6 +288,9 @@ static int thermal_genl_send_event(enum thermal_genl_event event,
int ret = -EMSGSIZE;
void *hdr;
+ if (!thermal_group_has_listeners(THERMAL_GENL_EVENT_GROUP))
+ return 0;
+
msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -290,7 +306,7 @@ static int thermal_genl_send_event(enum thermal_genl_event event,
genlmsg_end(msg, hdr);
- genlmsg_multicast(&thermal_gnl_family, msg, 0, 1, GFP_KERNEL);
+ genlmsg_multicast(&thermal_gnl_family, msg, 0, THERMAL_GENL_EVENT_GROUP, GFP_KERNEL);
return 0;
@@ -450,10 +466,10 @@ out_cancel_nest:
static int thermal_genl_cmd_tz_get_trip(struct param *p)
{
struct sk_buff *msg = p->msg;
+ const struct thermal_trip *trip;
struct thermal_zone_device *tz;
struct nlattr *start_trip;
- struct thermal_trip trip;
- int ret, i, id;
+ int id;
if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID])
return -EINVAL;
@@ -470,16 +486,12 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
mutex_lock(&tz->lock);
- for (i = 0; i < tz->num_trips; i++) {
-
- ret = __thermal_zone_get_trip(tz, i, &trip);
- if (ret)
- goto out_cancel_nest;
-
- if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) ||
- nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, trip.type) ||
- nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TEMP, trip.temperature) ||
- nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_HYST, trip.hysteresis))
+ for_each_trip(tz, trip) {
+ if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID,
+ thermal_zone_trip_id(tz, trip)) ||
+ nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, trip->type) ||
+ nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TEMP, trip->temperature) ||
+ nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_HYST, trip->hysteresis))
goto out_cancel_nest;
}
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 1e0655b63259..4d6c22e0ed85 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -475,6 +475,7 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node *
struct thermal_zone_params tzp = {};
struct thermal_zone_device_ops *of_ops;
struct device_node *np;
+ const char *action;
int delay, pdelay;
int ntrips, mask;
int ret;
@@ -511,6 +512,11 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node *
mask = GENMASK_ULL((ntrips) - 1, 0);
+ ret = of_property_read_string(np, "critical-action", &action);
+ if (!ret)
+ if (!of_ops->critical && !strcasecmp(action, "reboot"))
+ of_ops->critical = thermal_zone_device_critical_reboot;
+
tz = thermal_zone_device_register_with_trips(np->name, trips, ntrips,
mask, data, of_ops, &tzp,
pdelay, delay);
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index eef40d4f3063..f4033865b093 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -83,25 +83,12 @@ trip_point_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- struct thermal_trip trip;
- int trip_id, result;
+ int trip_id;
if (sscanf(attr->attr.name, "trip_point_%d_type", &trip_id) != 1)
return -EINVAL;
- mutex_lock(&tz->lock);
-
- if (device_is_registered(dev))
- result = __thermal_zone_get_trip(tz, trip_id, &trip);
- else
- result = -ENODEV;
-
- mutex_unlock(&tz->lock);
-
- if (result)
- return result;
-
- switch (trip.type) {
+ switch (tz->trips[trip_id].type) {
case THERMAL_TRIP_CRITICAL:
return sprintf(buf, "critical\n");
case THERMAL_TRIP_HOT:
@@ -120,28 +107,33 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- struct thermal_trip trip;
+ struct thermal_trip *trip;
int trip_id, ret;
+ int temp;
+
+ ret = kstrtoint(buf, 10, &temp);
+ if (ret)
+ return -EINVAL;
if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1)
return -EINVAL;
mutex_lock(&tz->lock);
- if (!device_is_registered(dev)) {
- ret = -ENODEV;
- goto unlock;
- }
+ trip = &tz->trips[trip_id];
- ret = __thermal_zone_get_trip(tz, trip_id, &trip);
- if (ret)
- goto unlock;
+ if (temp != trip->temperature) {
+ if (tz->ops->set_trip_temp) {
+ ret = tz->ops->set_trip_temp(tz, trip_id, temp);
+ if (ret)
+ goto unlock;
+ }
- ret = kstrtoint(buf, 10, &trip.temperature);
- if (ret)
- goto unlock;
+ thermal_zone_set_trip_temp(tz, trip, temp);
+
+ __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
+ }
- ret = thermal_zone_set_trip(tz, trip_id, &trip);
unlock:
mutex_unlock(&tz->lock);
@@ -153,25 +145,12 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- struct thermal_trip trip;
- int trip_id, ret;
+ int trip_id;
if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1)
return -EINVAL;
- mutex_lock(&tz->lock);
-
- if (device_is_registered(dev))
- ret = __thermal_zone_get_trip(tz, trip_id, &trip);
- else
- ret = -ENODEV;
-
- mutex_unlock(&tz->lock);
-
- if (ret)
- return ret;
-
- return sprintf(buf, "%d\n", trip.temperature);
+ return sprintf(buf, "%d\n", tz->trips[trip_id].temperature);
}
static ssize_t
@@ -179,28 +158,33 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- struct thermal_trip trip;
+ struct thermal_trip *trip;
int trip_id, ret;
+ int hyst;
+
+ ret = kstrtoint(buf, 10, &hyst);
+ if (ret || hyst < 0)
+ return -EINVAL;
if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
return -EINVAL;
mutex_lock(&tz->lock);
- if (!device_is_registered(dev)) {
- ret = -ENODEV;
- goto unlock;
- }
+ trip = &tz->trips[trip_id];
- ret = __thermal_zone_get_trip(tz, trip_id, &trip);
- if (ret)
- goto unlock;
+ if (hyst != trip->hysteresis) {
+ if (tz->ops->set_trip_hyst) {
+ ret = tz->ops->set_trip_hyst(tz, trip_id, hyst);
+ if (ret)
+ goto unlock;
+ }
- ret = kstrtoint(buf, 10, &trip.hysteresis);
- if (ret)
- goto unlock;
+ trip->hysteresis = hyst;
+
+ thermal_zone_trip_updated(tz, trip);
+ }
- ret = thermal_zone_set_trip(tz, trip_id, &trip);
unlock:
mutex_unlock(&tz->lock);
@@ -212,22 +196,12 @@ trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- struct thermal_trip trip;
- int trip_id, ret;
+ int trip_id;
if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
return -EINVAL;
- mutex_lock(&tz->lock);
-
- if (device_is_registered(dev))
- ret = __thermal_zone_get_trip(tz, trip_id, &trip);
- else
- ret = -ENODEV;
-
- mutex_unlock(&tz->lock);
-
- return ret ? ret : sprintf(buf, "%d\n", trip.hysteresis);
+ return sprintf(buf, "%d\n", tz->trips[trip_id].hysteresis);
}
static ssize_t
@@ -276,11 +250,6 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
mutex_lock(&tz->lock);
- if (!device_is_registered(dev)) {
- ret = -ENODEV;
- goto unlock;
- }
-
if (!tz->ops->set_emul_temp)
tz->emul_temperature = temperature;
else
@@ -289,7 +258,6 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
if (!ret)
__thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
-unlock:
mutex_unlock(&tz->lock);
return ret ? ret : count;
@@ -968,7 +936,16 @@ ssize_t weight_store(struct device *dev, struct device_attribute *attr,
return ret;
instance = container_of(attr, struct thermal_instance, weight_attr);
+
+ /* Don't race with governors using the 'weight' value */
+ mutex_lock(&instance->tz->lock);
+
instance->weight = weight;
+ thermal_governor_update_tz(instance->tz,
+ THERMAL_INSTANCE_WEIGHT_CHANGED);
+
+ mutex_unlock(&instance->tz->lock);
+
return count;
}
diff --git a/drivers/thermal/thermal_trace_ipa.h b/drivers/thermal/thermal_trace_ipa.h
index 84568db5421b..b16b5dd863d9 100644
--- a/drivers/thermal/thermal_trace_ipa.h
+++ b/drivers/thermal/thermal_trace_ipa.h
@@ -8,19 +8,14 @@
#include <linux/tracepoint.h>
TRACE_EVENT(thermal_power_allocator,
- TP_PROTO(struct thermal_zone_device *tz, u32 *req_power,
- u32 total_req_power, u32 *granted_power,
- u32 total_granted_power, size_t num_actors,
- u32 power_range, u32 max_allocatable_power,
- int current_temp, s32 delta_temp),
- TP_ARGS(tz, req_power, total_req_power, granted_power,
- total_granted_power, num_actors, power_range,
- max_allocatable_power, current_temp, delta_temp),
+ TP_PROTO(struct thermal_zone_device *tz, u32 total_req_power,
+ u32 total_granted_power, int num_actors, u32 power_range,
+ u32 max_allocatable_power, int current_temp, s32 delta_temp),
+ TP_ARGS(tz, total_req_power, total_granted_power, num_actors,
+ power_range, max_allocatable_power, current_temp, delta_temp),
TP_STRUCT__entry(
__field(int, tz_id )
- __dynamic_array(u32, req_power, num_actors )
__field(u32, total_req_power )
- __dynamic_array(u32, granted_power, num_actors)
__field(u32, total_granted_power )
__field(size_t, num_actors )
__field(u32, power_range )
@@ -30,11 +25,7 @@ TRACE_EVENT(thermal_power_allocator,
),
TP_fast_assign(
__entry->tz_id = tz->id;
- memcpy(__get_dynamic_array(req_power), req_power,
- num_actors * sizeof(*req_power));
__entry->total_req_power = total_req_power;
- memcpy(__get_dynamic_array(granted_power), granted_power,
- num_actors * sizeof(*granted_power));
__entry->total_granted_power = total_granted_power;
__entry->num_actors = num_actors;
__entry->power_range = power_range;
@@ -43,18 +34,35 @@ TRACE_EVENT(thermal_power_allocator,
__entry->delta_temp = delta_temp;
),
- TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%d delta_temperature=%d",
- __entry->tz_id,
- __print_array(__get_dynamic_array(req_power),
- __entry->num_actors, 4),
- __entry->total_req_power,
- __print_array(__get_dynamic_array(granted_power),
- __entry->num_actors, 4),
+ TP_printk("thermal_zone_id=%d total_req_power=%u total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%d delta_temperature=%d",
+ __entry->tz_id, __entry->total_req_power,
__entry->total_granted_power, __entry->power_range,
__entry->max_allocatable_power, __entry->current_temp,
__entry->delta_temp)
);
+TRACE_EVENT(thermal_power_actor,
+ TP_PROTO(struct thermal_zone_device *tz, int actor_id, u32 req_power,
+ u32 granted_power),
+ TP_ARGS(tz, actor_id, req_power, granted_power),
+ TP_STRUCT__entry(
+ __field(int, tz_id)
+ __field(int, actor_id)
+ __field(u32, req_power)
+ __field(u32, granted_power)
+ ),
+ TP_fast_assign(
+ __entry->tz_id = tz->id;
+ __entry->actor_id = actor_id;
+ __entry->req_power = req_power;
+ __entry->granted_power = granted_power;
+ ),
+
+ TP_printk("thermal_zone_id=%d actor_id=%d req_power=%u granted_power=%u",
+ __entry->tz_id, __entry->actor_id, __entry->req_power,
+ __entry->granted_power)
+);
+
TRACE_EVENT(thermal_power_allocator_pid,
TP_PROTO(struct thermal_zone_device *tz, s32 err, s32 err_integral,
s64 p, s64 i, s64 d, s32 output),
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index e42456442c68..8bffa1e5e206 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -63,25 +63,21 @@ EXPORT_SYMBOL_GPL(thermal_zone_get_num_trips);
*/
void __thermal_zone_set_trips(struct thermal_zone_device *tz)
{
- struct thermal_trip trip;
+ const struct thermal_trip *trip;
int low = -INT_MAX, high = INT_MAX;
bool same_trip = false;
- int i, ret;
+ int ret;
lockdep_assert_held(&tz->lock);
if (!tz->ops->set_trips)
return;
- for (i = 0; i < tz->num_trips; i++) {
+ for_each_trip(tz, trip) {
bool low_set = false;
int trip_low;
- ret = __thermal_zone_get_trip(tz, i , &trip);
- if (ret)
- return;
-
- trip_low = trip.temperature - trip.hysteresis;
+ trip_low = trip->temperature - trip->hysteresis;
if (trip_low < tz->temperature && trip_low > low) {
low = trip_low;
@@ -89,9 +85,9 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
same_trip = false;
}
- if (trip.temperature > tz->temperature &&
- trip.temperature < high) {
- high = trip.temperature;
+ if (trip->temperature > tz->temperature &&
+ trip->temperature < high) {
+ high = trip->temperature;
same_trip = low_set;
}
}
@@ -147,46 +143,7 @@ int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
}
EXPORT_SYMBOL_GPL(thermal_zone_get_trip);
-int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id,
- const struct thermal_trip *trip)
-{
- struct thermal_trip t;
- int ret;
-
- if (!tz->ops->set_trip_temp && !tz->ops->set_trip_hyst && !tz->trips)
- return -EINVAL;
-
- ret = __thermal_zone_get_trip(tz, trip_id, &t);
- if (ret)
- return ret;
-
- if (t.type != trip->type)
- return -EINVAL;
-
- if (t.temperature != trip->temperature && tz->ops->set_trip_temp) {
- ret = tz->ops->set_trip_temp(tz, trip_id, trip->temperature);
- if (ret)
- return ret;
- }
-
- if (t.hysteresis != trip->hysteresis && tz->ops->set_trip_hyst) {
- ret = tz->ops->set_trip_hyst(tz, trip_id, trip->hysteresis);
- if (ret)
- return ret;
- }
-
- if (tz->trips && (t.temperature != trip->temperature || t.hysteresis != trip->hysteresis))
- tz->trips[trip_id] = *trip;
-
- thermal_notify_tz_trip_change(tz->id, trip_id, trip->type,
- trip->temperature, trip->hysteresis);
-
- __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
-
- return 0;
-}
-
-int thermal_zone_trip_id(struct thermal_zone_device *tz,
+int thermal_zone_trip_id(const struct thermal_zone_device *tz,
const struct thermal_trip *trip)
{
/*
@@ -195,3 +152,24 @@ int thermal_zone_trip_id(struct thermal_zone_device *tz,
*/
return trip - tz->trips;
}
+void thermal_zone_trip_updated(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip)
+{
+ thermal_notify_tz_trip_change(tz->id, thermal_zone_trip_id(tz, trip),
+ trip->type, trip->temperature,
+ trip->hysteresis);
+ __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
+}
+
+void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
+ struct thermal_trip *trip, int temp)
+{
+ if (trip->temperature == temp)
+ return;
+
+ trip->temperature = temp;
+ thermal_notify_tz_trip_change(tz->id, thermal_zone_trip_id(tz, trip),
+ trip->type, trip->temperature,
+ trip->hysteresis);
+}
+EXPORT_SYMBOL_GPL(thermal_zone_set_trip_temp);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index efe3e3b85769..fdd0fc7b8f25 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -831,7 +831,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
io_data->kiocb->ki_complete(io_data->kiocb, ret);
if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
- eventfd_signal(io_data->ffs->ffs_eventfd, 1);
+ eventfd_signal(io_data->ffs->ffs_eventfd);
if (io_data->read)
kfree(io_data->to_free);
@@ -2738,7 +2738,7 @@ static void __ffs_event_add(struct ffs_data *ffs,
ffs->ev.types[ffs->ev.count++] = type;
wake_up_locked(&ffs->ev.waitq);
if (ffs->ffs_eventfd)
- eventfd_signal(ffs->ffs_eventfd, 1);
+ eventfd_signal(ffs->ffs_eventfd);
}
static void ffs_event_add(struct ffs_data *ffs,
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
index 2d57786d3db7..89e8cf2a2a7d 100644
--- a/drivers/usb/gadget/udc/max3420_udc.c
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -1201,7 +1201,7 @@ static int max3420_probe(struct spi_device *spi)
int err, irq;
u8 reg[8];
- if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) {
+ if (spi->master->flags & SPI_CONTROLLER_HALF_DUPLEX) {
dev_err(&spi->dev, "UDC needs full duplex to work\n");
return -EINVAL;
}
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 8508d37a2aff..6cdc3d805c32 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -288,7 +288,7 @@ static void fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev)
#define PHYCTRL_LSFE (1 << 1) /* Line State Filter Enable */
#define PHYCTRL_PXE (1 << 0) /* PHY oscillator enable */
-int fsl_usb2_mpc5121_init(struct platform_device *pdev)
+static int fsl_usb2_mpc5121_init(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct clk *clk;
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 0ddd4b8abecb..1d24da79c399 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -493,7 +493,7 @@ static void vduse_vq_kick(struct vduse_virtqueue *vq)
goto unlock;
if (vq->kickfd)
- eventfd_signal(vq->kickfd, 1);
+ eventfd_signal(vq->kickfd);
else
vq->kicked = true;
unlock:
@@ -911,7 +911,7 @@ static int vduse_kickfd_setup(struct vduse_dev *dev,
eventfd_ctx_put(vq->kickfd);
vq->kickfd = ctx;
if (vq->ready && vq->kicked && vq->kickfd) {
- eventfd_signal(vq->kickfd, 1);
+ eventfd_signal(vq->kickfd);
vq->kicked = false;
}
spin_unlock(&vq->kick_lock);
@@ -960,7 +960,7 @@ static bool vduse_vq_signal_irqfd(struct vduse_virtqueue *vq)
spin_lock_irq(&vq->irq_lock);
if (vq->ready && vq->cb.trigger) {
- eventfd_signal(vq->cb.trigger, 1);
+ eventfd_signal(vq->cb.trigger);
signal = true;
}
spin_unlock_irq(&vq->irq_lock);
@@ -1157,7 +1157,7 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
fput(f);
break;
}
- ret = receive_fd(f, perm_to_file_flags(entry.perm));
+ ret = receive_fd(f, NULL, perm_to_file_flags(entry.perm));
fput(f);
break;
}
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
index c51229fccbd6..d62fbfff20b8 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
@@ -54,7 +54,7 @@ static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg)
{
struct vfio_fsl_mc_irq *mc_irq = (struct vfio_fsl_mc_irq *)arg;
- eventfd_signal(mc_irq->trigger, 1);
+ eventfd_signal(mc_irq->trigger);
return IRQ_HANDLED;
}
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 1929103ee59a..1cbc990d42e0 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -443,7 +443,7 @@ static int vfio_pci_core_runtime_resume(struct device *dev)
*/
down_write(&vdev->memory_lock);
if (vdev->pm_wake_eventfd_ctx) {
- eventfd_signal(vdev->pm_wake_eventfd_ctx, 1);
+ eventfd_signal(vdev->pm_wake_eventfd_ctx);
__vfio_pci_runtime_pm_exit(vdev);
}
up_write(&vdev->memory_lock);
@@ -1883,7 +1883,7 @@ void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
pci_notice_ratelimited(pdev,
"Relaying device request to user (#%u)\n",
count);
- eventfd_signal(vdev->req_trigger, 1);
+ eventfd_signal(vdev->req_trigger);
} else if (count == 0) {
pci_warn(pdev,
"No device request channel registered, blocked until released by user\n");
@@ -2302,7 +2302,7 @@ pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
mutex_lock(&vdev->igate);
if (vdev->err_trigger)
- eventfd_signal(vdev->err_trigger, 1);
+ eventfd_signal(vdev->err_trigger);
mutex_unlock(&vdev->igate);
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index cbb4bcbfbf83..237beac83809 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -94,7 +94,7 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused)
ctx = vfio_irq_ctx_get(vdev, 0);
if (WARN_ON_ONCE(!ctx))
return;
- eventfd_signal(ctx->trigger, 1);
+ eventfd_signal(ctx->trigger);
}
}
@@ -342,7 +342,7 @@ static irqreturn_t vfio_msihandler(int irq, void *arg)
{
struct eventfd_ctx *trigger = arg;
- eventfd_signal(trigger, 1);
+ eventfd_signal(trigger);
return IRQ_HANDLED;
}
@@ -689,11 +689,11 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
if (!ctx)
continue;
if (flags & VFIO_IRQ_SET_DATA_NONE) {
- eventfd_signal(ctx->trigger, 1);
+ eventfd_signal(ctx->trigger);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t *bools = data;
if (bools[i - start])
- eventfd_signal(ctx->trigger, 1);
+ eventfd_signal(ctx->trigger);
}
}
return 0;
@@ -707,7 +707,7 @@ static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
if (flags & VFIO_IRQ_SET_DATA_NONE) {
if (*ctx) {
if (count) {
- eventfd_signal(*ctx, 1);
+ eventfd_signal(*ctx);
} else {
eventfd_ctx_put(*ctx);
*ctx = NULL;
@@ -722,7 +722,7 @@ static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
trigger = *(uint8_t *)data;
if (trigger && *ctx)
- eventfd_signal(*ctx, 1);
+ eventfd_signal(*ctx);
return 0;
} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c
index 665197caed89..61a1bfb68ac7 100644
--- a/drivers/vfio/platform/vfio_platform_irq.c
+++ b/drivers/vfio/platform/vfio_platform_irq.c
@@ -155,7 +155,7 @@ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
spin_unlock_irqrestore(&irq_ctx->lock, flags);
if (ret == IRQ_HANDLED)
- eventfd_signal(irq_ctx->trigger, 1);
+ eventfd_signal(irq_ctx->trigger);
return ret;
}
@@ -164,7 +164,7 @@ static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
{
struct vfio_platform_irq *irq_ctx = dev_id;
- eventfd_signal(irq_ctx->trigger, 1);
+ eventfd_signal(irq_ctx->trigger);
return IRQ_HANDLED;
}
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index da7ec77cdaff..173beda74b38 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -178,7 +178,7 @@ static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
if (call_ctx)
- eventfd_signal(call_ctx, 1);
+ eventfd_signal(call_ctx);
return IRQ_HANDLED;
}
@@ -189,7 +189,7 @@ static irqreturn_t vhost_vdpa_config_cb(void *private)
struct eventfd_ctx *config_ctx = v->config_ctx;
if (config_ctx)
- eventfd_signal(config_ctx, 1);
+ eventfd_signal(config_ctx);
return IRQ_HANDLED;
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e0c181ad17e3..045f666b4f12 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2248,7 +2248,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
len -= l;
if (!len) {
if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
+ eventfd_signal(vq->log_ctx);
return 0;
}
}
@@ -2271,7 +2271,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
log_used(vq, (used - (void __user *)vq->used),
sizeof vq->used->flags);
if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
+ eventfd_signal(vq->log_ctx);
}
return 0;
}
@@ -2289,7 +2289,7 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq)
log_used(vq, (used - (void __user *)vq->used),
sizeof *vhost_avail_event(vq));
if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
+ eventfd_signal(vq->log_ctx);
}
return 0;
}
@@ -2715,7 +2715,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
log_used(vq, offsetof(struct vring_used, idx),
sizeof vq->used->idx);
if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
+ eventfd_signal(vq->log_ctx);
}
return r;
}
@@ -2763,7 +2763,7 @@ void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
/* Signal the Guest tell them we used something up. */
if (vq->call_ctx.ctx && vhost_notify(dev, vq))
- eventfd_signal(vq->call_ctx.ctx, 1);
+ eventfd_signal(vq->call_ctx.ctx);
}
EXPORT_SYMBOL_GPL(vhost_signal);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index f60d5f7bef94..9e942fcda5c3 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -249,7 +249,7 @@ void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
#define vq_err(vq, fmt, ...) do { \
pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
if ((vq)->error_ctx) \
- eventfd_signal((vq)->error_ctx, 1);\
+ eventfd_signal((vq)->error_ctx);\
} while (0)
enum {
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index a80939fe2ee6..6a29d2594b91 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -927,8 +927,8 @@ static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
if (request_size == 0)
return -1;
- if (order <= MAX_ORDER) {
- /* Call alloc_pages if the size is less than 2^MAX_ORDER */
+ if (order <= MAX_PAGE_ORDER) {
+ /* Call alloc_pages if the size is less than 2^MAX_PAGE_ORDER */
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!page)
return -1;
@@ -958,7 +958,7 @@ static void hvfb_release_phymem(struct hv_device *hdev,
{
unsigned int order = get_order(size);
- if (order <= MAX_ORDER)
+ if (order <= MAX_PAGE_ORDER)
__free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
else
dma_free_coherent(&hdev->device,
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index 840ead69654b..a32e5b2924c9 100644
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
@@ -197,7 +197,7 @@ static int vmlfb_alloc_vram(struct vml_info *vinfo,
va = &vinfo->vram[i];
order = 0;
- while (requested > (PAGE_SIZE << order) && order <= MAX_ORDER)
+ while (requested > (PAGE_SIZE << order) && order <= MAX_PAGE_ORDER)
order++;
err = vmlfb_alloc_vram_area(va, order, 0);
diff --git a/drivers/virt/acrn/ioeventfd.c b/drivers/virt/acrn/ioeventfd.c
index ac4037e9f947..4e845c6ca0b5 100644
--- a/drivers/virt/acrn/ioeventfd.c
+++ b/drivers/virt/acrn/ioeventfd.c
@@ -223,7 +223,7 @@ static int acrn_ioeventfd_handler(struct acrn_ioreq_client *client,
mutex_lock(&client->vm->ioeventfds_lock);
p = hsm_ioeventfd_match(client->vm, addr, val, size, req->type);
if (p)
- eventfd_signal(p->eventfd, 1);
+ eventfd_signal(p->eventfd);
mutex_unlock(&client->vm->ioeventfds_lock);
return 0;
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index bc564adcf499..87f241825bc3 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -994,7 +994,7 @@ e_unmap:
return ret;
}
-static int __exit sev_guest_remove(struct platform_device *pdev)
+static void __exit sev_guest_remove(struct platform_device *pdev)
{
struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
@@ -1003,8 +1003,6 @@ static int __exit sev_guest_remove(struct platform_device *pdev)
free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
deinit_crypto(snp_dev->crypto);
misc_deregister(&snp_dev->misc);
-
- return 0;
}
/*
@@ -1013,7 +1011,7 @@ static int __exit sev_guest_remove(struct platform_device *pdev)
* with the SEV-SNP support, it is named "sev-guest".
*/
static struct platform_driver sev_guest_driver = {
- .remove = __exit_p(sev_guest_remove),
+ .remove_new = __exit_p(sev_guest_remove),
.driver = {
.name = "sev-guest",
},
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 1fe93e93f5bc..59cdc0292dce 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -33,7 +33,7 @@
#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
__GFP_NOMEMALLOC)
/* The order of free page blocks to report to host */
-#define VIRTIO_BALLOON_HINT_BLOCK_ORDER MAX_ORDER
+#define VIRTIO_BALLOON_HINT_BLOCK_ORDER MAX_PAGE_ORDER
/* The size of a free page block in bytes */
#define VIRTIO_BALLOON_HINT_BLOCK_BYTES \
(1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index fa5226c198cc..8e3223294442 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -1154,13 +1154,13 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn,
*/
static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
{
- unsigned long order = MAX_ORDER;
+ unsigned long order = MAX_PAGE_ORDER;
unsigned long i;
/*
* We might get called for ranges that don't cover properly aligned
- * MAX_ORDER pages; however, we can only online properly aligned
- * pages with an order of MAX_ORDER at maximum.
+ * MAX_PAGE_ORDER pages; however, we can only online properly aligned
+ * pages with an order of MAX_PAGE_ORDER at maximum.
*/
while (!IS_ALIGNED(pfn | nr_pages, 1 << order))
order--;
@@ -1280,7 +1280,7 @@ static void virtio_mem_online_page(struct virtio_mem *vm,
bool do_online;
/*
- * We can get called with any order up to MAX_ORDER. If our subblock
+ * We can get called with any order up to MAX_PAGE_ORDER. If our subblock
* size is smaller than that and we have a mixture of plugged and
* unplugged subblocks within such a page, we have to process in
* smaller granularity. In that case we'll adjust the order exactly once
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 0eb337a8ec0f..35b6e306026a 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -1147,7 +1147,7 @@ static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
if (ioreq->addr == kioeventfd->addr + VIRTIO_MMIO_QUEUE_NOTIFY &&
ioreq->size == kioeventfd->addr_len &&
(ioreq->data & QUEUE_NOTIFY_VQ_MASK) == kioeventfd->vq) {
- eventfd_signal(kioeventfd->eventfd, 1);
+ eventfd_signal(kioeventfd->eventfd);
state = STATE_IORESP_READY;
break;
}
diff --git a/fs/Kconfig b/fs/Kconfig
index 42837617a55b..a3159831ba98 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -18,6 +18,10 @@ config VALIDATE_FS_PARSER
config FS_IOMAP
bool
+# Stackable filesystems
+config FS_STACK
+ bool
+
config BUFFER_HEAD
bool
@@ -254,7 +258,7 @@ config TMPFS_QUOTA
config ARCH_SUPPORTS_HUGETLBFS
def_bool n
-config HUGETLBFS
+menuconfig HUGETLBFS
bool "HugeTLB file system support"
depends on X86 || SPARC64 || ARCH_SUPPORTS_HUGETLBFS || BROKEN
depends on (SYSFS || SYSCTL)
@@ -266,6 +270,17 @@ config HUGETLBFS
If unsure, say N.
+if HUGETLBFS
+config HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON
+ bool "HugeTLB Vmemmap Optimization (HVO) defaults to on"
+ default n
+ depends on HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+ help
+ The HugeTLB Vmemmap Optimization (HVO) defaults to off. Say Y here to
+ enable HVO by default. It can be disabled via hugetlb_free_vmemmap=off
+ (boot command line) or hugetlb_optimize_vmemmap (sysctl).
+endif # HUGETLBFS
+
config HUGETLB_PAGE
def_bool HUGETLBFS
select XARRAY_MULTI
@@ -275,15 +290,6 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
depends on SPARSEMEM_VMEMMAP
-config HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON
- bool "HugeTLB Vmemmap Optimization (HVO) defaults to on"
- default n
- depends on HUGETLB_PAGE_OPTIMIZE_VMEMMAP
- help
- The HugeTLB VmemmapvOptimization (HVO) defaults to off. Say Y here to
- enable HVO by default. It can be disabled via hugetlb_free_vmemmap=off
- (boot command line) or hugetlb_optimize_vmemmap (sysctl).
-
config ARCH_HAS_GIGANTIC_PAGE
bool
diff --git a/fs/Makefile b/fs/Makefile
index 75522f88e763..a6962c588962 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_COMPAT_BINFMT_ELF) += compat_binfmt_elf.o
obj-$(CONFIG_BINFMT_ELF_FDPIC) += binfmt_elf_fdpic.o
obj-$(CONFIG_BINFMT_FLAT) += binfmt_flat.o
+obj-$(CONFIG_FS_STACK) += backing-file.o
obj-$(CONFIG_FS_MBCACHE) += mbcache.o
obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o
obj-$(CONFIG_NFS_COMMON) += nfs_common/
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 3081edb09e46..a183e213a4a5 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -5,6 +5,7 @@
* Copyright (C) 1997-1999 Russell King
*/
#include <linux/buffer_head.h>
+#include <linux/mpage.h>
#include <linux/writeback.h>
#include "adfs.h"
@@ -33,9 +34,10 @@ abort_toobig:
return 0;
}
-static int adfs_writepage(struct page *page, struct writeback_control *wbc)
+static int adfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- return block_write_full_page(page, adfs_get_block, wbc);
+ return mpage_writepages(mapping, wbc, adfs_get_block);
}
static int adfs_read_folio(struct file *file, struct folio *folio)
@@ -76,10 +78,11 @@ static const struct address_space_operations adfs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = adfs_read_folio,
- .writepage = adfs_writepage,
+ .writepages = adfs_writepages,
.write_begin = adfs_write_begin,
.write_end = generic_write_end,
- .bmap = _adfs_bmap
+ .migrate_folio = buffer_migrate_folio,
+ .bmap = _adfs_bmap,
};
/*
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 4a168781936b..e87b52b1f34c 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -242,7 +242,7 @@ static void afs_kill_pages(struct address_space *mapping,
folio_clear_uptodate(folio);
folio_end_writeback(folio);
folio_lock(folio);
- generic_error_remove_page(mapping, &folio->page);
+ generic_error_remove_folio(mapping, folio);
folio_unlock(folio);
folio_put(folio);
@@ -559,8 +559,7 @@ static void afs_extend_writeback(struct address_space *mapping,
if (!folio_clear_dirty_for_io(folio))
BUG();
- if (folio_start_writeback(folio))
- BUG();
+ folio_start_writeback(folio);
afs_folio_start_fscache(caching, folio);
*_count -= folio_nr_pages(folio);
@@ -595,8 +594,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
_enter(",%lx,%llx-%llx", folio_index(folio), start, end);
- if (folio_start_writeback(folio))
- BUG();
+ folio_start_writeback(folio);
afs_folio_start_fscache(caching, folio);
count -= folio_nr_pages(folio);
diff --git a/fs/aio.c b/fs/aio.c
index f8589caef9c1..ffe65c1aab4e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -266,7 +266,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
return ERR_CAST(inode);
inode->i_mapping->a_ops = &aio_ctx_aops;
- inode->i_mapping->private_data = ctx;
+ inode->i_mapping->i_private_data = ctx;
inode->i_size = PAGE_SIZE * nr_pages;
file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
@@ -316,10 +316,10 @@ static void put_aio_ring_file(struct kioctx *ctx)
/* Prevent further access to the kioctx from migratepages */
i_mapping = aio_ring_file->f_mapping;
- spin_lock(&i_mapping->private_lock);
- i_mapping->private_data = NULL;
+ spin_lock(&i_mapping->i_private_lock);
+ i_mapping->i_private_data = NULL;
ctx->aio_ring_file = NULL;
- spin_unlock(&i_mapping->private_lock);
+ spin_unlock(&i_mapping->i_private_lock);
fput(aio_ring_file);
}
@@ -422,9 +422,9 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
rc = 0;
- /* mapping->private_lock here protects against the kioctx teardown. */
- spin_lock(&mapping->private_lock);
- ctx = mapping->private_data;
+ /* mapping->i_private_lock here protects against the kioctx teardown. */
+ spin_lock(&mapping->i_private_lock);
+ ctx = mapping->i_private_data;
if (!ctx) {
rc = -EINVAL;
goto out;
@@ -476,7 +476,7 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
out_unlock:
mutex_unlock(&ctx->ring_lock);
out:
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return rc;
}
#else
@@ -1106,6 +1106,11 @@ static inline void iocb_destroy(struct aio_kiocb *iocb)
kmem_cache_free(kiocb_cachep, iocb);
}
+struct aio_waiter {
+ struct wait_queue_entry w;
+ size_t min_nr;
+};
+
/* aio_complete
* Called when the io request on the given iocb is complete.
*/
@@ -1114,7 +1119,7 @@ static void aio_complete(struct aio_kiocb *iocb)
struct kioctx *ctx = iocb->ki_ctx;
struct aio_ring *ring;
struct io_event *ev_page, *event;
- unsigned tail, pos, head;
+ unsigned tail, pos, head, avail;
unsigned long flags;
/*
@@ -1156,6 +1161,10 @@ static void aio_complete(struct aio_kiocb *iocb)
ctx->completed_events++;
if (ctx->completed_events > 1)
refill_reqs_available(ctx, head, tail);
+
+ avail = tail > head
+ ? tail - head
+ : tail + ctx->nr_events - head;
spin_unlock_irqrestore(&ctx->completion_lock, flags);
pr_debug("added to ring %p at [%u]\n", iocb, tail);
@@ -1166,7 +1175,7 @@ static void aio_complete(struct aio_kiocb *iocb)
* from IRQ context.
*/
if (iocb->ki_eventfd)
- eventfd_signal(iocb->ki_eventfd, 1);
+ eventfd_signal(iocb->ki_eventfd);
/*
* We have to order our ring_info tail store above and test
@@ -1176,8 +1185,18 @@ static void aio_complete(struct aio_kiocb *iocb)
*/
smp_mb();
- if (waitqueue_active(&ctx->wait))
- wake_up(&ctx->wait);
+ if (waitqueue_active(&ctx->wait)) {
+ struct aio_waiter *curr, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->wait.lock, flags);
+ list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)
+ if (avail >= curr->min_nr) {
+ list_del_init_careful(&curr->w.entry);
+ wake_up_process(curr->w.private);
+ }
+ spin_unlock_irqrestore(&ctx->wait.lock, flags);
+ }
}
static inline void iocb_put(struct aio_kiocb *iocb)
@@ -1290,7 +1309,9 @@ static long read_events(struct kioctx *ctx, long min_nr, long nr,
struct io_event __user *event,
ktime_t until)
{
- long ret = 0;
+ struct hrtimer_sleeper t;
+ struct aio_waiter w;
+ long ret = 0, ret2 = 0;
/*
* Note that aio_read_events() is being called as the conditional - i.e.
@@ -1306,12 +1327,38 @@ static long read_events(struct kioctx *ctx, long min_nr, long nr,
* the ringbuffer empty. So in practice we should be ok, but it's
* something to be aware of when touching this code.
*/
- if (until == 0)
- aio_read_events(ctx, min_nr, nr, event, &ret);
- else
- wait_event_interruptible_hrtimeout(ctx->wait,
- aio_read_events(ctx, min_nr, nr, event, &ret),
- until);
+ aio_read_events(ctx, min_nr, nr, event, &ret);
+ if (until == 0 || ret < 0 || ret >= min_nr)
+ return ret;
+
+ hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ if (until != KTIME_MAX) {
+ hrtimer_set_expires_range_ns(&t.timer, until, current->timer_slack_ns);
+ hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
+ }
+
+ init_wait(&w.w);
+
+ while (1) {
+ unsigned long nr_got = ret;
+
+ w.min_nr = min_nr - ret;
+
+ ret2 = prepare_to_wait_event(&ctx->wait, &w.w, TASK_INTERRUPTIBLE);
+ if (!ret2 && !t.task)
+ ret2 = -ETIME;
+
+ if (aio_read_events(ctx, min_nr, nr, event, &ret) || ret2)
+ break;
+
+ if (nr_got == ret)
+ schedule();
+ }
+
+ finish_wait(&ctx->wait, &w.w);
+ hrtimer_cancel(&t.timer);
+ destroy_hrtimer_on_stack(&t.timer);
+
return ret;
}
@@ -1498,7 +1545,7 @@ static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
size_t len = iocb->aio_nbytes;
if (!vectored) {
- ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
+ ssize_t ret = import_ubuf(rw, buf, len, iter);
*iovec = NULL;
return ret;
}
diff --git a/fs/attr.c b/fs/attr.c
index bdf5deb06ea9..5a13f0c8495f 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -157,7 +157,7 @@ static bool chgrp_ok(struct mnt_idmap *idmap,
* the vfsmount must be passed through @idmap. This function will then
* take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
- * performed on the raw inode simply passs @nop_mnt_idmap.
+ * performed on the raw inode simply pass @nop_mnt_idmap.
*
* Should be called as the first thing in ->setattr implementations,
* possibly after taking additional locks.
diff --git a/fs/backing-file.c b/fs/backing-file.c
new file mode 100644
index 000000000000..a681f38d84d8
--- /dev/null
+++ b/fs/backing-file.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Common helpers for stackable filesystems and backing files.
+ *
+ * Forked from fs/overlayfs/file.c.
+ *
+ * Copyright (C) 2017 Red Hat, Inc.
+ * Copyright (C) 2023 CTERA Networks.
+ */
+
+#include <linux/fs.h>
+#include <linux/backing-file.h>
+#include <linux/splice.h>
+#include <linux/mm.h>
+
+#include "internal.h"
+
+/**
+ * backing_file_open - open a backing file for kernel internal use
+ * @user_path: path that the user requested to open
+ * @flags: open flags
+ * @real_path: path of the backing file
+ * @cred: credentials for open
+ *
+ * Open a backing file for a stackable filesystem (e.g., overlayfs).
+ * @user_path may be on the stackable filesystem and @real_path on the
+ * underlying filesystem. In this case, we want to be able to return the
+ * @user_path of the stackable filesystem. This is done by embedding the
+ * returned file into a container structure that also stores the stacked
+ * file's path, which can be retrieved using backing_file_user_path().
+ */
+struct file *backing_file_open(const struct path *user_path, int flags,
+ const struct path *real_path,
+ const struct cred *cred)
+{
+ struct file *f;
+ int error;
+
+ f = alloc_empty_backing_file(flags, cred);
+ if (IS_ERR(f))
+ return f;
+
+ path_get(user_path);
+ *backing_file_user_path(f) = *user_path;
+ error = vfs_open(real_path, f);
+ if (error) {
+ fput(f);
+ f = ERR_PTR(error);
+ }
+
+ return f;
+}
+EXPORT_SYMBOL_GPL(backing_file_open);
+
+struct backing_aio {
+ struct kiocb iocb;
+ refcount_t ref;
+ struct kiocb *orig_iocb;
+ /* used for aio completion */
+ void (*end_write)(struct file *);
+ struct work_struct work;
+ long res;
+};
+
+static struct kmem_cache *backing_aio_cachep;
+
+#define BACKING_IOCB_MASK \
+ (IOCB_NOWAIT | IOCB_HIPRI | IOCB_DSYNC | IOCB_SYNC | IOCB_APPEND)
+
+static rwf_t iocb_to_rw_flags(int flags)
+{
+ return (__force rwf_t)(flags & BACKING_IOCB_MASK);
+}
+
+static void backing_aio_put(struct backing_aio *aio)
+{
+ if (refcount_dec_and_test(&aio->ref)) {
+ fput(aio->iocb.ki_filp);
+ kmem_cache_free(backing_aio_cachep, aio);
+ }
+}
+
+static void backing_aio_cleanup(struct backing_aio *aio, long res)
+{
+ struct kiocb *iocb = &aio->iocb;
+ struct kiocb *orig_iocb = aio->orig_iocb;
+
+ if (aio->end_write)
+ aio->end_write(orig_iocb->ki_filp);
+
+ orig_iocb->ki_pos = iocb->ki_pos;
+ backing_aio_put(aio);
+}
+
+static void backing_aio_rw_complete(struct kiocb *iocb, long res)
+{
+ struct backing_aio *aio = container_of(iocb, struct backing_aio, iocb);
+ struct kiocb *orig_iocb = aio->orig_iocb;
+
+ if (iocb->ki_flags & IOCB_WRITE)
+ kiocb_end_write(iocb);
+
+ backing_aio_cleanup(aio, res);
+ orig_iocb->ki_complete(orig_iocb, res);
+}
+
+static void backing_aio_complete_work(struct work_struct *work)
+{
+ struct backing_aio *aio = container_of(work, struct backing_aio, work);
+
+ backing_aio_rw_complete(&aio->iocb, aio->res);
+}
+
+static void backing_aio_queue_completion(struct kiocb *iocb, long res)
+{
+ struct backing_aio *aio = container_of(iocb, struct backing_aio, iocb);
+
+ /*
+ * Punt to a work queue to serialize updates of mtime/size.
+ */
+ aio->res = res;
+ INIT_WORK(&aio->work, backing_aio_complete_work);
+ queue_work(file_inode(aio->orig_iocb->ki_filp)->i_sb->s_dio_done_wq,
+ &aio->work);
+}
+
+static int backing_aio_init_wq(struct kiocb *iocb)
+{
+ struct super_block *sb = file_inode(iocb->ki_filp)->i_sb;
+
+ if (sb->s_dio_done_wq)
+ return 0;
+
+ return sb_init_dio_done_wq(sb);
+}
+
+
+ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx)
+{
+ struct backing_aio *aio = NULL;
+ const struct cred *old_cred;
+ ssize_t ret;
+
+ if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)))
+ return -EIO;
+
+ if (!iov_iter_count(iter))
+ return 0;
+
+ if (iocb->ki_flags & IOCB_DIRECT &&
+ !(file->f_mode & FMODE_CAN_ODIRECT))
+ return -EINVAL;
+
+ old_cred = override_creds(ctx->cred);
+ if (is_sync_kiocb(iocb)) {
+ rwf_t rwf = iocb_to_rw_flags(flags);
+
+ ret = vfs_iter_read(file, iter, &iocb->ki_pos, rwf);
+ } else {
+ ret = -ENOMEM;
+ aio = kmem_cache_zalloc(backing_aio_cachep, GFP_KERNEL);
+ if (!aio)
+ goto out;
+
+ aio->orig_iocb = iocb;
+ kiocb_clone(&aio->iocb, iocb, get_file(file));
+ aio->iocb.ki_complete = backing_aio_rw_complete;
+ refcount_set(&aio->ref, 2);
+ ret = vfs_iocb_iter_read(file, &aio->iocb, iter);
+ backing_aio_put(aio);
+ if (ret != -EIOCBQUEUED)
+ backing_aio_cleanup(aio, ret);
+ }
+out:
+ revert_creds(old_cred);
+
+ if (ctx->accessed)
+ ctx->accessed(ctx->user_file);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(backing_file_read_iter);
+
+ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx)
+{
+ const struct cred *old_cred;
+ ssize_t ret;
+
+ if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)))
+ return -EIO;
+
+ if (!iov_iter_count(iter))
+ return 0;
+
+ ret = file_remove_privs(ctx->user_file);
+ if (ret)
+ return ret;
+
+ if (iocb->ki_flags & IOCB_DIRECT &&
+ !(file->f_mode & FMODE_CAN_ODIRECT))
+ return -EINVAL;
+
+ /*
+ * Stacked filesystems don't support deferred completions, so don't copy
+ * this property in case it is set by the issuer.
+ */
+ flags &= ~IOCB_DIO_CALLER_COMP;
+
+ old_cred = override_creds(ctx->cred);
+ if (is_sync_kiocb(iocb)) {
+ rwf_t rwf = iocb_to_rw_flags(flags);
+
+ ret = vfs_iter_write(file, iter, &iocb->ki_pos, rwf);
+ if (ctx->end_write)
+ ctx->end_write(ctx->user_file);
+ } else {
+ struct backing_aio *aio;
+
+ ret = backing_aio_init_wq(iocb);
+ if (ret)
+ goto out;
+
+ ret = -ENOMEM;
+ aio = kmem_cache_zalloc(backing_aio_cachep, GFP_KERNEL);
+ if (!aio)
+ goto out;
+
+ aio->orig_iocb = iocb;
+ aio->end_write = ctx->end_write;
+ kiocb_clone(&aio->iocb, iocb, get_file(file));
+ aio->iocb.ki_flags = flags;
+ aio->iocb.ki_complete = backing_aio_queue_completion;
+ refcount_set(&aio->ref, 2);
+ ret = vfs_iocb_iter_write(file, &aio->iocb, iter);
+ backing_aio_put(aio);
+ if (ret != -EIOCBQUEUED)
+ backing_aio_cleanup(aio, ret);
+ }
+out:
+ revert_creds(old_cred);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(backing_file_write_iter);
+
+ssize_t backing_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx)
+{
+ const struct cred *old_cred;
+ ssize_t ret;
+
+ if (WARN_ON_ONCE(!(in->f_mode & FMODE_BACKING)))
+ return -EIO;
+
+ old_cred = override_creds(ctx->cred);
+ ret = vfs_splice_read(in, ppos, pipe, len, flags);
+ revert_creds(old_cred);
+
+ if (ctx->accessed)
+ ctx->accessed(ctx->user_file);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(backing_file_splice_read);
+
+ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *out, loff_t *ppos, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx)
+{
+ const struct cred *old_cred;
+ ssize_t ret;
+
+ if (WARN_ON_ONCE(!(out->f_mode & FMODE_BACKING)))
+ return -EIO;
+
+ ret = file_remove_privs(ctx->user_file);
+ if (ret)
+ return ret;
+
+ old_cred = override_creds(ctx->cred);
+ file_start_write(out);
+ ret = iter_file_splice_write(pipe, out, ppos, len, flags);
+ file_end_write(out);
+ revert_creds(old_cred);
+
+ if (ctx->end_write)
+ ctx->end_write(ctx->user_file);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(backing_file_splice_write);
+
+int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
+ struct backing_file_ctx *ctx)
+{
+ const struct cred *old_cred;
+ int ret;
+
+ if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)) ||
+ WARN_ON_ONCE(ctx->user_file != vma->vm_file))
+ return -EIO;
+
+ if (!file->f_op->mmap)
+ return -ENODEV;
+
+ vma_set_file(vma, file);
+
+ old_cred = override_creds(ctx->cred);
+ ret = call_mmap(vma->vm_file, vma);
+ revert_creds(old_cred);
+
+ if (ctx->accessed)
+ ctx->accessed(ctx->user_file);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(backing_file_mmap);
+
+static int __init backing_aio_init(void)
+{
+ backing_aio_cachep = kmem_cache_create("backing_aio",
+ sizeof(struct backing_aio),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!backing_aio_cachep)
+ return -ENOMEM;
+
+ return 0;
+}
+fs_initcall(backing_aio_init);
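
For reference, a minimal sketch (not part of this patch) of how a stackable
filesystem might drive the new helpers from its ->read_iter(). The
backing_file_ctx members used here (cred, user_file, accessed) are taken from
what the helpers above dereference; stackfs_real_file(), stackfs_creds() and
stackfs_file_accessed() are hypothetical placeholders for the caller's own
plumbing.

/* Illustrative only -- assumes <linux/backing-file.h> for struct backing_file_ctx */
static ssize_t stackfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;			/* file on the stacking fs */
	struct file *realfile = stackfs_real_file(file);	/* hypothetical: file opened earlier via backing_file_open() */
	struct backing_file_ctx ctx = {
		.cred		= stackfs_creds(file),		/* hypothetical: mounter credentials */
		.user_file	= file,
		.accessed	= stackfs_file_accessed,	/* hypothetical: atime-update callback */
	};

	if (!realfile)
		return -EIO;

	return backing_file_read_iter(realfile, iter, iocb, iocb->ki_flags, &ctx);
}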
diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile
index 45b64f89258c..b81268418174 100644
--- a/fs/bcachefs/Makefile
+++ b/fs/bcachefs/Makefile
@@ -28,6 +28,7 @@ bcachefs-y := \
clock.o \
compress.o \
counters.o \
+ darray.o \
debug.o \
dirent.o \
disk_groups.o \
@@ -70,6 +71,7 @@ bcachefs-y := \
reflink.o \
replicas.o \
sb-clean.o \
+ sb-downgrade.o \
sb-errors.o \
sb-members.o \
siphash.o \
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index f3809897f00a..3640f417cce1 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -366,7 +366,8 @@ retry:
bch2_trans_begin(trans);
acl = _acl;
- ret = bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
+ ret = bch2_subvol_is_ro_trans(trans, inode->ei_subvol) ?:
+ bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
BTREE_ITER_INTENT);
if (ret)
goto btree_err;
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index dfa22f9d9a1d..b62737fdf5ab 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -737,6 +737,7 @@ struct bch_fs {
unsigned nsec_per_time_unit;
u64 features;
u64 compat;
+ unsigned long errors_silent[BITS_TO_LONGS(BCH_SB_ERR_MAX)];
} sb;
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 1ab1f08d763b..fe78e87603fc 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -1207,19 +1207,21 @@ struct bch_sb_field {
};
#define BCH_SB_FIELDS() \
- x(journal, 0) \
- x(members_v1, 1) \
- x(crypt, 2) \
- x(replicas_v0, 3) \
- x(quota, 4) \
- x(disk_groups, 5) \
- x(clean, 6) \
- x(replicas, 7) \
- x(journal_seq_blacklist, 8) \
- x(journal_v2, 9) \
- x(counters, 10) \
- x(members_v2, 11) \
- x(errors, 12)
+ x(journal, 0) \
+ x(members_v1, 1) \
+ x(crypt, 2) \
+ x(replicas_v0, 3) \
+ x(quota, 4) \
+ x(disk_groups, 5) \
+ x(clean, 6) \
+ x(replicas, 7) \
+ x(journal_seq_blacklist, 8) \
+ x(journal_v2, 9) \
+ x(counters, 10) \
+ x(members_v2, 11) \
+ x(errors, 12) \
+ x(ext, 13) \
+ x(downgrade, 14)
enum bch_sb_field_type {
#define x(f, nr) BCH_SB_FIELD_##f = nr,
@@ -1631,6 +1633,24 @@ struct bch_sb_field_errors {
LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID, struct bch_sb_field_error_entry, v, 0, 16);
LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR, struct bch_sb_field_error_entry, v, 16, 64);
+struct bch_sb_field_ext {
+ struct bch_sb_field field;
+ __le64 recovery_passes_required[2];
+ __le64 errors_silent[8];
+};
+
+struct bch_sb_field_downgrade_entry {
+ __le16 version;
+ __le64 recovery_passes[2];
+ __le16 nr_errors;
+ __le16 errors[] __counted_by(nr_errors);
+} __packed __aligned(2);
+
+struct bch_sb_field_downgrade {
+ struct bch_sb_field field;
+ struct bch_sb_field_downgrade_entry entries[];
+};
+
/* Superblock: */
/*
@@ -1644,6 +1664,11 @@ LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR, struct bch_sb_field_error_entry, v, 16, 64);
#define RECOVERY_PASS_ALL_FSCK (1ULL << 63)
+/*
+ * field 1: version name
+ * field 2: BCH_VERSION(major, minor)
+ * field 3: recovery passes required on upgrade
+ */
#define BCH_METADATA_VERSIONS() \
x(bkey_renumber, BCH_VERSION(0, 10), \
RECOVERY_PASS_ALL_FSCK) \
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 6be79129738d..da594e006769 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -2085,18 +2085,16 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
goto out_no_locked;
/*
- * iter->pos should be mononotically increasing, and always be
- * equal to the key we just returned - except extents can
- * straddle iter->pos:
+ * We need to check against @end before FILTER_SNAPSHOTS because
+ * if we get to a different inode than the one requested we might be
+ * seeing keys for a different snapshot tree that will all be
+ * filtered out.
+ *
+ * But we can't do the full check here, because bkey_start_pos()
+ * isn't monotonically increasing before FILTER_SNAPSHOTS, and
+ * that's what we check against in extents mode:
*/
- if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
- iter_pos = k.k->p;
- else
- iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
-
- if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
- ? bkey_gt(iter_pos, end)
- : bkey_ge(iter_pos, end)))
+ if (k.k->p.inode > end.inode)
goto end;
if (iter->update_path &&
@@ -2155,6 +2153,21 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
continue;
}
+ /*
+ * iter->pos should be monotonically increasing, and always be
+ * equal to the key we just returned - except extents can
+ * straddle iter->pos:
+ */
+ if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
+ iter_pos = k.k->p;
+ else
+ iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
+
+ if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
+ ? bkey_gt(iter_pos, end)
+ : bkey_ge(iter_pos, end)))
+ goto end;
+
break;
}
diff --git a/fs/bcachefs/darray.c b/fs/bcachefs/darray.c
new file mode 100644
index 000000000000..ac35b8b705ae
--- /dev/null
+++ b/fs/bcachefs/darray.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include "darray.h"
+
+int __bch2_darray_resize(darray_char *d, size_t element_size, size_t new_size, gfp_t gfp)
+{
+ if (new_size > d->size) {
+ new_size = roundup_pow_of_two(new_size);
+
+ void *data = kvmalloc_array(new_size, element_size, gfp);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(data, d->data, d->size * element_size);
+ if (d->data != d->preallocated)
+ kvfree(d->data);
+ d->data = data;
+ d->size = new_size;
+ }
+
+ return 0;
+}
diff --git a/fs/bcachefs/darray.h b/fs/bcachefs/darray.h
index 87b4b2d1ec76..e367c625f057 100644
--- a/fs/bcachefs/darray.h
+++ b/fs/bcachefs/darray.h
@@ -8,39 +8,48 @@
* Inspired by CCAN's darray
*/
-#include "util.h"
#include <linux/slab.h>
-#define DARRAY(type) \
+#define DARRAY_PREALLOCATED(_type, _nr) \
struct { \
size_t nr, size; \
- type *data; \
+ _type *data; \
+ _type preallocated[_nr]; \
}
-typedef DARRAY(void) darray_void;
+#define DARRAY(_type) DARRAY_PREALLOCATED(_type, 0)
-static inline int __darray_make_room(darray_void *d, size_t t_size, size_t more, gfp_t gfp)
+typedef DARRAY(char) darray_char;
+
+int __bch2_darray_resize(darray_char *, size_t, size_t, gfp_t);
+
+static inline int __darray_resize(darray_char *d, size_t element_size,
+ size_t new_size, gfp_t gfp)
{
- if (d->nr + more > d->size) {
- size_t new_size = roundup_pow_of_two(d->nr + more);
- void *data = krealloc_array(d->data, new_size, t_size, gfp);
+ return unlikely(new_size > d->size)
+ ? __bch2_darray_resize(d, element_size, new_size, gfp)
+ : 0;
+}
- if (!data)
- return -ENOMEM;
+#define darray_resize_gfp(_d, _new_size, _gfp) \
+ unlikely(__darray_resize((darray_char *) (_d), sizeof((_d)->data[0]), (_new_size), _gfp))
- d->data = data;
- d->size = new_size;
- }
+#define darray_resize(_d, _new_size) \
+ darray_resize_gfp(_d, _new_size, GFP_KERNEL)
- return 0;
+static inline int __darray_make_room(darray_char *d, size_t t_size, size_t more, gfp_t gfp)
+{
+ return __darray_resize(d, t_size, d->nr + more, gfp);
}
#define darray_make_room_gfp(_d, _more, _gfp) \
- __darray_make_room((darray_void *) (_d), sizeof((_d)->data[0]), (_more), _gfp)
+ __darray_make_room((darray_char *) (_d), sizeof((_d)->data[0]), (_more), _gfp)
#define darray_make_room(_d, _more) \
darray_make_room_gfp(_d, _more, GFP_KERNEL)
+#define darray_room(_d) ((_d).size - (_d).nr)
+
#define darray_top(_d) ((_d).data[(_d).nr])
#define darray_push_gfp(_d, _item, _gfp) \
@@ -80,13 +89,16 @@ static inline int __darray_make_room(darray_void *d, size_t t_size, size_t more,
#define darray_init(_d) \
do { \
- (_d)->data = NULL; \
- (_d)->nr = (_d)->size = 0; \
+ (_d)->nr = 0; \
+ (_d)->size = ARRAY_SIZE((_d)->preallocated); \
+ (_d)->data = (_d)->size ? (_d)->preallocated : NULL; \
} while (0)
#define darray_exit(_d) \
do { \
- kfree((_d)->data); \
+ if (!ARRAY_SIZE((_d)->preallocated) || \
+ (_d)->data != (_d)->preallocated) \
+ kvfree((_d)->data); \
darray_init(_d); \
} while (0)
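
A quick sketch (not part of this patch) of the preallocated-darray pattern that
the io_write.c conversion below relies on: with DARRAY_PREALLOCATED(type, 3)
the first three elements live in the inline buffer, darray_push_gfp() only
allocates once that buffer is outgrown, and darray_exit() frees heap storage
but never the inline buffer. example_sum() is a hypothetical name.

/* Illustrative only */
static int example_sum(const int *src, size_t nr, int *sum)
{
	DARRAY_PREALLOCATED(int, 3) nums;
	int *i, ret = 0;

	darray_init(&nums);

	for (size_t j = 0; j < nr; j++) {
		ret = darray_push_gfp(&nums, src[j], GFP_KERNEL);	/* heap allocation only past 3 elements */
		if (ret)
			goto out;
	}

	*sum = 0;
	darray_for_each(nums, i)
		*sum += *i;
out:
	darray_exit(&nums);
	return ret;
}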
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index ae7910bf2228..9ce29681eec9 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -95,6 +95,7 @@
x(ENOSPC, ENOSPC_sb_members) \
x(ENOSPC, ENOSPC_sb_members_v2) \
x(ENOSPC, ENOSPC_sb_crypt) \
+ x(ENOSPC, ENOSPC_sb_downgrade) \
x(ENOSPC, ENOSPC_btree_slot) \
x(ENOSPC, ENOSPC_snapshot_tree) \
x(ENOENT, ENOENT_bkey_type_mismatch) \
@@ -218,6 +219,8 @@
x(BCH_ERR_invalid_sb, invalid_sb_quota) \
x(BCH_ERR_invalid_sb, invalid_sb_errors) \
x(BCH_ERR_invalid_sb, invalid_sb_opt_compression) \
+ x(BCH_ERR_invalid_sb, invalid_sb_ext) \
+ x(BCH_ERR_invalid_sb, invalid_sb_downgrade) \
x(BCH_ERR_invalid, invalid_bkey) \
x(BCH_ERR_operation_blocked, nocow_lock_blocked) \
x(EIO, btree_node_read_err) \
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index 7b28d37922fd..25cf78a7b946 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -152,6 +152,9 @@ int bch2_fsck_err(struct bch_fs *c,
struct printbuf buf = PRINTBUF, *out = &buf;
int ret = -BCH_ERR_fsck_ignore;
+ if (test_bit(err, c->sb.errors_silent))
+ return -BCH_ERR_fsck_fix;
+
bch2_sb_error_count(c, err);
va_start(args, fmt);
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index 9a479e4de6b3..84e20c3ada6c 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -216,11 +216,11 @@ struct dio_write {
struct address_space *mapping;
struct bch_inode_info *inode;
struct mm_struct *mm;
+ const struct iovec *iov;
unsigned loop:1,
extending:1,
sync:1,
- flush:1,
- free_iov:1;
+ flush:1;
struct quota_res quota_res;
u64 written;
@@ -312,12 +312,10 @@ static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
return -1;
if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
- iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
+ dio->iov = iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
GFP_KERNEL);
if (unlikely(!iov))
return -ENOMEM;
-
- dio->free_iov = true;
}
memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
@@ -381,8 +379,7 @@ static __always_inline long bch2_dio_write_done(struct dio_write *dio)
bch2_pagecache_block_put(inode);
- if (dio->free_iov)
- kfree(dio->iter.__iov);
+ kfree(dio->iov);
ret = dio->op.error ?: ((long) dio->written << 9);
bio_put(&dio->op.wbio.bio);
@@ -626,11 +623,11 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
dio->mapping = mapping;
dio->inode = inode;
dio->mm = current->mm;
+ dio->iov = NULL;
dio->loop = false;
dio->extending = extending;
dio->sync = is_sync_kiocb(req) || extending;
dio->flush = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
- dio->free_iov = false;
dio->quota_res.sectors = 0;
dio->written = 0;
dio->iter = *iter;
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
index a70b7a03057d..94e5a567fa44 100644
--- a/fs/bcachefs/fs-ioctl.c
+++ b/fs/bcachefs/fs-ioctl.c
@@ -100,7 +100,8 @@ static int bch2_ioc_setflags(struct bch_fs *c,
}
mutex_lock(&inode->ei_update_lock);
- ret = bch2_write_inode(c, inode, bch2_inode_flags_set, &s,
+ ret = bch2_subvol_is_ro(c, inode->ei_subvol) ?:
+ bch2_write_inode(c, inode, bch2_inode_flags_set, &s,
ATTR_CTIME);
mutex_unlock(&inode->ei_update_lock);
@@ -183,13 +184,10 @@ static int bch2_ioc_fssetxattr(struct bch_fs *c,
}
mutex_lock(&inode->ei_update_lock);
- ret = bch2_set_projid(c, inode, fa.fsx_projid);
- if (ret)
- goto err_unlock;
-
- ret = bch2_write_inode(c, inode, fssetxattr_inode_update_fn, &s,
+ ret = bch2_subvol_is_ro(c, inode->ei_subvol) ?:
+ bch2_set_projid(c, inode, fa.fsx_projid) ?:
+ bch2_write_inode(c, inode, fssetxattr_inode_update_fn, &s,
ATTR_CTIME);
-err_unlock:
mutex_unlock(&inode->ei_update_lock);
err:
inode_unlock(&inode->v);
@@ -291,14 +289,14 @@ static int bch2_ioc_goingdown(struct bch_fs *c, u32 __user *arg)
switch (flags) {
case FSOP_GOING_FLAGS_DEFAULT:
- ret = freeze_bdev(c->vfs_sb->s_bdev);
+ ret = bdev_freeze(c->vfs_sb->s_bdev);
if (ret)
goto err;
bch2_journal_flush(&c->journal);
c->vfs_sb->s_flags |= SB_RDONLY;
bch2_fs_emergency_read_only(c);
- thaw_bdev(c->vfs_sb->s_bdev);
+ bdev_thaw(c->vfs_sb->s_bdev);
break;
case FSOP_GOING_FLAGS_LOGFLUSH:
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index ba93e32d7708..c1895df1bffe 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -258,7 +258,8 @@ __bch2_create(struct mnt_idmap *idmap,
retry:
bch2_trans_begin(trans);
- ret = bch2_create_trans(trans,
+ ret = bch2_subvol_is_ro_trans(trans, dir->ei_subvol) ?:
+ bch2_create_trans(trans,
inode_inum(dir), &dir_u, &inode_u,
!(flags & BCH_CREATE_TMPFILE)
? &dentry->d_name : NULL,
@@ -430,7 +431,9 @@ static int bch2_link(struct dentry *old_dentry, struct inode *vdir,
lockdep_assert_held(&inode->v.i_rwsem);
- ret = __bch2_link(c, inode, dir, dentry);
+ ret = bch2_subvol_is_ro(c, dir->ei_subvol) ?:
+ bch2_subvol_is_ro(c, inode->ei_subvol) ?:
+ __bch2_link(c, inode, dir, dentry);
if (unlikely(ret))
return ret;
@@ -481,7 +484,11 @@ err:
static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
{
- return __bch2_unlink(vdir, dentry, false);
+ struct bch_inode_info *dir = to_bch_ei(vdir);
+ struct bch_fs *c = dir->v.i_sb->s_fs_info;
+
+ return bch2_subvol_is_ro(c, dir->ei_subvol) ?:
+ __bch2_unlink(vdir, dentry, false);
}
static int bch2_symlink(struct mnt_idmap *idmap,
@@ -562,6 +569,11 @@ static int bch2_rename2(struct mnt_idmap *idmap,
src_inode,
dst_inode);
+ ret = bch2_subvol_is_ro_trans(trans, src_dir->ei_subvol) ?:
+ bch2_subvol_is_ro_trans(trans, dst_dir->ei_subvol);
+ if (ret)
+ goto err;
+
if (inode_attr_changing(dst_dir, src_inode, Inode_opt_project)) {
ret = bch2_fs_quota_transfer(c, src_inode,
dst_dir->ei_qid,
@@ -783,11 +795,13 @@ static int bch2_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *iattr)
{
struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
int ret;
lockdep_assert_held(&inode->v.i_rwsem);
- ret = setattr_prepare(idmap, dentry, iattr);
+ ret = bch2_subvol_is_ro(c, inode->ei_subvol) ?:
+ setattr_prepare(idmap, dentry, iattr);
if (ret)
return ret;
@@ -1010,12 +1024,26 @@ static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx)
return bch2_err_class(ret);
}
+static int bch2_open(struct inode *vinode, struct file *file)
+{
+ if (file->f_flags & (O_WRONLY|O_RDWR)) {
+ struct bch_inode_info *inode = to_bch_ei(vinode);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+
+ int ret = bch2_subvol_is_ro(c, inode->ei_subvol);
+ if (ret)
+ return ret;
+ }
+
+ return generic_file_open(vinode, file);
+}
+
static const struct file_operations bch_file_operations = {
+ .open = bch2_open,
.llseek = bch2_llseek,
.read_iter = bch2_read_iter,
.write_iter = bch2_write_iter,
.mmap = bch2_mmap,
- .open = generic_file_open,
.fsync = bch2_fsync,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
@@ -1103,7 +1131,7 @@ static const struct address_space_operations bch_address_space_operations = {
#ifdef CONFIG_MIGRATION
.migrate_folio = filemap_migrate_folio,
#endif
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
};
struct bcachefs_fid {
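
A side note on the "a ?: b" chaining used throughout the hunks above
(bch2_link(), bch2_unlink(), bch2_setattr(), and the fs-ioctl.c changes): GCC's
binary conditional keeps the first nonzero value, so the first failing check
short-circuits everything after it. A minimal sketch with hypothetical helpers:

/* Illustrative only: check_ro() and do_write() stand in for the
 * bch2_subvol_is_ro()-style checks followed by the real operation. */
static int guarded_write(struct thing *a, struct thing *b)
{
	return check_ro(a) ?:	/* a nonzero error here is returned directly */
	       check_ro(b) ?:
	       do_write(a, b);	/* only runs when both checks returned 0 */
}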
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 8ede46b1e354..8c8cb1541ac9 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -1216,6 +1216,12 @@ static CLOSURE_CALLBACK(bch2_nocow_write_done)
bch2_write_done(cl);
}
+struct bucket_to_lock {
+ struct bpos b;
+ unsigned gen;
+ struct nocow_lock_bucket *l;
+};
+
static void bch2_nocow_write(struct bch_write_op *op)
{
struct bch_fs *c = op->c;
@@ -1224,18 +1230,16 @@ static void bch2_nocow_write(struct bch_write_op *op)
struct bkey_s_c k;
struct bkey_ptrs_c ptrs;
const struct bch_extent_ptr *ptr;
- struct {
- struct bpos b;
- unsigned gen;
- struct nocow_lock_bucket *l;
- } buckets[BCH_REPLICAS_MAX];
- unsigned nr_buckets = 0;
+ DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
+ struct bucket_to_lock *i;
u32 snapshot;
- int ret, i;
+ struct bucket_to_lock *stale_at;
+ int ret;
if (op->flags & BCH_WRITE_MOVE)
return;
+ darray_init(&buckets);
trans = bch2_trans_get(c);
retry:
bch2_trans_begin(trans);
@@ -1250,7 +1254,7 @@ retry:
while (1) {
struct bio *bio = &op->wbio.bio;
- nr_buckets = 0;
+ buckets.nr = 0;
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
@@ -1263,26 +1267,26 @@ retry:
break;
if (bch2_keylist_realloc(&op->insert_keys,
- op->inline_keys,
- ARRAY_SIZE(op->inline_keys),
- k.k->u64s))
+ op->inline_keys,
+ ARRAY_SIZE(op->inline_keys),
+ k.k->u64s))
break;
/* Get iorefs before dropping btree locks: */
ptrs = bch2_bkey_ptrs_c(k);
bkey_for_each_ptr(ptrs, ptr) {
- buckets[nr_buckets].b = PTR_BUCKET_POS(c, ptr);
- buckets[nr_buckets].gen = ptr->gen;
- buckets[nr_buckets].l =
- bucket_nocow_lock(&c->nocow_locks,
- bucket_to_u64(buckets[nr_buckets].b));
-
- prefetch(buckets[nr_buckets].l);
+ struct bpos b = PTR_BUCKET_POS(c, ptr);
+ struct nocow_lock_bucket *l =
+ bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
+ prefetch(l);
if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
goto err_get_ioref;
- nr_buckets++;
+ /* XXX allocating memory with btree locks held - rare */
+ darray_push_gfp(&buckets, ((struct bucket_to_lock) {
+ .b = b, .gen = ptr->gen, .l = l,
+ }), GFP_KERNEL|__GFP_NOFAIL);
if (ptr->unwritten)
op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
@@ -1296,21 +1300,21 @@ retry:
if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
- for (i = 0; i < nr_buckets; i++) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, buckets[i].b.inode);
- struct nocow_lock_bucket *l = buckets[i].l;
- bool stale;
+ darray_for_each(buckets, i) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, i->b.inode);
- __bch2_bucket_nocow_lock(&c->nocow_locks, l,
- bucket_to_u64(buckets[i].b),
+ __bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
+ bucket_to_u64(i->b),
BUCKET_NOCOW_LOCK_UPDATE);
rcu_read_lock();
- stale = gen_after(*bucket_gen(ca, buckets[i].b.offset), buckets[i].gen);
+ bool stale = gen_after(*bucket_gen(ca, i->b.offset), i->gen);
rcu_read_unlock();
- if (unlikely(stale))
+ if (unlikely(stale)) {
+ stale_at = i;
goto err_bucket_stale;
+ }
}
bio = &op->wbio.bio;
@@ -1346,15 +1350,14 @@ err:
if (ret) {
bch_err_inum_offset_ratelimited(c,
- op->pos.inode,
- op->pos.offset << 9,
- "%s: btree lookup error %s",
- __func__, bch2_err_str(ret));
+ op->pos.inode, op->pos.offset << 9,
+ "%s: btree lookup error %s", __func__, bch2_err_str(ret));
op->error = ret;
op->flags |= BCH_WRITE_DONE;
}
bch2_trans_put(trans);
+ darray_exit(&buckets);
/* fallback to cow write path? */
if (!(op->flags & BCH_WRITE_DONE)) {
@@ -1374,24 +1377,21 @@ err:
}
return;
err_get_ioref:
- for (i = 0; i < nr_buckets; i++)
- percpu_ref_put(&bch_dev_bkey_exists(c, buckets[i].b.inode)->io_ref);
+ darray_for_each(buckets, i)
+ percpu_ref_put(&bch_dev_bkey_exists(c, i->b.inode)->io_ref);
/* Fall back to COW path: */
goto out;
err_bucket_stale:
- while (i >= 0) {
- bch2_bucket_nocow_unlock(&c->nocow_locks,
- buckets[i].b,
- BUCKET_NOCOW_LOCK_UPDATE);
- --i;
+ darray_for_each(buckets, i) {
+ bch2_bucket_nocow_unlock(&c->nocow_locks, i->b, BUCKET_NOCOW_LOCK_UPDATE);
+ if (i == stale_at)
+ break;
}
- for (i = 0; i < nr_buckets; i++)
- percpu_ref_put(&bch_dev_bkey_exists(c, buckets[i].b.inode)->io_ref);
/* We can retry this: */
ret = -BCH_ERR_transaction_restart;
- goto out;
+ goto err_get_ioref;
}
static void __bch2_write(struct bch_write_op *op)
diff --git a/fs/bcachefs/printbuf.c b/fs/bcachefs/printbuf.c
index 5e653eb81d54..accf246c3233 100644
--- a/fs/bcachefs/printbuf.c
+++ b/fs/bcachefs/printbuf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: LGPL-2.1+
/* Copyright (C) 2022 Kent Overstreet */
+#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
@@ -423,3 +424,24 @@ void bch2_prt_bitflags(struct printbuf *out,
flags ^= BIT_ULL(bit);
}
}
+
+void bch2_prt_bitflags_vector(struct printbuf *out,
+ const char * const list[],
+ unsigned long *v, unsigned nr)
+{
+ bool first = true;
+ unsigned i;
+
+ for (i = 0; i < nr; i++)
+ if (!list[i]) {
+ nr = i;
+ break;
+ }
+
+ for_each_set_bit(i, v, nr) {
+ if (!first)
+ bch2_prt_printf(out, ",");
+ first = false;
+ bch2_prt_printf(out, "%s", list[i]);
+ }
+}
diff --git a/fs/bcachefs/printbuf.h b/fs/bcachefs/printbuf.h
index 2191423d9f22..9a4a56c40937 100644
--- a/fs/bcachefs/printbuf.h
+++ b/fs/bcachefs/printbuf.h
@@ -124,6 +124,8 @@ void bch2_prt_units_u64(struct printbuf *, u64);
void bch2_prt_units_s64(struct printbuf *, s64);
void bch2_prt_string_option(struct printbuf *, const char * const[], size_t);
void bch2_prt_bitflags(struct printbuf *, const char * const[], u64);
+void bch2_prt_bitflags_vector(struct printbuf *, const char * const[],
+ unsigned long *, unsigned);
/* Initializer for a heap allocated printbuf: */
#define PRINTBUF ((struct printbuf) { .heap_allocated = true })
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index c7d9074c82d9..5cf7d0532002 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -27,6 +27,7 @@
#include "recovery.h"
#include "replicas.h"
#include "sb-clean.h"
+#include "sb-downgrade.h"
#include "snapshot.h"
#include "subvolume.h"
#include "super-io.h"
@@ -481,7 +482,7 @@ static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
}
const char * const bch2_recovery_passes[] = {
-#define x(_fn, _when) #_fn,
+#define x(_fn, ...) #_fn,
BCH_RECOVERY_PASSES()
#undef x
NULL
@@ -504,18 +505,47 @@ struct recovery_pass_fn {
};
static struct recovery_pass_fn recovery_pass_fns[] = {
-#define x(_fn, _when) { .fn = bch2_##_fn, .when = _when },
+#define x(_fn, _id, _when) { .fn = bch2_##_fn, .when = _when },
BCH_RECOVERY_PASSES()
#undef x
};
-static void check_version_upgrade(struct bch_fs *c)
+u64 bch2_recovery_passes_to_stable(u64 v)
+{
+ static const u8 map[] = {
+#define x(n, id, ...) [BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
+ BCH_RECOVERY_PASSES()
+#undef x
+ };
+
+ u64 ret = 0;
+ for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
+ if (v & BIT_ULL(i))
+ ret |= BIT_ULL(map[i]);
+ return ret;
+}
+
+u64 bch2_recovery_passes_from_stable(u64 v)
+{
+ static const u8 map[] = {
+#define x(n, id, ...) [BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
+ BCH_RECOVERY_PASSES()
+#undef x
+ };
+
+ u64 ret = 0;
+ for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
+ if (v & BIT_ULL(i))
+ ret |= BIT_ULL(map[i]);
+ return ret;
+}
+
+static bool check_version_upgrade(struct bch_fs *c)
{
unsigned latest_compatible = bch2_latest_compatible_version(c->sb.version);
unsigned latest_version = bcachefs_metadata_version_current;
unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
unsigned new_version = 0;
- u64 recovery_passes;
if (old_version < bcachefs_metadata_required_upgrade_below) {
if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible ||
@@ -559,7 +589,7 @@ static void check_version_upgrade(struct bch_fs *c)
bch2_version_to_text(&buf, new_version);
prt_newline(&buf);
- recovery_passes = bch2_upgrade_recovery_passes(c, old_version, new_version);
+ u64 recovery_passes = bch2_upgrade_recovery_passes(c, old_version, new_version);
if (recovery_passes) {
if ((recovery_passes & RECOVERY_PASS_ALL_FSCK) == RECOVERY_PASS_ALL_FSCK)
prt_str(&buf, "fsck required");
@@ -574,12 +604,13 @@ static void check_version_upgrade(struct bch_fs *c)
bch_info(c, "%s", buf.buf);
- mutex_lock(&c->sb_lock);
bch2_sb_upgrade(c, new_version);
- mutex_unlock(&c->sb_lock);
printbuf_exit(&buf);
+ return true;
}
+
+ return false;
}
u64 bch2_fsck_recovery_passes(void)
@@ -654,7 +685,6 @@ int bch2_fs_recovery(struct bch_fs *c)
struct bch_sb_field_clean *clean = NULL;
struct jset *last_journal_entry = NULL;
u64 last_seq = 0, blacklist_seq, journal_seq;
- bool write_sb = false;
int ret = 0;
if (c->sb.clean) {
@@ -682,15 +712,73 @@ int bch2_fs_recovery(struct bch_fs *c)
goto err;
}
- if (c->opts.fsck || !(c->opts.nochanges && c->opts.norecovery))
- check_version_upgrade(c);
-
if (c->opts.fsck && c->opts.norecovery) {
bch_err(c, "cannot select both norecovery and fsck");
ret = -EINVAL;
goto err;
}
+ if (!(c->opts.nochanges && c->opts.norecovery)) {
+ mutex_lock(&c->sb_lock);
+ bool write_sb = false;
+
+ struct bch_sb_field_ext *ext =
+ bch2_sb_field_get_minsize(&c->disk_sb, ext, sizeof(*ext) / sizeof(u64));
+ if (!ext) {
+ ret = -BCH_ERR_ENOSPC_sb;
+ mutex_unlock(&c->sb_lock);
+ goto err;
+ }
+
+ if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) {
+ ext->recovery_passes_required[0] |=
+ cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(BCH_RECOVERY_PASS_check_topology)));
+ write_sb = true;
+ }
+
+ u64 sb_passes = bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
+ if (sb_passes) {
+ struct printbuf buf = PRINTBUF;
+ prt_str(&buf, "superblock requires following recovery passes to be run:\n ");
+ prt_bitflags(&buf, bch2_recovery_passes, sb_passes);
+ bch_info(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ }
+
+ if (bch2_check_version_downgrade(c)) {
+ struct printbuf buf = PRINTBUF;
+
+ prt_str(&buf, "Version downgrade required:\n");
+
+ __le64 passes = ext->recovery_passes_required[0];
+ bch2_sb_set_downgrade(c,
+ BCH_VERSION_MINOR(bcachefs_metadata_version_current),
+ BCH_VERSION_MINOR(c->sb.version));
+ passes = ext->recovery_passes_required[0] & ~passes;
+ if (passes) {
+ prt_str(&buf, " running recovery passes: ");
+ prt_bitflags(&buf, bch2_recovery_passes,
+ bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
+ }
+
+ bch_info(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ write_sb = true;
+ }
+
+ if (check_version_upgrade(c))
+ write_sb = true;
+
+ if (write_sb)
+ bch2_write_super(c);
+
+ c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
+ mutex_unlock(&c->sb_lock);
+ }
+
+ if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
+
ret = bch2_blacklist_table_initialize(c);
if (ret) {
bch_err(c, "error initializing blacklist table");
@@ -827,11 +915,6 @@ use_clean:
if (ret)
goto err;
- if (c->opts.fsck &&
- (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
- BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)))
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
-
ret = bch2_run_recovery_passes(c);
if (ret)
goto err;
@@ -868,16 +951,30 @@ use_clean:
}
mutex_lock(&c->sb_lock);
- if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != c->sb.version) {
- SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, c->sb.version);
+ bool write_sb = false;
+
+ if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != le16_to_cpu(c->disk_sb.sb->version)) {
+ SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, le16_to_cpu(c->disk_sb.sb->version));
write_sb = true;
}
- if (!test_bit(BCH_FS_ERROR, &c->flags)) {
+ if (!test_bit(BCH_FS_ERROR, &c->flags) &&
+ !(c->disk_sb.sb->compat[0] & cpu_to_le64(1ULL << BCH_COMPAT_alloc_info))) {
c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
write_sb = true;
}
+ if (!test_bit(BCH_FS_ERROR, &c->flags)) {
+ struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
+ if (ext &&
+ (!bch2_is_zero(ext->recovery_passes_required, sizeof(ext->recovery_passes_required)) ||
+ !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent)))) {
+ memset(ext->recovery_passes_required, 0, sizeof(ext->recovery_passes_required));
+ memset(ext->errors_silent, 0, sizeof(ext->errors_silent));
+ write_sb = true;
+ }
+ }
+
if (c->opts.fsck &&
!test_bit(BCH_FS_ERROR, &c->flags) &&
!test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
@@ -947,7 +1044,7 @@ int bch2_fs_initialize(struct bch_fs *c)
c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);
- bch2_sb_maybe_downgrade(c);
+ bch2_check_version_downgrade(c);
if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) {
bch2_sb_upgrade(c, bcachefs_metadata_version_current);
diff --git a/fs/bcachefs/recovery.h b/fs/bcachefs/recovery.h
index d266aae90200..3a554b0751d0 100644
--- a/fs/bcachefs/recovery.h
+++ b/fs/bcachefs/recovery.h
@@ -4,6 +4,9 @@
extern const char * const bch2_recovery_passes[];
+u64 bch2_recovery_passes_to_stable(u64 v);
+u64 bch2_recovery_passes_from_stable(u64 v);
+
/*
* For when we need to rewind recovery passes and run a pass we skipped:
*/
diff --git a/fs/bcachefs/recovery_types.h b/fs/bcachefs/recovery_types.h
index 515e3d62c2ac..d37c6fd30e38 100644
--- a/fs/bcachefs/recovery_types.h
+++ b/fs/bcachefs/recovery_types.h
@@ -7,45 +7,57 @@
#define PASS_UNCLEAN BIT(2)
#define PASS_ALWAYS BIT(3)
-#define BCH_RECOVERY_PASSES() \
- x(alloc_read, PASS_ALWAYS) \
- x(stripes_read, PASS_ALWAYS) \
- x(initialize_subvolumes, 0) \
- x(snapshots_read, PASS_ALWAYS) \
- x(check_topology, 0) \
- x(check_allocations, PASS_FSCK) \
- x(trans_mark_dev_sbs, PASS_ALWAYS|PASS_SILENT) \
- x(fs_journal_alloc, PASS_ALWAYS|PASS_SILENT) \
- x(set_may_go_rw, PASS_ALWAYS|PASS_SILENT) \
- x(journal_replay, PASS_ALWAYS) \
- x(check_alloc_info, PASS_FSCK) \
- x(check_lrus, PASS_FSCK) \
- x(check_btree_backpointers, PASS_FSCK) \
- x(check_backpointers_to_extents,PASS_FSCK) \
- x(check_extents_to_backpointers,PASS_FSCK) \
- x(check_alloc_to_lru_refs, PASS_FSCK) \
- x(fs_freespace_init, PASS_ALWAYS|PASS_SILENT) \
- x(bucket_gens_init, 0) \
- x(check_snapshot_trees, PASS_FSCK) \
- x(check_snapshots, PASS_FSCK) \
- x(check_subvols, PASS_FSCK) \
- x(delete_dead_snapshots, PASS_FSCK) \
- x(fs_upgrade_for_subvolumes, 0) \
- x(resume_logged_ops, PASS_ALWAYS) \
- x(check_inodes, PASS_FSCK) \
- x(check_extents, PASS_FSCK) \
- x(check_indirect_extents, PASS_FSCK) \
- x(check_dirents, PASS_FSCK) \
- x(check_xattrs, PASS_FSCK) \
- x(check_root, PASS_FSCK) \
- x(check_directory_structure, PASS_FSCK) \
- x(check_nlinks, PASS_FSCK) \
- x(delete_dead_inodes, PASS_FSCK|PASS_UNCLEAN) \
- x(fix_reflink_p, 0) \
- x(set_fs_needs_rebalance, 0) \
+/*
+ * Passes may be reordered, but the second field is a persistent identifier and
+ * must never change:
+ */
+#define BCH_RECOVERY_PASSES() \
+ x(alloc_read, 0, PASS_ALWAYS) \
+ x(stripes_read, 1, PASS_ALWAYS) \
+ x(initialize_subvolumes, 2, 0) \
+ x(snapshots_read, 3, PASS_ALWAYS) \
+ x(check_topology, 4, 0) \
+ x(check_allocations, 5, PASS_FSCK) \
+ x(trans_mark_dev_sbs, 6, PASS_ALWAYS|PASS_SILENT) \
+ x(fs_journal_alloc, 7, PASS_ALWAYS|PASS_SILENT) \
+ x(set_may_go_rw, 8, PASS_ALWAYS|PASS_SILENT) \
+ x(journal_replay, 9, PASS_ALWAYS) \
+ x(check_alloc_info, 10, PASS_FSCK) \
+ x(check_lrus, 11, PASS_FSCK) \
+ x(check_btree_backpointers, 12, PASS_FSCK) \
+ x(check_backpointers_to_extents, 13, PASS_FSCK) \
+ x(check_extents_to_backpointers, 14, PASS_FSCK) \
+ x(check_alloc_to_lru_refs, 15, PASS_FSCK) \
+ x(fs_freespace_init, 16, PASS_ALWAYS|PASS_SILENT) \
+ x(bucket_gens_init, 17, 0) \
+ x(check_snapshot_trees, 18, PASS_FSCK) \
+ x(check_snapshots, 19, PASS_FSCK) \
+ x(check_subvols, 20, PASS_FSCK) \
+ x(delete_dead_snapshots, 21, PASS_FSCK) \
+ x(fs_upgrade_for_subvolumes, 22, 0) \
+ x(resume_logged_ops, 23, PASS_ALWAYS) \
+ x(check_inodes, 24, PASS_FSCK) \
+ x(check_extents, 25, PASS_FSCK) \
+ x(check_indirect_extents, 26, PASS_FSCK) \
+ x(check_dirents, 27, PASS_FSCK) \
+ x(check_xattrs, 28, PASS_FSCK) \
+ x(check_root, 29, PASS_FSCK) \
+ x(check_directory_structure, 30, PASS_FSCK) \
+ x(check_nlinks, 31, PASS_FSCK) \
+ x(delete_dead_inodes, 32, PASS_FSCK|PASS_UNCLEAN) \
+ x(fix_reflink_p, 33, 0) \
+ x(set_fs_needs_rebalance, 34, 0) \
+/* We normally enumerate recovery passes in the order we run them: */
enum bch_recovery_pass {
-#define x(n, when) BCH_RECOVERY_PASS_##n,
+#define x(n, id, when) BCH_RECOVERY_PASS_##n,
+ BCH_RECOVERY_PASSES()
+#undef x
+};
+
+/* But we also need stable identifiers that can be used in the superblock */
+enum bch_recovery_pass_stable {
+#define x(n, id, when) BCH_RECOVERY_PASS_STABLE_##n = id,
BCH_RECOVERY_PASSES()
#undef x
};
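
To make the split between the two enums concrete, a small sketch (not part of
this patch) of the intended superblock round-trip: bitmasks held in memory are
indexed by enum bch_recovery_pass (declaration order, free to be reshuffled),
while anything written to disk goes through the _STABLE_ ids from the second
macro column.

/* Illustrative only */
u64 mem_mask = BIT_ULL(BCH_RECOVERY_PASS_check_allocations);	/* in-memory ordinal, 5 today */
u64 sb_mask  = bch2_recovery_passes_to_stable(mem_mask);	/* stable on-disk id, 5 forever */

/* sb_mask is what ends up in ext->recovery_passes_required[0] as cpu_to_le64(),
 * and is mapped back with bch2_recovery_passes_from_stable() at mount time. */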
diff --git a/fs/bcachefs/sb-clean.c b/fs/bcachefs/sb-clean.c
index e151ada1c8bd..c76ad8ea5e4a 100644
--- a/fs/bcachefs/sb-clean.c
+++ b/fs/bcachefs/sb-clean.c
@@ -332,8 +332,6 @@ int bch2_fs_mark_dirty(struct bch_fs *c)
mutex_lock(&c->sb_lock);
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
-
- bch2_sb_maybe_downgrade(c);
c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALWAYS);
ret = bch2_write_super(c);
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
new file mode 100644
index 000000000000..4919237bbe73
--- /dev/null
+++ b/fs/bcachefs/sb-downgrade.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Superblock section that contains a list of recovery passes to run when
+ * downgrading past a given version
+ */
+
+#include "bcachefs.h"
+#include "darray.h"
+#include "recovery.h"
+#include "sb-downgrade.h"
+#include "sb-errors.h"
+#include "super-io.h"
+
+/*
+ * Downgrade table:
+ * When downgrading past certain versions, we need to run certain recovery passes
+ * and fix certain errors:
+ *
+ * x(version, recovery_passes, errors...)
+ */
+
+#define DOWNGRADE_TABLE()
+
+struct downgrade_entry {
+ u64 recovery_passes;
+ u16 version;
+ u16 nr_errors;
+ const u16 *errors;
+};
+
+#define x(ver, passes, ...) static const u16 ver_##errors[] = { __VA_ARGS__ };
+DOWNGRADE_TABLE()
+#undef x
+
+static const struct downgrade_entry downgrade_table[] = {
+#define x(ver, passes, ...) { \
+ .recovery_passes = passes, \
+ .version = bcachefs_metadata_version_##ver,\
+ .nr_errors = ARRAY_SIZE(ver_##errors), \
+ .errors = ver_##errors, \
+},
+DOWNGRADE_TABLE()
+#undef x
+};
+
+static inline const struct bch_sb_field_downgrade_entry *
+downgrade_entry_next_c(const struct bch_sb_field_downgrade_entry *e)
+{
+ return (void *) &e->errors[le16_to_cpu(e->nr_errors)];
+}
+
+#define for_each_downgrade_entry(_d, _i) \
+ for (const struct bch_sb_field_downgrade_entry *_i = (_d)->entries; \
+ (void *) _i < vstruct_end(&(_d)->field) && \
+ (void *) &_i->errors[0] < vstruct_end(&(_d)->field); \
+ _i = downgrade_entry_next_c(_i))
+
+static int bch2_sb_downgrade_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ struct bch_sb_field_downgrade *e = field_to_type(f, downgrade);
+
+ for_each_downgrade_entry(e, i) {
+ if (BCH_VERSION_MAJOR(le16_to_cpu(i->version)) !=
+ BCH_VERSION_MAJOR(le16_to_cpu(sb->version))) {
+ prt_printf(err, "downgrade entry with mismatched major version (%u != %u)",
+ BCH_VERSION_MAJOR(le16_to_cpu(i->version)),
+ BCH_VERSION_MAJOR(le16_to_cpu(sb->version)));
+ return -BCH_ERR_invalid_sb_downgrade;
+ }
+ }
+
+ return 0;
+}
+
+static void bch2_sb_downgrade_to_text(struct printbuf *out, struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_downgrade *e = field_to_type(f, downgrade);
+
+ if (out->nr_tabstops <= 1)
+ printbuf_tabstop_push(out, 16);
+
+ for_each_downgrade_entry(e, i) {
+ prt_str(out, "version:");
+ prt_tab(out);
+ bch2_version_to_text(out, le16_to_cpu(i->version));
+ prt_newline(out);
+
+ prt_str(out, "recovery passes:");
+ prt_tab(out);
+ prt_bitflags(out, bch2_recovery_passes,
+ bch2_recovery_passes_from_stable(le64_to_cpu(i->recovery_passes[0])));
+ prt_newline(out);
+
+ prt_str(out, "errors:");
+ prt_tab(out);
+ bool first = true;
+ for (unsigned j = 0; j < le16_to_cpu(i->nr_errors); j++) {
+ if (!first)
+ prt_char(out, ',');
+ first = false;
+ unsigned e = le16_to_cpu(i->errors[j]);
+ prt_str(out, e < BCH_SB_ERR_MAX ? bch2_sb_error_strs[e] : "(unknown)");
+ }
+ prt_newline(out);
+ }
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_downgrade = {
+ .validate = bch2_sb_downgrade_validate,
+ .to_text = bch2_sb_downgrade_to_text,
+};
+
+int bch2_sb_downgrade_update(struct bch_fs *c)
+{
+ darray_char table = {};
+ int ret = 0;
+
+ for (const struct downgrade_entry *src = downgrade_table;
+ src < downgrade_table + ARRAY_SIZE(downgrade_table);
+ src++) {
+ if (BCH_VERSION_MAJOR(src->version) != BCH_VERSION_MAJOR(le16_to_cpu(c->disk_sb.sb->version)))
+ continue;
+
+ struct bch_sb_field_downgrade_entry *dst;
+ unsigned bytes = sizeof(*dst) + sizeof(dst->errors[0]) * src->nr_errors;
+
+ ret = darray_make_room(&table, bytes);
+ if (ret)
+ goto out;
+
+ dst = (void *) &darray_top(table);
+ dst->version = cpu_to_le16(src->version);
+ dst->recovery_passes[0] = cpu_to_le64(src->recovery_passes);
+ dst->recovery_passes[1] = 0;
+ dst->nr_errors = cpu_to_le16(src->nr_errors);
+ for (unsigned i = 0; i < src->nr_errors; i++)
+ dst->errors[i] = cpu_to_le16(src->errors[i]);
+
+ table.nr += bytes;
+ }
+
+ struct bch_sb_field_downgrade *d = bch2_sb_field_get(c->disk_sb.sb, downgrade);
+
+ unsigned sb_u64s = DIV_ROUND_UP(sizeof(*d) + table.nr, sizeof(u64));
+
+ if (d && le32_to_cpu(d->field.u64s) > sb_u64s)
+ goto out;
+
+ d = bch2_sb_field_resize(&c->disk_sb, downgrade, sb_u64s);
+ if (!d) {
+ ret = -BCH_ERR_ENOSPC_sb_downgrade;
+ goto out;
+ }
+
+ memcpy(d->entries, table.data, table.nr);
+ memset_u64s_tail(d->entries, 0, table.nr);
+out:
+ darray_exit(&table);
+ return ret;
+}
+
+void bch2_sb_set_downgrade(struct bch_fs *c, unsigned new_minor, unsigned old_minor)
+{
+ struct bch_sb_field_downgrade *d = bch2_sb_field_get(c->disk_sb.sb, downgrade);
+ if (!d)
+ return;
+
+ struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
+
+ for_each_downgrade_entry(d, i) {
+ unsigned minor = BCH_VERSION_MINOR(le16_to_cpu(i->version));
+ if (new_minor < minor && minor <= old_minor) {
+ ext->recovery_passes_required[0] |= i->recovery_passes[0];
+ ext->recovery_passes_required[1] |= i->recovery_passes[1];
+
+ for (unsigned j = 0; j < le16_to_cpu(i->nr_errors); j++) {
+ unsigned e = le16_to_cpu(i->errors[j]);
+ if (e < BCH_SB_ERR_MAX)
+ __set_bit(e, c->sb.errors_silent);
+ if (e < sizeof(ext->errors_silent) * 8)
+ ext->errors_silent[e / 64] |= cpu_to_le64(BIT_ULL(e % 64));
+ }
+ }
+ }
+}
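
The table is intentionally empty in this patch; purely for illustration, a
future entry would follow the x(version, recovery_passes, errors...) shape
documented above. The version name below is a placeholder, not a real
bcachefs_metadata_version_* value:

/* Hypothetical example, not part of this patch */
#define DOWNGRADE_TABLE()						\
	x(some_future_version,						\
	  BIT_ULL(BCH_RECOVERY_PASS_STABLE_check_allocations),		\
	  BCH_FSCK_ERR_alloc_key_gen_wrong)

With such an entry, bch2_sb_downgrade_update() would emit one
bch_sb_field_downgrade_entry for it, and bch2_sb_set_downgrade() would schedule
check_allocations and silence alloc_key_gen_wrong when the filesystem is
downgraded past that version.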
diff --git a/fs/bcachefs/sb-downgrade.h b/fs/bcachefs/sb-downgrade.h
new file mode 100644
index 000000000000..bc48fd2ca70e
--- /dev/null
+++ b/fs/bcachefs/sb-downgrade.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_DOWNGRADE_H
+#define _BCACHEFS_SB_DOWNGRADE_H
+
+extern const struct bch_sb_field_ops bch_sb_field_ops_downgrade;
+
+int bch2_sb_downgrade_update(struct bch_fs *);
+void bch2_sb_set_downgrade(struct bch_fs *, unsigned, unsigned);
+
+#endif /* _BCACHEFS_SB_DOWNGRADE_H */
diff --git a/fs/bcachefs/sb-errors.c b/fs/bcachefs/sb-errors.c
index f0930ab7f036..5f5bcae391fb 100644
--- a/fs/bcachefs/sb-errors.c
+++ b/fs/bcachefs/sb-errors.c
@@ -4,7 +4,7 @@
#include "sb-errors.h"
#include "super-io.h"
-static const char * const bch2_sb_error_strs[] = {
+const char * const bch2_sb_error_strs[] = {
#define x(t, n, ...) [n] = #t,
BCH_SB_ERRS()
NULL
@@ -20,9 +20,7 @@ static void bch2_sb_error_id_to_text(struct printbuf *out, enum bch_sb_error_id
static inline unsigned bch2_sb_field_errors_nr_entries(struct bch_sb_field_errors *e)
{
- return e
- ? (bch2_sb_field_bytes(&e->field) - sizeof(*e)) / sizeof(e->entries[0])
- : 0;
+ return bch2_sb_field_nr_entries(e);
}
static inline unsigned bch2_sb_field_errors_u64s(unsigned nr)
diff --git a/fs/bcachefs/sb-errors.h b/fs/bcachefs/sb-errors.h
index 7557fe94f06d..8889001e7db4 100644
--- a/fs/bcachefs/sb-errors.h
+++ b/fs/bcachefs/sb-errors.h
@@ -4,258 +4,7 @@
#include "sb-errors_types.h"
-#define BCH_SB_ERRS() \
- x(clean_but_journal_not_empty, 0) \
- x(dirty_but_no_journal_entries, 1) \
- x(dirty_but_no_journal_entries_post_drop_nonflushes, 2) \
- x(sb_clean_journal_seq_mismatch, 3) \
- x(sb_clean_btree_root_mismatch, 4) \
- x(sb_clean_missing, 5) \
- x(jset_unsupported_version, 6) \
- x(jset_unknown_csum, 7) \
- x(jset_last_seq_newer_than_seq, 8) \
- x(jset_past_bucket_end, 9) \
- x(jset_seq_blacklisted, 10) \
- x(journal_entries_missing, 11) \
- x(journal_entry_replicas_not_marked, 12) \
- x(journal_entry_past_jset_end, 13) \
- x(journal_entry_replicas_data_mismatch, 14) \
- x(journal_entry_bkey_u64s_0, 15) \
- x(journal_entry_bkey_past_end, 16) \
- x(journal_entry_bkey_bad_format, 17) \
- x(journal_entry_bkey_invalid, 18) \
- x(journal_entry_btree_root_bad_size, 19) \
- x(journal_entry_blacklist_bad_size, 20) \
- x(journal_entry_blacklist_v2_bad_size, 21) \
- x(journal_entry_blacklist_v2_start_past_end, 22) \
- x(journal_entry_usage_bad_size, 23) \
- x(journal_entry_data_usage_bad_size, 24) \
- x(journal_entry_clock_bad_size, 25) \
- x(journal_entry_clock_bad_rw, 26) \
- x(journal_entry_dev_usage_bad_size, 27) \
- x(journal_entry_dev_usage_bad_dev, 28) \
- x(journal_entry_dev_usage_bad_pad, 29) \
- x(btree_node_unreadable, 30) \
- x(btree_node_fault_injected, 31) \
- x(btree_node_bad_magic, 32) \
- x(btree_node_bad_seq, 33) \
- x(btree_node_unsupported_version, 34) \
- x(btree_node_bset_older_than_sb_min, 35) \
- x(btree_node_bset_newer_than_sb, 36) \
- x(btree_node_data_missing, 37) \
- x(btree_node_bset_after_end, 38) \
- x(btree_node_replicas_sectors_written_mismatch, 39) \
- x(btree_node_replicas_data_mismatch, 40) \
- x(bset_unknown_csum, 41) \
- x(bset_bad_csum, 42) \
- x(bset_past_end_of_btree_node, 43) \
- x(bset_wrong_sector_offset, 44) \
- x(bset_empty, 45) \
- x(bset_bad_seq, 46) \
- x(bset_blacklisted_journal_seq, 47) \
- x(first_bset_blacklisted_journal_seq, 48) \
- x(btree_node_bad_btree, 49) \
- x(btree_node_bad_level, 50) \
- x(btree_node_bad_min_key, 51) \
- x(btree_node_bad_max_key, 52) \
- x(btree_node_bad_format, 53) \
- x(btree_node_bkey_past_bset_end, 54) \
- x(btree_node_bkey_bad_format, 55) \
- x(btree_node_bad_bkey, 56) \
- x(btree_node_bkey_out_of_order, 57) \
- x(btree_root_bkey_invalid, 58) \
- x(btree_root_read_error, 59) \
- x(btree_root_bad_min_key, 60) \
- x(btree_root_bad_max_key, 61) \
- x(btree_node_read_error, 62) \
- x(btree_node_topology_bad_min_key, 63) \
- x(btree_node_topology_bad_max_key, 64) \
- x(btree_node_topology_overwritten_by_prev_node, 65) \
- x(btree_node_topology_overwritten_by_next_node, 66) \
- x(btree_node_topology_interior_node_empty, 67) \
- x(fs_usage_hidden_wrong, 68) \
- x(fs_usage_btree_wrong, 69) \
- x(fs_usage_data_wrong, 70) \
- x(fs_usage_cached_wrong, 71) \
- x(fs_usage_reserved_wrong, 72) \
- x(fs_usage_persistent_reserved_wrong, 73) \
- x(fs_usage_nr_inodes_wrong, 74) \
- x(fs_usage_replicas_wrong, 75) \
- x(dev_usage_buckets_wrong, 76) \
- x(dev_usage_sectors_wrong, 77) \
- x(dev_usage_fragmented_wrong, 78) \
- x(dev_usage_buckets_ec_wrong, 79) \
- x(bkey_version_in_future, 80) \
- x(bkey_u64s_too_small, 81) \
- x(bkey_invalid_type_for_btree, 82) \
- x(bkey_extent_size_zero, 83) \
- x(bkey_extent_size_greater_than_offset, 84) \
- x(bkey_size_nonzero, 85) \
- x(bkey_snapshot_nonzero, 86) \
- x(bkey_snapshot_zero, 87) \
- x(bkey_at_pos_max, 88) \
- x(bkey_before_start_of_btree_node, 89) \
- x(bkey_after_end_of_btree_node, 90) \
- x(bkey_val_size_nonzero, 91) \
- x(bkey_val_size_too_small, 92) \
- x(alloc_v1_val_size_bad, 93) \
- x(alloc_v2_unpack_error, 94) \
- x(alloc_v3_unpack_error, 95) \
- x(alloc_v4_val_size_bad, 96) \
- x(alloc_v4_backpointers_start_bad, 97) \
- x(alloc_key_data_type_bad, 98) \
- x(alloc_key_empty_but_have_data, 99) \
- x(alloc_key_dirty_sectors_0, 100) \
- x(alloc_key_data_type_inconsistency, 101) \
- x(alloc_key_to_missing_dev_bucket, 102) \
- x(alloc_key_cached_inconsistency, 103) \
- x(alloc_key_cached_but_read_time_zero, 104) \
- x(alloc_key_to_missing_lru_entry, 105) \
- x(alloc_key_data_type_wrong, 106) \
- x(alloc_key_gen_wrong, 107) \
- x(alloc_key_dirty_sectors_wrong, 108) \
- x(alloc_key_cached_sectors_wrong, 109) \
- x(alloc_key_stripe_wrong, 110) \
- x(alloc_key_stripe_redundancy_wrong, 111) \
- x(bucket_sector_count_overflow, 112) \
- x(bucket_metadata_type_mismatch, 113) \
- x(need_discard_key_wrong, 114) \
- x(freespace_key_wrong, 115) \
- x(freespace_hole_missing, 116) \
- x(bucket_gens_val_size_bad, 117) \
- x(bucket_gens_key_wrong, 118) \
- x(bucket_gens_hole_wrong, 119) \
- x(bucket_gens_to_invalid_dev, 120) \
- x(bucket_gens_to_invalid_buckets, 121) \
- x(bucket_gens_nonzero_for_invalid_buckets, 122) \
- x(need_discard_freespace_key_to_invalid_dev_bucket, 123) \
- x(need_discard_freespace_key_bad, 124) \
- x(backpointer_pos_wrong, 125) \
- x(backpointer_to_missing_device, 126) \
- x(backpointer_to_missing_alloc, 127) \
- x(backpointer_to_missing_ptr, 128) \
- x(lru_entry_at_time_0, 129) \
- x(lru_entry_to_invalid_bucket, 130) \
- x(lru_entry_bad, 131) \
- x(btree_ptr_val_too_big, 132) \
- x(btree_ptr_v2_val_too_big, 133) \
- x(btree_ptr_has_non_ptr, 134) \
- x(extent_ptrs_invalid_entry, 135) \
- x(extent_ptrs_no_ptrs, 136) \
- x(extent_ptrs_too_many_ptrs, 137) \
- x(extent_ptrs_redundant_crc, 138) \
- x(extent_ptrs_redundant_stripe, 139) \
- x(extent_ptrs_unwritten, 140) \
- x(extent_ptrs_written_and_unwritten, 141) \
- x(ptr_to_invalid_device, 142) \
- x(ptr_to_duplicate_device, 143) \
- x(ptr_after_last_bucket, 144) \
- x(ptr_before_first_bucket, 145) \
- x(ptr_spans_multiple_buckets, 146) \
- x(ptr_to_missing_backpointer, 147) \
- x(ptr_to_missing_alloc_key, 148) \
- x(ptr_to_missing_replicas_entry, 149) \
- x(ptr_to_missing_stripe, 150) \
- x(ptr_to_incorrect_stripe, 151) \
- x(ptr_gen_newer_than_bucket_gen, 152) \
- x(ptr_too_stale, 153) \
- x(stale_dirty_ptr, 154) \
- x(ptr_bucket_data_type_mismatch, 155) \
- x(ptr_cached_and_erasure_coded, 156) \
- x(ptr_crc_uncompressed_size_too_small, 157) \
- x(ptr_crc_csum_type_unknown, 158) \
- x(ptr_crc_compression_type_unknown, 159) \
- x(ptr_crc_redundant, 160) \
- x(ptr_crc_uncompressed_size_too_big, 161) \
- x(ptr_crc_nonce_mismatch, 162) \
- x(ptr_stripe_redundant, 163) \
- x(reservation_key_nr_replicas_invalid, 164) \
- x(reflink_v_refcount_wrong, 165) \
- x(reflink_p_to_missing_reflink_v, 166) \
- x(stripe_pos_bad, 167) \
- x(stripe_val_size_bad, 168) \
- x(stripe_sector_count_wrong, 169) \
- x(snapshot_tree_pos_bad, 170) \
- x(snapshot_tree_to_missing_snapshot, 171) \
- x(snapshot_tree_to_missing_subvol, 172) \
- x(snapshot_tree_to_wrong_subvol, 173) \
- x(snapshot_tree_to_snapshot_subvol, 174) \
- x(snapshot_pos_bad, 175) \
- x(snapshot_parent_bad, 176) \
- x(snapshot_children_not_normalized, 177) \
- x(snapshot_child_duplicate, 178) \
- x(snapshot_child_bad, 179) \
- x(snapshot_skiplist_not_normalized, 180) \
- x(snapshot_skiplist_bad, 181) \
- x(snapshot_should_not_have_subvol, 182) \
- x(snapshot_to_bad_snapshot_tree, 183) \
- x(snapshot_bad_depth, 184) \
- x(snapshot_bad_skiplist, 185) \
- x(subvol_pos_bad, 186) \
- x(subvol_not_master_and_not_snapshot, 187) \
- x(subvol_to_missing_root, 188) \
- x(subvol_root_wrong_bi_subvol, 189) \
- x(bkey_in_missing_snapshot, 190) \
- x(inode_pos_inode_nonzero, 191) \
- x(inode_pos_blockdev_range, 192) \
- x(inode_unpack_error, 193) \
- x(inode_str_hash_invalid, 194) \
- x(inode_v3_fields_start_bad, 195) \
- x(inode_snapshot_mismatch, 196) \
- x(inode_unlinked_but_clean, 197) \
- x(inode_unlinked_but_nlink_nonzero, 198) \
- x(inode_checksum_type_invalid, 199) \
- x(inode_compression_type_invalid, 200) \
- x(inode_subvol_root_but_not_dir, 201) \
- x(inode_i_size_dirty_but_clean, 202) \
- x(inode_i_sectors_dirty_but_clean, 203) \
- x(inode_i_sectors_wrong, 204) \
- x(inode_dir_wrong_nlink, 205) \
- x(inode_dir_multiple_links, 206) \
- x(inode_multiple_links_but_nlink_0, 207) \
- x(inode_wrong_backpointer, 208) \
- x(inode_wrong_nlink, 209) \
- x(inode_unreachable, 210) \
- x(deleted_inode_but_clean, 211) \
- x(deleted_inode_missing, 212) \
- x(deleted_inode_is_dir, 213) \
- x(deleted_inode_not_unlinked, 214) \
- x(extent_overlapping, 215) \
- x(extent_in_missing_inode, 216) \
- x(extent_in_non_reg_inode, 217) \
- x(extent_past_end_of_inode, 218) \
- x(dirent_empty_name, 219) \
- x(dirent_val_too_big, 220) \
- x(dirent_name_too_long, 221) \
- x(dirent_name_embedded_nul, 222) \
- x(dirent_name_dot_or_dotdot, 223) \
- x(dirent_name_has_slash, 224) \
- x(dirent_d_type_wrong, 225) \
- x(dirent_d_parent_subvol_wrong, 226) \
- x(dirent_in_missing_dir_inode, 227) \
- x(dirent_in_non_dir_inode, 228) \
- x(dirent_to_missing_inode, 229) \
- x(dirent_to_missing_subvol, 230) \
- x(dirent_to_itself, 231) \
- x(quota_type_invalid, 232) \
- x(xattr_val_size_too_small, 233) \
- x(xattr_val_size_too_big, 234) \
- x(xattr_invalid_type, 235) \
- x(xattr_name_invalid_chars, 236) \
- x(xattr_in_missing_inode, 237) \
- x(root_subvol_missing, 238) \
- x(root_dir_missing, 239) \
- x(root_inode_not_dir, 240) \
- x(dir_loop, 241) \
- x(hash_table_key_duplicate, 242) \
- x(hash_table_key_wrong_offset, 243)
-
-enum bch_sb_error_id {
-#define x(t, n) BCH_FSCK_ERR_##t = n,
- BCH_SB_ERRS()
-#undef x
- BCH_SB_ERR_MAX
-};
+extern const char * const bch2_sb_error_strs[];
extern const struct bch_sb_field_ops bch_sb_field_ops_errors;
diff --git a/fs/bcachefs/sb-errors_types.h b/fs/bcachefs/sb-errors_types.h
index b1c099843a39..3504c2d09c29 100644
--- a/fs/bcachefs/sb-errors_types.h
+++ b/fs/bcachefs/sb-errors_types.h
@@ -4,6 +4,259 @@
#include "darray.h"
+#define BCH_SB_ERRS() \
+ x(clean_but_journal_not_empty, 0) \
+ x(dirty_but_no_journal_entries, 1) \
+ x(dirty_but_no_journal_entries_post_drop_nonflushes, 2) \
+ x(sb_clean_journal_seq_mismatch, 3) \
+ x(sb_clean_btree_root_mismatch, 4) \
+ x(sb_clean_missing, 5) \
+ x(jset_unsupported_version, 6) \
+ x(jset_unknown_csum, 7) \
+ x(jset_last_seq_newer_than_seq, 8) \
+ x(jset_past_bucket_end, 9) \
+ x(jset_seq_blacklisted, 10) \
+ x(journal_entries_missing, 11) \
+ x(journal_entry_replicas_not_marked, 12) \
+ x(journal_entry_past_jset_end, 13) \
+ x(journal_entry_replicas_data_mismatch, 14) \
+ x(journal_entry_bkey_u64s_0, 15) \
+ x(journal_entry_bkey_past_end, 16) \
+ x(journal_entry_bkey_bad_format, 17) \
+ x(journal_entry_bkey_invalid, 18) \
+ x(journal_entry_btree_root_bad_size, 19) \
+ x(journal_entry_blacklist_bad_size, 20) \
+ x(journal_entry_blacklist_v2_bad_size, 21) \
+ x(journal_entry_blacklist_v2_start_past_end, 22) \
+ x(journal_entry_usage_bad_size, 23) \
+ x(journal_entry_data_usage_bad_size, 24) \
+ x(journal_entry_clock_bad_size, 25) \
+ x(journal_entry_clock_bad_rw, 26) \
+ x(journal_entry_dev_usage_bad_size, 27) \
+ x(journal_entry_dev_usage_bad_dev, 28) \
+ x(journal_entry_dev_usage_bad_pad, 29) \
+ x(btree_node_unreadable, 30) \
+ x(btree_node_fault_injected, 31) \
+ x(btree_node_bad_magic, 32) \
+ x(btree_node_bad_seq, 33) \
+ x(btree_node_unsupported_version, 34) \
+ x(btree_node_bset_older_than_sb_min, 35) \
+ x(btree_node_bset_newer_than_sb, 36) \
+ x(btree_node_data_missing, 37) \
+ x(btree_node_bset_after_end, 38) \
+ x(btree_node_replicas_sectors_written_mismatch, 39) \
+ x(btree_node_replicas_data_mismatch, 40) \
+ x(bset_unknown_csum, 41) \
+ x(bset_bad_csum, 42) \
+ x(bset_past_end_of_btree_node, 43) \
+ x(bset_wrong_sector_offset, 44) \
+ x(bset_empty, 45) \
+ x(bset_bad_seq, 46) \
+ x(bset_blacklisted_journal_seq, 47) \
+ x(first_bset_blacklisted_journal_seq, 48) \
+ x(btree_node_bad_btree, 49) \
+ x(btree_node_bad_level, 50) \
+ x(btree_node_bad_min_key, 51) \
+ x(btree_node_bad_max_key, 52) \
+ x(btree_node_bad_format, 53) \
+ x(btree_node_bkey_past_bset_end, 54) \
+ x(btree_node_bkey_bad_format, 55) \
+ x(btree_node_bad_bkey, 56) \
+ x(btree_node_bkey_out_of_order, 57) \
+ x(btree_root_bkey_invalid, 58) \
+ x(btree_root_read_error, 59) \
+ x(btree_root_bad_min_key, 60) \
+ x(btree_root_bad_max_key, 61) \
+ x(btree_node_read_error, 62) \
+ x(btree_node_topology_bad_min_key, 63) \
+ x(btree_node_topology_bad_max_key, 64) \
+ x(btree_node_topology_overwritten_by_prev_node, 65) \
+ x(btree_node_topology_overwritten_by_next_node, 66) \
+ x(btree_node_topology_interior_node_empty, 67) \
+ x(fs_usage_hidden_wrong, 68) \
+ x(fs_usage_btree_wrong, 69) \
+ x(fs_usage_data_wrong, 70) \
+ x(fs_usage_cached_wrong, 71) \
+ x(fs_usage_reserved_wrong, 72) \
+ x(fs_usage_persistent_reserved_wrong, 73) \
+ x(fs_usage_nr_inodes_wrong, 74) \
+ x(fs_usage_replicas_wrong, 75) \
+ x(dev_usage_buckets_wrong, 76) \
+ x(dev_usage_sectors_wrong, 77) \
+ x(dev_usage_fragmented_wrong, 78) \
+ x(dev_usage_buckets_ec_wrong, 79) \
+ x(bkey_version_in_future, 80) \
+ x(bkey_u64s_too_small, 81) \
+ x(bkey_invalid_type_for_btree, 82) \
+ x(bkey_extent_size_zero, 83) \
+ x(bkey_extent_size_greater_than_offset, 84) \
+ x(bkey_size_nonzero, 85) \
+ x(bkey_snapshot_nonzero, 86) \
+ x(bkey_snapshot_zero, 87) \
+ x(bkey_at_pos_max, 88) \
+ x(bkey_before_start_of_btree_node, 89) \
+ x(bkey_after_end_of_btree_node, 90) \
+ x(bkey_val_size_nonzero, 91) \
+ x(bkey_val_size_too_small, 92) \
+ x(alloc_v1_val_size_bad, 93) \
+ x(alloc_v2_unpack_error, 94) \
+ x(alloc_v3_unpack_error, 95) \
+ x(alloc_v4_val_size_bad, 96) \
+ x(alloc_v4_backpointers_start_bad, 97) \
+ x(alloc_key_data_type_bad, 98) \
+ x(alloc_key_empty_but_have_data, 99) \
+ x(alloc_key_dirty_sectors_0, 100) \
+ x(alloc_key_data_type_inconsistency, 101) \
+ x(alloc_key_to_missing_dev_bucket, 102) \
+ x(alloc_key_cached_inconsistency, 103) \
+ x(alloc_key_cached_but_read_time_zero, 104) \
+ x(alloc_key_to_missing_lru_entry, 105) \
+ x(alloc_key_data_type_wrong, 106) \
+ x(alloc_key_gen_wrong, 107) \
+ x(alloc_key_dirty_sectors_wrong, 108) \
+ x(alloc_key_cached_sectors_wrong, 109) \
+ x(alloc_key_stripe_wrong, 110) \
+ x(alloc_key_stripe_redundancy_wrong, 111) \
+ x(bucket_sector_count_overflow, 112) \
+ x(bucket_metadata_type_mismatch, 113) \
+ x(need_discard_key_wrong, 114) \
+ x(freespace_key_wrong, 115) \
+ x(freespace_hole_missing, 116) \
+ x(bucket_gens_val_size_bad, 117) \
+ x(bucket_gens_key_wrong, 118) \
+ x(bucket_gens_hole_wrong, 119) \
+ x(bucket_gens_to_invalid_dev, 120) \
+ x(bucket_gens_to_invalid_buckets, 121) \
+ x(bucket_gens_nonzero_for_invalid_buckets, 122) \
+ x(need_discard_freespace_key_to_invalid_dev_bucket, 123) \
+ x(need_discard_freespace_key_bad, 124) \
+ x(backpointer_pos_wrong, 125) \
+ x(backpointer_to_missing_device, 126) \
+ x(backpointer_to_missing_alloc, 127) \
+ x(backpointer_to_missing_ptr, 128) \
+ x(lru_entry_at_time_0, 129) \
+ x(lru_entry_to_invalid_bucket, 130) \
+ x(lru_entry_bad, 131) \
+ x(btree_ptr_val_too_big, 132) \
+ x(btree_ptr_v2_val_too_big, 133) \
+ x(btree_ptr_has_non_ptr, 134) \
+ x(extent_ptrs_invalid_entry, 135) \
+ x(extent_ptrs_no_ptrs, 136) \
+ x(extent_ptrs_too_many_ptrs, 137) \
+ x(extent_ptrs_redundant_crc, 138) \
+ x(extent_ptrs_redundant_stripe, 139) \
+ x(extent_ptrs_unwritten, 140) \
+ x(extent_ptrs_written_and_unwritten, 141) \
+ x(ptr_to_invalid_device, 142) \
+ x(ptr_to_duplicate_device, 143) \
+ x(ptr_after_last_bucket, 144) \
+ x(ptr_before_first_bucket, 145) \
+ x(ptr_spans_multiple_buckets, 146) \
+ x(ptr_to_missing_backpointer, 147) \
+ x(ptr_to_missing_alloc_key, 148) \
+ x(ptr_to_missing_replicas_entry, 149) \
+ x(ptr_to_missing_stripe, 150) \
+ x(ptr_to_incorrect_stripe, 151) \
+ x(ptr_gen_newer_than_bucket_gen, 152) \
+ x(ptr_too_stale, 153) \
+ x(stale_dirty_ptr, 154) \
+ x(ptr_bucket_data_type_mismatch, 155) \
+ x(ptr_cached_and_erasure_coded, 156) \
+ x(ptr_crc_uncompressed_size_too_small, 157) \
+ x(ptr_crc_csum_type_unknown, 158) \
+ x(ptr_crc_compression_type_unknown, 159) \
+ x(ptr_crc_redundant, 160) \
+ x(ptr_crc_uncompressed_size_too_big, 161) \
+ x(ptr_crc_nonce_mismatch, 162) \
+ x(ptr_stripe_redundant, 163) \
+ x(reservation_key_nr_replicas_invalid, 164) \
+ x(reflink_v_refcount_wrong, 165) \
+ x(reflink_p_to_missing_reflink_v, 166) \
+ x(stripe_pos_bad, 167) \
+ x(stripe_val_size_bad, 168) \
+ x(stripe_sector_count_wrong, 169) \
+ x(snapshot_tree_pos_bad, 170) \
+ x(snapshot_tree_to_missing_snapshot, 171) \
+ x(snapshot_tree_to_missing_subvol, 172) \
+ x(snapshot_tree_to_wrong_subvol, 173) \
+ x(snapshot_tree_to_snapshot_subvol, 174) \
+ x(snapshot_pos_bad, 175) \
+ x(snapshot_parent_bad, 176) \
+ x(snapshot_children_not_normalized, 177) \
+ x(snapshot_child_duplicate, 178) \
+ x(snapshot_child_bad, 179) \
+ x(snapshot_skiplist_not_normalized, 180) \
+ x(snapshot_skiplist_bad, 181) \
+ x(snapshot_should_not_have_subvol, 182) \
+ x(snapshot_to_bad_snapshot_tree, 183) \
+ x(snapshot_bad_depth, 184) \
+ x(snapshot_bad_skiplist, 185) \
+ x(subvol_pos_bad, 186) \
+ x(subvol_not_master_and_not_snapshot, 187) \
+ x(subvol_to_missing_root, 188) \
+ x(subvol_root_wrong_bi_subvol, 189) \
+ x(bkey_in_missing_snapshot, 190) \
+ x(inode_pos_inode_nonzero, 191) \
+ x(inode_pos_blockdev_range, 192) \
+ x(inode_unpack_error, 193) \
+ x(inode_str_hash_invalid, 194) \
+ x(inode_v3_fields_start_bad, 195) \
+ x(inode_snapshot_mismatch, 196) \
+ x(inode_unlinked_but_clean, 197) \
+ x(inode_unlinked_but_nlink_nonzero, 198) \
+ x(inode_checksum_type_invalid, 199) \
+ x(inode_compression_type_invalid, 200) \
+ x(inode_subvol_root_but_not_dir, 201) \
+ x(inode_i_size_dirty_but_clean, 202) \
+ x(inode_i_sectors_dirty_but_clean, 203) \
+ x(inode_i_sectors_wrong, 204) \
+ x(inode_dir_wrong_nlink, 205) \
+ x(inode_dir_multiple_links, 206) \
+ x(inode_multiple_links_but_nlink_0, 207) \
+ x(inode_wrong_backpointer, 208) \
+ x(inode_wrong_nlink, 209) \
+ x(inode_unreachable, 210) \
+ x(deleted_inode_but_clean, 211) \
+ x(deleted_inode_missing, 212) \
+ x(deleted_inode_is_dir, 213) \
+ x(deleted_inode_not_unlinked, 214) \
+ x(extent_overlapping, 215) \
+ x(extent_in_missing_inode, 216) \
+ x(extent_in_non_reg_inode, 217) \
+ x(extent_past_end_of_inode, 218) \
+ x(dirent_empty_name, 219) \
+ x(dirent_val_too_big, 220) \
+ x(dirent_name_too_long, 221) \
+ x(dirent_name_embedded_nul, 222) \
+ x(dirent_name_dot_or_dotdot, 223) \
+ x(dirent_name_has_slash, 224) \
+ x(dirent_d_type_wrong, 225) \
+ x(dirent_d_parent_subvol_wrong, 226) \
+ x(dirent_in_missing_dir_inode, 227) \
+ x(dirent_in_non_dir_inode, 228) \
+ x(dirent_to_missing_inode, 229) \
+ x(dirent_to_missing_subvol, 230) \
+ x(dirent_to_itself, 231) \
+ x(quota_type_invalid, 232) \
+ x(xattr_val_size_too_small, 233) \
+ x(xattr_val_size_too_big, 234) \
+ x(xattr_invalid_type, 235) \
+ x(xattr_name_invalid_chars, 236) \
+ x(xattr_in_missing_inode, 237) \
+ x(root_subvol_missing, 238) \
+ x(root_dir_missing, 239) \
+ x(root_inode_not_dir, 240) \
+ x(dir_loop, 241) \
+ x(hash_table_key_duplicate, 242) \
+ x(hash_table_key_wrong_offset, 243)
+
+enum bch_sb_error_id {
+#define x(t, n) BCH_FSCK_ERR_##t = n,
+ BCH_SB_ERRS()
+#undef x
+ BCH_SB_ERR_MAX
+};
+
struct bch_sb_error_entry_cpu {
u64 id:16,
nr:48;
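
The BCH_SB_ERRS() list moved into sb-errors_types.h above is an x-macro: the same list is expanded once to produce the BCH_FSCK_ERR_* enum (the #define x(t, n) block at the end of the hunk) and, presumably in sb-errors.c, once more to fill the bch2_sb_error_strs[] name table that sb-errors.h now exports. A minimal standalone sketch of the technique, with shortened illustrative names rather than the kernel's:

#define ERR_LIST()			\
	x(journal_missing,	0)	\
	x(btree_bad_magic,	1)

/* First expansion: numeric IDs. */
enum err_id {
#define x(t, n)	ERR_##t = n,
	ERR_LIST()
#undef x
	ERR_MAX
};

/* Second expansion: an index-matched string table. */
static const char * const err_strs[] = {
#define x(t, n)	[n] = #t,
	ERR_LIST()
#undef x
};

Keeping the numbers explicit in the list, rather than letting the enum auto-increment, is what lets the on-disk error identifiers stay stable when entries are later added or removed.
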
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index fccd25aa3242..22b34a8e4d6e 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -146,6 +146,24 @@ int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
return bch2_subvolume_get_inlined(trans, subvol, inconsistent_if_not_found, iter_flags, s);
}
+int bch2_subvol_is_ro_trans(struct btree_trans *trans, u32 subvol)
+{
+ struct bch_subvolume s;
+ int ret = bch2_subvolume_get_inlined(trans, subvol, true, 0, &s);
+ if (ret)
+ return ret;
+
+ if (BCH_SUBVOLUME_RO(&s))
+ return -EROFS;
+ return 0;
+}
+
+int bch2_subvol_is_ro(struct bch_fs *c, u32 subvol)
+{
+ return bch2_trans_do(c, NULL, NULL, 0,
+ bch2_subvol_is_ro_trans(trans, subvol));
+}
+
int bch2_snapshot_get_subvol(struct btree_trans *trans, u32 snapshot,
struct bch_subvolume *subvol)
{
diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h
index a1003d30ab0a..a6f56f66e27c 100644
--- a/fs/bcachefs/subvolume.h
+++ b/fs/bcachefs/subvolume.h
@@ -23,6 +23,9 @@ int bch2_subvolume_get(struct btree_trans *, unsigned,
bool, int, struct bch_subvolume *);
int bch2_subvolume_get_snapshot(struct btree_trans *, u32, u32 *);
+int bch2_subvol_is_ro_trans(struct btree_trans *, u32);
+int bch2_subvol_is_ro(struct bch_fs *, u32);
+
int bch2_delete_dead_snapshots(struct bch_fs *);
void bch2_delete_dead_snapshots_async(struct bch_fs *);
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index f3e12f7979d5..78013deda9df 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -13,6 +13,7 @@
#include "replicas.h"
#include "quota.h"
#include "sb-clean.h"
+#include "sb-downgrade.h"
#include "sb-errors.h"
#include "sb-members.h"
#include "super-io.h"
@@ -163,8 +164,8 @@ void bch2_sb_field_delete(struct bch_sb_handle *sb,
void bch2_free_super(struct bch_sb_handle *sb)
{
kfree(sb->bio);
- if (!IS_ERR_OR_NULL(sb->bdev))
- blkdev_put(sb->bdev, sb->holder);
+ if (!IS_ERR_OR_NULL(sb->bdev_handle))
+ bdev_release(sb->bdev_handle);
kfree(sb->holder);
kfree(sb->sb_name);
@@ -264,6 +265,17 @@ struct bch_sb_field *bch2_sb_field_resize_id(struct bch_sb_handle *sb,
return f;
}
+struct bch_sb_field *bch2_sb_field_get_minsize_id(struct bch_sb_handle *sb,
+ enum bch_sb_field_type type,
+ unsigned u64s)
+{
+ struct bch_sb_field *f = bch2_sb_field_get_id(sb->sb, type);
+
+ if (!f || le32_to_cpu(f->u64s) < u64s)
+ f = bch2_sb_field_resize_id(sb, type, u64s);
+ return f;
+}
+
/* Superblock validate: */
static int validate_sb_layout(struct bch_sb_layout *layout, struct printbuf *out)
@@ -484,6 +496,21 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, struct printbuf *out,
/* device open: */
+static unsigned long le_ulong_to_cpu(unsigned long v)
+{
+ return sizeof(unsigned long) == 8
+ ? le64_to_cpu(v)
+ : le32_to_cpu(v);
+}
+
+static void le_bitvector_to_cpu(unsigned long *dst, unsigned long *src, unsigned nr)
+{
+ BUG_ON(nr & (BITS_PER_TYPE(long) - 1));
+
+ for (unsigned i = 0; i < BITS_TO_LONGS(nr); i++)
+ dst[i] = le_ulong_to_cpu(src[i]);
+}
+
static void bch2_sb_update(struct bch_fs *c)
{
struct bch_sb *src = c->disk_sb.sb;
@@ -512,8 +539,15 @@ static void bch2_sb_update(struct bch_fs *c)
c->sb.features = le64_to_cpu(src->features[0]);
c->sb.compat = le64_to_cpu(src->compat[0]);
+ memset(c->sb.errors_silent, 0, sizeof(c->sb.errors_silent));
+
+ struct bch_sb_field_ext *ext = bch2_sb_field_get(src, ext);
+ if (ext)
+ le_bitvector_to_cpu(c->sb.errors_silent, (void *) ext->errors_silent,
+ sizeof(c->sb.errors_silent) * 8);
+
for_each_member_device(ca, c, i) {
- struct bch_member m = bch2_sb_member_get(src, i);
+ struct bch_member m = bch2_sb_member_get(src, ca->dev_idx);
ca->mi = bch2_mi_to_cpu(&m);
}
}
@@ -691,21 +725,22 @@ retry:
if (!opt_get(*opts, nochanges))
sb->mode |= BLK_OPEN_WRITE;
- sb->bdev = blkdev_get_by_path(path, sb->mode, sb->holder, &bch2_sb_handle_bdev_ops);
- if (IS_ERR(sb->bdev) &&
- PTR_ERR(sb->bdev) == -EACCES &&
+ sb->bdev_handle = bdev_open_by_path(path, sb->mode, sb->holder, &bch2_sb_handle_bdev_ops);
+ if (IS_ERR(sb->bdev_handle) &&
+ PTR_ERR(sb->bdev_handle) == -EACCES &&
opt_get(*opts, read_only)) {
sb->mode &= ~BLK_OPEN_WRITE;
- sb->bdev = blkdev_get_by_path(path, sb->mode, sb->holder, &bch2_sb_handle_bdev_ops);
- if (!IS_ERR(sb->bdev))
+ sb->bdev_handle = bdev_open_by_path(path, sb->mode, sb->holder, &bch2_sb_handle_bdev_ops);
+ if (!IS_ERR(sb->bdev_handle))
opt_set(*opts, nochanges, true);
}
- if (IS_ERR(sb->bdev)) {
- ret = PTR_ERR(sb->bdev);
+ if (IS_ERR(sb->bdev_handle)) {
+ ret = PTR_ERR(sb->bdev_handle);
goto out;
}
+ sb->bdev = sb->bdev_handle->bdev;
ret = bch2_sb_realloc(sb, 0);
if (ret) {
@@ -906,6 +941,7 @@ int bch2_write_super(struct bch_fs *c)
bch2_sb_members_from_cpu(c);
bch2_sb_members_cpy_v2_v1(&c->disk_sb);
bch2_sb_errors_from_cpu(c);
+ bch2_sb_downgrade_update(c);
for_each_online_member(ca, c, i)
bch2_sb_from_fs(c, ca);
@@ -1029,8 +1065,10 @@ void __bch2_check_set_feature(struct bch_fs *c, unsigned feat)
}
/* Downgrade if superblock is at a higher version than currently supported: */
-void bch2_sb_maybe_downgrade(struct bch_fs *c)
+bool bch2_check_version_downgrade(struct bch_fs *c)
{
+ bool ret = bcachefs_metadata_version_current < c->sb.version;
+
lockdep_assert_held(&c->sb_lock);
/*
@@ -1044,16 +1082,61 @@ void bch2_sb_maybe_downgrade(struct bch_fs *c)
if (c->sb.version_min > bcachefs_metadata_version_current)
c->disk_sb.sb->version_min = cpu_to_le16(bcachefs_metadata_version_current);
c->disk_sb.sb->compat[0] &= cpu_to_le64((1ULL << BCH_COMPAT_NR) - 1);
+ return ret;
}
void bch2_sb_upgrade(struct bch_fs *c, unsigned new_version)
{
lockdep_assert_held(&c->sb_lock);
+ if (BCH_VERSION_MAJOR(new_version) >
+ BCH_VERSION_MAJOR(le16_to_cpu(c->disk_sb.sb->version)))
+ bch2_sb_field_resize(&c->disk_sb, downgrade, 0);
+
c->disk_sb.sb->version = cpu_to_le16(new_version);
c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
}
+static int bch2_sb_ext_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ if (vstruct_bytes(f) < 88) {
+ prt_printf(err, "field too small (%zu < %u)", vstruct_bytes(f), 88);
+ return -BCH_ERR_invalid_sb_ext;
+ }
+
+ return 0;
+}
+
+static void bch2_sb_ext_to_text(struct printbuf *out, struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_ext *e = field_to_type(f, ext);
+
+ prt_printf(out, "Recovery passes required:");
+ prt_tab(out);
+ prt_bitflags(out, bch2_recovery_passes,
+ bch2_recovery_passes_from_stable(le64_to_cpu(e->recovery_passes_required[0])));
+ prt_newline(out);
+
+ unsigned long *errors_silent = kmalloc(sizeof(e->errors_silent), GFP_KERNEL);
+ if (errors_silent) {
+ le_bitvector_to_cpu(errors_silent, (void *) e->errors_silent, sizeof(e->errors_silent) * 8);
+
+ prt_printf(out, "Errors to silently fix:");
+ prt_tab(out);
+ prt_bitflags_vector(out, bch2_sb_error_strs, errors_silent, sizeof(e->errors_silent) * 8);
+ prt_newline(out);
+
+ kfree(errors_silent);
+ }
+}
+
+static const struct bch_sb_field_ops bch_sb_field_ops_ext = {
+ .validate = bch2_sb_ext_validate,
+ .to_text = bch2_sb_ext_to_text,
+};
+
static const struct bch_sb_field_ops *bch2_sb_field_ops[] = {
#define x(f, nr) \
[BCH_SB_FIELD_##f] = &bch_sb_field_ops_##f,
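
bch2_sb_field_get_minsize_id(), added above, is a get-or-grow helper: it returns the existing superblock field if it already spans at least u64s 64-bit words and otherwise falls through to bch2_sb_field_resize_id(), so callers need not distinguish "field missing" from "field too small". A hedged usage sketch via the typed wrapper declared in super-io.h (the size and the -ENOSPC fallback are illustrative, not taken from the patch):

	struct bch_sb_field_ext *ext =
		bch2_sb_field_get_minsize(&c->disk_sb, ext, 16);
	if (!ext)
		return -ENOSPC;	/* resize failed: no room left in the superblock */
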
diff --git a/fs/bcachefs/super-io.h b/fs/bcachefs/super-io.h
index f5abd102bff7..e41e5de531a0 100644
--- a/fs/bcachefs/super-io.h
+++ b/fs/bcachefs/super-io.h
@@ -40,6 +40,16 @@ struct bch_sb_field *bch2_sb_field_resize_id(struct bch_sb_handle *,
#define bch2_sb_field_resize(_sb, _name, _u64s) \
field_to_type(bch2_sb_field_resize_id(_sb, BCH_SB_FIELD_##_name, _u64s), _name)
+struct bch_sb_field *bch2_sb_field_get_minsize_id(struct bch_sb_handle *,
+ enum bch_sb_field_type, unsigned);
+#define bch2_sb_field_get_minsize(_sb, _name, _u64s) \
+ field_to_type(bch2_sb_field_get_minsize_id(_sb, BCH_SB_FIELD_##_name, _u64s), _name)
+
+#define bch2_sb_field_nr_entries(_f) \
+ (_f ? ((bch2_sb_field_bytes(&_f->field) - sizeof(*_f)) / \
+ sizeof(_f->entries[0])) \
+ : 0)
+
void bch2_sb_field_delete(struct bch_sb_handle *, enum bch_sb_field_type);
extern const char * const bch2_sb_fields[];
@@ -83,7 +93,7 @@ static inline void bch2_check_set_feature(struct bch_fs *c, unsigned feat)
__bch2_check_set_feature(c, feat);
}
-void bch2_sb_maybe_downgrade(struct bch_fs *);
+bool bch2_check_version_downgrade(struct bch_fs *);
void bch2_sb_upgrade(struct bch_fs *, unsigned);
void bch2_sb_field_to_text(struct printbuf *, struct bch_sb *,
diff --git a/fs/bcachefs/super_types.h b/fs/bcachefs/super_types.h
index 9c1fd4ca2b10..b2119686e2e1 100644
--- a/fs/bcachefs/super_types.h
+++ b/fs/bcachefs/super_types.h
@@ -4,6 +4,7 @@
struct bch_sb_handle {
struct bch_sb *sb;
+ struct bdev_handle *bdev_handle;
struct block_device *bdev;
char *sb_name;
struct bio *bio;
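
The super-io.c and super_types.h hunks switch bcachefs from blkdev_get_by_path()/blkdev_put() to the bdev_handle API used by this series: bdev_open_by_path() yields a struct bdev_handle, the familiar struct block_device is reached through handle->bdev, and teardown is a single bdev_release(handle). A minimal sketch of that open/release shape, assuming the same API and a hypothetical caller (error handling trimmed to the essentials):

#include <linux/blkdev.h>

static int myfs_open_bdev(const char *path, void *holder,
			  const struct blk_holder_ops *hops,
			  struct bdev_handle **ret)
{
	struct bdev_handle *handle =
		bdev_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
				  holder, hops);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	*ret = handle;		/* handle->bdev is the usual struct block_device */
	return 0;
}

static void myfs_close_bdev(struct bdev_handle *handle)
{
	if (!IS_ERR_OR_NULL(handle))
		bdev_release(handle);	/* replaces blkdev_put(bdev, holder) */
}
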
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 2984b57b2958..b701f7fe0784 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -243,6 +243,7 @@ do { \
#define prt_units_s64(...) bch2_prt_units_s64(__VA_ARGS__)
#define prt_string_option(...) bch2_prt_string_option(__VA_ARGS__)
#define prt_bitflags(...) bch2_prt_bitflags(__VA_ARGS__)
+#define prt_bitflags_vector(...) bch2_prt_bitflags_vector(__VA_ARGS__)
void bch2_pr_time_units(struct printbuf *, u64);
void bch2_prt_datetime(struct printbuf *, time64_t);
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index 79d982674c18..5a1858fb9879 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -176,7 +176,8 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
struct btree_iter inode_iter = { NULL };
int ret;
- ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_INTENT);
+ ret = bch2_subvol_is_ro_trans(trans, inum.subvol) ?:
+ bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_INTENT);
if (ret)
return ret;
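
The xattr.c hunk is the caller side of the new read-only-subvolume check: the write path is gated on bch2_subvol_is_ro_trans() before the inode lookup, chained with GCC's binary "?:" operator, an error-chaining idiom common in bcachefs — a ?: b evaluates b only when a is zero, so the first non-zero error code wins. A small illustration with hypothetical step functions:

#include <linux/errno.h>

static int step_one(void)   { return 0; }
static int step_two(void)   { return -EROFS; }
static int step_three(void) { return 0; }

static int do_all_steps(void)
{
	/*
	 * Equivalent to: ret = step_one(); if (!ret) ret = step_two(); ...
	 * step_three() is never reached because step_two() returns -EROFS.
	 */
	return step_one() ?: step_two() ?: step_three();
}
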
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index adc2230079c6..a778411574a9 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -11,6 +11,7 @@
*/
#include <linux/fs.h>
+#include <linux/mpage.h>
#include <linux/buffer_head.h>
#include "bfs.h"
@@ -150,9 +151,10 @@ out:
return err;
}
-static int bfs_writepage(struct page *page, struct writeback_control *wbc)
+static int bfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- return block_write_full_page(page, bfs_get_block, wbc);
+ return mpage_writepages(mapping, wbc, bfs_get_block);
}
static int bfs_read_folio(struct file *file, struct folio *folio)
@@ -190,9 +192,10 @@ const struct address_space_operations bfs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = bfs_read_folio,
- .writepage = bfs_writepage,
+ .writepages = bfs_writepages,
.write_begin = bfs_write_begin,
.write_end = generic_write_end,
+ .migrate_folio = buffer_migrate_folio,
.bmap = bfs_bmap,
};
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8f724c54fc8e..b6ff6f320198 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -875,7 +875,7 @@ static int attach_extent_buffer_page(struct extent_buffer *eb,
* will not race with any other ebs.
*/
if (page->mapping)
- lockdep_assert_held(&page->mapping->private_lock);
+ lockdep_assert_held(&page->mapping->i_private_lock);
if (fs_info->nodesize >= PAGE_SIZE) {
if (!PagePrivate(page))
@@ -1741,16 +1741,16 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
* Take private lock to ensure the subpage won't be detached
* in the meantime.
*/
- spin_lock(&page->mapping->private_lock);
+ spin_lock(&page->mapping->i_private_lock);
if (!PagePrivate(page)) {
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
break;
}
spin_lock_irqsave(&subpage->lock, flags);
if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
subpage->bitmaps)) {
spin_unlock_irqrestore(&subpage->lock, flags);
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
bit_start++;
continue;
}
@@ -1764,7 +1764,7 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
*/
eb = find_extent_buffer_nolock(fs_info, start);
spin_unlock_irqrestore(&subpage->lock, flags);
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
/*
* The eb has already reached 0 refs thus find_extent_buffer()
@@ -1816,9 +1816,9 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
return submit_eb_subpage(page, wbc);
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
if (!PagePrivate(page)) {
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return 0;
}
@@ -1829,16 +1829,16 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
* crashing the machine for something we can survive anyway.
*/
if (WARN_ON(!eb)) {
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return 0;
}
if (eb == ctx->eb) {
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return 0;
}
ret = atomic_inc_not_zero(&eb->refs);
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
if (!ret)
return 0;
@@ -3062,7 +3062,7 @@ static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
{
struct btrfs_subpage *subpage;
- lockdep_assert_held(&page->mapping->private_lock);
+ lockdep_assert_held(&page->mapping->i_private_lock);
if (PagePrivate(page)) {
subpage = (struct btrfs_subpage *)page->private;
@@ -3085,14 +3085,14 @@ static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *pag
/*
* For mapped eb, we're going to change the page private, which should
- * be done under the private_lock.
+ * be done under the i_private_lock.
*/
if (mapped)
- spin_lock(&page->mapping->private_lock);
+ spin_lock(&page->mapping->i_private_lock);
if (!PagePrivate(page)) {
if (mapped)
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
return;
}
@@ -3116,7 +3116,7 @@ static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *pag
detach_page_private(page);
}
if (mapped)
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
return;
}
@@ -3139,7 +3139,7 @@ static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *pag
if (!page_range_has_eb(fs_info, page))
btrfs_detach_subpage(fs_info, page);
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
}
/* Release all pages attached to the extent buffer */
@@ -3520,7 +3520,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
/*
* Preallocate page->private for subpage case, so that we won't
- * allocate memory with private_lock nor page lock hold.
+ * allocate memory with i_private_lock nor page lock hold.
*
* The memory will be freed by attach_extent_buffer_page() or freed
* manually if we exit earlier.
@@ -3541,10 +3541,10 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
goto free_eb;
}
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
exists = grab_extent_buffer(fs_info, p);
if (exists) {
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
unlock_page(p);
put_page(p);
mark_extent_buffer_accessed(exists, p);
@@ -3564,7 +3564,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
* Thus needs no special handling in error path.
*/
btrfs_page_inc_eb_refs(fs_info, p);
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
eb->pages[i] = p;
@@ -4569,12 +4569,12 @@ static int try_release_subpage_extent_buffer(struct page *page)
* Finally to check if we have cleared page private, as if we have
* released all ebs in the page, the page private should be cleared now.
*/
- spin_lock(&page->mapping->private_lock);
+ spin_lock(&page->mapping->i_private_lock);
if (!PagePrivate(page))
ret = 1;
else
ret = 0;
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
return ret;
}
@@ -4590,9 +4590,9 @@ int try_release_extent_buffer(struct page *page)
* We need to make sure nobody is changing page->private, as we rely on
* page->private as the pointer to extent buffer.
*/
- spin_lock(&page->mapping->private_lock);
+ spin_lock(&page->mapping->i_private_lock);
if (!PagePrivate(page)) {
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
return 1;
}
@@ -4607,10 +4607,10 @@ int try_release_extent_buffer(struct page *page)
spin_lock(&eb->refs_lock);
if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
spin_unlock(&eb->refs_lock);
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
return 0;
}
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
/*
* If tree ref isn't set then we know the ref on this eb is a real ref,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index fb3c3f43c3fa..fea464b2a54e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -10930,7 +10930,7 @@ static const struct address_space_operations btrfs_aops = {
.release_folio = btrfs_release_folio,
.migrate_folio = btrfs_migrate_folio,
.dirty_folio = filemap_dirty_folio,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
.swap_activate = btrfs_swap_activate,
.swap_deactivate = btrfs_swap_deactivate,
};
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a1743904202b..41b479861b3c 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4533,29 +4533,29 @@ static int btrfs_ioctl_encoded_write(struct file *file, void __user *argp, bool
if (ret < 0)
goto out_acct;
- file_start_write(file);
-
if (iov_iter_count(&iter) == 0) {
ret = 0;
- goto out_end_write;
+ goto out_iov;
}
pos = args.offset;
ret = rw_verify_area(WRITE, file, &pos, args.len);
if (ret < 0)
- goto out_end_write;
+ goto out_iov;
init_sync_kiocb(&kiocb, file);
ret = kiocb_set_rw_flags(&kiocb, 0);
if (ret)
- goto out_end_write;
+ goto out_iov;
kiocb.ki_pos = pos;
+ file_start_write(file);
+
ret = btrfs_do_write_iter(&kiocb, &iter, &args);
if (ret > 0)
fsnotify_modify(file);
-out_end_write:
file_end_write(file);
+out_iov:
kfree(iov);
out_acct:
if (ret > 0)
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index 1b999c6e4193..2347cf15278b 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -200,7 +200,7 @@ void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
return;
ASSERT(PagePrivate(page) && page->mapping);
- lockdep_assert_held(&page->mapping->private_lock);
+ lockdep_assert_held(&page->mapping->i_private_lock);
subpage = (struct btrfs_subpage *)page->private;
atomic_inc(&subpage->eb_refs);
@@ -215,7 +215,7 @@ void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
return;
ASSERT(PagePrivate(page) && page->mapping);
- lockdep_assert_held(&page->mapping->private_lock);
+ lockdep_assert_held(&page->mapping->i_private_lock);
subpage = (struct btrfs_subpage *)page->private;
ASSERT(atomic_read(&subpage->eb_refs));
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index ef256b944c72..2a3a5bf102dc 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1406,6 +1406,8 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
return ERR_PTR(error);
}
+ /* No support for restricting writes to btrfs devices yet... */
+ mode &= ~BLK_OPEN_RESTRICT_WRITES;
/*
* Setup a dummy root and fs_info for test/set super. This is because
* we don't actually fill this stuff out until open_ctree, but we need
diff --git a/fs/buffer.c b/fs/buffer.c
index 967f34b70aa8..d3bcf601d3e5 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -180,11 +180,11 @@ EXPORT_SYMBOL(end_buffer_write_sync);
* Various filesystems appear to want __find_get_block to be non-blocking.
* But it's the page lock which protects the buffers. To get around this,
* we get exclusion from try_to_free_buffers with the blockdev mapping's
- * private_lock.
+ * i_private_lock.
*
- * Hack idea: for the blockdev mapping, private_lock contention
+ * Hack idea: for the blockdev mapping, i_private_lock contention
* may be quite high. This code could TryLock the page, and if that
- * succeeds, there is no need to take private_lock.
+ * succeeds, there is no need to take i_private_lock.
*/
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
@@ -199,12 +199,12 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
int all_mapped = 1;
static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
- index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
+ index = ((loff_t)block << bd_inode->i_blkbits) / PAGE_SIZE;
folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
if (IS_ERR(folio))
goto out;
- spin_lock(&bd_mapping->private_lock);
+ spin_lock(&bd_mapping->i_private_lock);
head = folio_buffers(folio);
if (!head)
goto out_unlock;
@@ -236,7 +236,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
1 << bd_inode->i_blkbits);
}
out_unlock:
- spin_unlock(&bd_mapping->private_lock);
+ spin_unlock(&bd_mapping->i_private_lock);
folio_put(folio);
out:
return ret;
@@ -372,10 +372,10 @@ static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
}
/*
- * Completion handler for block_write_full_page() - pages which are unlocked
- * during I/O, and which have PageWriteback cleared upon I/O completion.
+ * Completion handler for block_write_full_folio() - folios which are unlocked
+ * during I/O, and which have the writeback flag cleared upon I/O completion.
*/
-void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
unsigned long flags;
struct buffer_head *first;
@@ -415,7 +415,6 @@ still_busy:
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
return;
}
-EXPORT_SYMBOL(end_buffer_async_write);
/*
* If a page's buffers are under async readin (end_buffer_async_read
@@ -467,25 +466,25 @@ EXPORT_SYMBOL(mark_buffer_async_write);
*
* The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
* inode_has_buffers() and invalidate_inode_buffers() are provided for the
- * management of a list of dependent buffers at ->i_mapping->private_list.
+ * management of a list of dependent buffers at ->i_mapping->i_private_list.
*
* Locking is a little subtle: try_to_free_buffers() will remove buffers
* from their controlling inode's queue when they are being freed. But
* try_to_free_buffers() will be operating against the *blockdev* mapping
* at the time, not against the S_ISREG file which depends on those buffers.
- * So the locking for private_list is via the private_lock in the address_space
+ * So the locking for i_private_list is via the i_private_lock in the address_space
* which backs the buffers. Which is different from the address_space
* against which the buffers are listed. So for a particular address_space,
- * mapping->private_lock does *not* protect mapping->private_list! In fact,
- * mapping->private_list will always be protected by the backing blockdev's
- * ->private_lock.
+ * mapping->i_private_lock does *not* protect mapping->i_private_list! In fact,
+ * mapping->i_private_list will always be protected by the backing blockdev's
+ * ->i_private_lock.
*
* Which introduces a requirement: all buffers on an address_space's
- * ->private_list must be from the same address_space: the blockdev's.
+ * ->i_private_list must be from the same address_space: the blockdev's.
*
- * address_spaces which do not place buffers at ->private_list via these
- * utility functions are free to use private_lock and private_list for
- * whatever they want. The only requirement is that list_empty(private_list)
+ * address_spaces which do not place buffers at ->i_private_list via these
+ * utility functions are free to use i_private_lock and i_private_list for
+ * whatever they want. The only requirement is that list_empty(i_private_list)
* be true at clear_inode() time.
*
* FIXME: clear_inode should not call invalidate_inode_buffers(). The
@@ -508,7 +507,7 @@ EXPORT_SYMBOL(mark_buffer_async_write);
*/
/*
- * The buffer's backing address_space's private_lock must be held
+ * The buffer's backing address_space's i_private_lock must be held
*/
static void __remove_assoc_queue(struct buffer_head *bh)
{
@@ -519,7 +518,7 @@ static void __remove_assoc_queue(struct buffer_head *bh)
int inode_has_buffers(struct inode *inode)
{
- return !list_empty(&inode->i_data.private_list);
+ return !list_empty(&inode->i_data.i_private_list);
}
/*
@@ -561,7 +560,7 @@ repeat:
* sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
* @mapping: the mapping which wants those buffers written
*
- * Starts I/O against the buffers at mapping->private_list, and waits upon
+ * Starts I/O against the buffers at mapping->i_private_list, and waits upon
* that I/O.
*
* Basically, this is a convenience function for fsync().
@@ -570,13 +569,13 @@ repeat:
*/
int sync_mapping_buffers(struct address_space *mapping)
{
- struct address_space *buffer_mapping = mapping->private_data;
+ struct address_space *buffer_mapping = mapping->i_private_data;
- if (buffer_mapping == NULL || list_empty(&mapping->private_list))
+ if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
return 0;
- return fsync_buffers_list(&buffer_mapping->private_lock,
- &mapping->private_list);
+ return fsync_buffers_list(&buffer_mapping->i_private_lock,
+ &mapping->i_private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
@@ -673,17 +672,17 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
struct address_space *buffer_mapping = bh->b_folio->mapping;
mark_buffer_dirty(bh);
- if (!mapping->private_data) {
- mapping->private_data = buffer_mapping;
+ if (!mapping->i_private_data) {
+ mapping->i_private_data = buffer_mapping;
} else {
- BUG_ON(mapping->private_data != buffer_mapping);
+ BUG_ON(mapping->i_private_data != buffer_mapping);
}
if (!bh->b_assoc_map) {
- spin_lock(&buffer_mapping->private_lock);
+ spin_lock(&buffer_mapping->i_private_lock);
list_move_tail(&bh->b_assoc_buffers,
- &mapping->private_list);
+ &mapping->i_private_list);
bh->b_assoc_map = mapping;
- spin_unlock(&buffer_mapping->private_lock);
+ spin_unlock(&buffer_mapping->i_private_lock);
}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
@@ -706,7 +705,7 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
* bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
* page on the dirty page list.
*
- * We use private_lock to lock against try_to_free_buffers while using the
+ * We use i_private_lock to lock against try_to_free_buffers while using the
* page's buffer list. Also use this to protect against clean buffers being
* added to the page after it was set dirty.
*
@@ -718,7 +717,7 @@ bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
struct buffer_head *head;
bool newly_dirty;
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
head = folio_buffers(folio);
if (head) {
struct buffer_head *bh = head;
@@ -734,7 +733,7 @@ bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
*/
folio_memcg_lock(folio);
newly_dirty = !folio_test_set_dirty(folio);
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
if (newly_dirty)
__folio_mark_dirty(folio, mapping, 1);
@@ -827,7 +826,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
smp_mb();
if (buffer_dirty(bh)) {
list_add(&bh->b_assoc_buffers,
- &mapping->private_list);
+ &mapping->i_private_list);
bh->b_assoc_map = mapping;
}
spin_unlock(lock);
@@ -851,7 +850,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
* probably unmounting the fs, but that doesn't mean we have already
* done a sync(). Just drop the buffers from the inode list.
*
- * NOTE: we take the inode's blockdev's mapping's private_lock. Which
+ * NOTE: we take the inode's blockdev's mapping's i_private_lock. Which
* assumes that all the buffers are against the blockdev. Not true
* for reiserfs.
*/
@@ -859,13 +858,13 @@ void invalidate_inode_buffers(struct inode *inode)
{
if (inode_has_buffers(inode)) {
struct address_space *mapping = &inode->i_data;
- struct list_head *list = &mapping->private_list;
- struct address_space *buffer_mapping = mapping->private_data;
+ struct list_head *list = &mapping->i_private_list;
+ struct address_space *buffer_mapping = mapping->i_private_data;
- spin_lock(&buffer_mapping->private_lock);
+ spin_lock(&buffer_mapping->i_private_lock);
while (!list_empty(list))
__remove_assoc_queue(BH_ENTRY(list->next));
- spin_unlock(&buffer_mapping->private_lock);
+ spin_unlock(&buffer_mapping->i_private_lock);
}
}
EXPORT_SYMBOL(invalidate_inode_buffers);
@@ -882,10 +881,10 @@ int remove_inode_buffers(struct inode *inode)
if (inode_has_buffers(inode)) {
struct address_space *mapping = &inode->i_data;
- struct list_head *list = &mapping->private_list;
- struct address_space *buffer_mapping = mapping->private_data;
+ struct list_head *list = &mapping->i_private_list;
+ struct address_space *buffer_mapping = mapping->i_private_data;
- spin_lock(&buffer_mapping->private_lock);
+ spin_lock(&buffer_mapping->i_private_lock);
while (!list_empty(list)) {
struct buffer_head *bh = BH_ENTRY(list->next);
if (buffer_dirty(bh)) {
@@ -894,7 +893,7 @@ int remove_inode_buffers(struct inode *inode)
}
__remove_assoc_queue(bh);
}
- spin_unlock(&buffer_mapping->private_lock);
+ spin_unlock(&buffer_mapping->i_private_lock);
}
return ret;
}
@@ -995,11 +994,12 @@ static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
* Initialise the state of a blockdev folio's buffers.
*/
static sector_t folio_init_buffers(struct folio *folio,
- struct block_device *bdev, sector_t block, int size)
+ struct block_device *bdev, unsigned size)
{
struct buffer_head *head = folio_buffers(folio);
struct buffer_head *bh = head;
bool uptodate = folio_test_uptodate(folio);
+ sector_t block = div_u64(folio_pos(folio), size);
sector_t end_block = blkdev_max_block(bdev, size);
do {
@@ -1024,86 +1024,88 @@ static sector_t folio_init_buffers(struct folio *folio,
}
/*
- * Create the page-cache page that contains the requested block.
+ * Create the page-cache folio that contains the requested block.
*
* This is used purely for blockdev mappings.
+ *
+ * Returns false if we have a failure which cannot be cured by retrying
+ * without sleeping. Returns true if we succeeded, or the caller should retry.
*/
-static int
-grow_dev_page(struct block_device *bdev, sector_t block,
- pgoff_t index, int size, int sizebits, gfp_t gfp)
+static bool grow_dev_folio(struct block_device *bdev, sector_t block,
+ pgoff_t index, unsigned size, gfp_t gfp)
{
struct inode *inode = bdev->bd_inode;
struct folio *folio;
struct buffer_head *bh;
- sector_t end_block;
- int ret = 0;
+ sector_t end_block = 0;
folio = __filemap_get_folio(inode->i_mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
if (IS_ERR(folio))
- return PTR_ERR(folio);
+ return false;
bh = folio_buffers(folio);
if (bh) {
if (bh->b_size == size) {
- end_block = folio_init_buffers(folio, bdev,
- (sector_t)index << sizebits, size);
- goto done;
+ end_block = folio_init_buffers(folio, bdev, size);
+ goto unlock;
+ }
+
+ /*
+ * Retrying may succeed; for example the folio may finish
+ * writeback, or buffers may be cleaned. This should not
+ * happen very often; maybe we have old buffers attached to
+ * this blockdev's page cache and we're trying to change
+ * the block size?
+ */
+ if (!try_to_free_buffers(folio)) {
+ end_block = ~0ULL;
+ goto unlock;
}
- if (!try_to_free_buffers(folio))
- goto failed;
}
- ret = -ENOMEM;
bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
if (!bh)
- goto failed;
+ goto unlock;
/*
* Link the folio to the buffers and initialise them. Take the
* lock to be atomic wrt __find_get_block(), which does not
* run under the folio lock.
*/
- spin_lock(&inode->i_mapping->private_lock);
+ spin_lock(&inode->i_mapping->i_private_lock);
link_dev_buffers(folio, bh);
- end_block = folio_init_buffers(folio, bdev,
- (sector_t)index << sizebits, size);
- spin_unlock(&inode->i_mapping->private_lock);
-done:
- ret = (block < end_block) ? 1 : -ENXIO;
-failed:
+ end_block = folio_init_buffers(folio, bdev, size);
+ spin_unlock(&inode->i_mapping->i_private_lock);
+unlock:
folio_unlock(folio);
folio_put(folio);
- return ret;
+ return block < end_block;
}
/*
- * Create buffers for the specified block device block's page. If
- * that page was dirty, the buffers are set dirty also.
+ * Create buffers for the specified block device block's folio. If
+ * that folio was dirty, the buffers are set dirty also. Returns false
+ * if we've hit a permanent error.
*/
-static int
-grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
+static bool grow_buffers(struct block_device *bdev, sector_t block,
+ unsigned size, gfp_t gfp)
{
- pgoff_t index;
- int sizebits;
-
- sizebits = PAGE_SHIFT - __ffs(size);
- index = block >> sizebits;
+ loff_t pos;
/*
- * Check for a block which wants to lie outside our maximum possible
- * pagecache index. (this comparison is done using sector_t types).
+ * Check for a block which lies outside our maximum possible
+ * pagecache index.
*/
- if (unlikely(index != block >> sizebits)) {
- printk(KERN_ERR "%s: requested out-of-range block %llu for "
- "device %pg\n",
+ if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
+ printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
__func__, (unsigned long long)block,
bdev);
- return -EIO;
+ return false;
}
- /* Create a page with the proper size buffers.. */
- return grow_dev_page(bdev, block, index, size, sizebits, gfp);
+ /* Create a folio with the proper size buffers */
+ return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
}
static struct buffer_head *
@@ -1124,14 +1126,12 @@ __getblk_slow(struct block_device *bdev, sector_t block,
for (;;) {
struct buffer_head *bh;
- int ret;
bh = __find_get_block(bdev, block, size);
if (bh)
return bh;
- ret = grow_buffers(bdev, block, size, gfp);
- if (ret < 0)
+ if (!grow_buffers(bdev, block, size, gfp))
return NULL;
}
}
@@ -1168,7 +1168,7 @@ __getblk_slow(struct block_device *bdev, sector_t block,
* and then attach the address_space's inode to its superblock's dirty
* inode list.
*
- * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->private_lock,
+ * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->i_private_lock,
* i_pages lock and mapping->host->i_lock.
*/
void mark_buffer_dirty(struct buffer_head *bh)
@@ -1246,10 +1246,10 @@ void __bforget(struct buffer_head *bh)
if (bh->b_assoc_map) {
struct address_space *buffer_mapping = bh->b_folio->mapping;
- spin_lock(&buffer_mapping->private_lock);
+ spin_lock(&buffer_mapping->i_private_lock);
list_del_init(&bh->b_assoc_buffers);
bh->b_assoc_map = NULL;
- spin_unlock(&buffer_mapping->private_lock);
+ spin_unlock(&buffer_mapping->i_private_lock);
}
__brelse(bh);
}
@@ -1638,7 +1638,7 @@ EXPORT_SYMBOL(block_invalidate_folio);
/*
* We attach and possibly dirty the buffers atomically wrt
- * block_dirty_folio() via private_lock. try_to_free_buffers
+ * block_dirty_folio() via i_private_lock. try_to_free_buffers
* is already excluded via the folio lock.
*/
struct buffer_head *create_empty_buffers(struct folio *folio,
@@ -1656,7 +1656,7 @@ struct buffer_head *create_empty_buffers(struct folio *folio,
} while (bh);
tail->b_this_page = head;
- spin_lock(&folio->mapping->private_lock);
+ spin_lock(&folio->mapping->i_private_lock);
if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
bh = head;
do {
@@ -1668,7 +1668,7 @@ struct buffer_head *create_empty_buffers(struct folio *folio,
} while (bh != head);
}
folio_attach_private(folio, head);
- spin_unlock(&folio->mapping->private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
return head;
}
@@ -1699,13 +1699,13 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
struct inode *bd_inode = bdev->bd_inode;
struct address_space *bd_mapping = bd_inode->i_mapping;
struct folio_batch fbatch;
- pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
+ pgoff_t index = ((loff_t)block << bd_inode->i_blkbits) / PAGE_SIZE;
pgoff_t end;
int i, count;
struct buffer_head *bh;
struct buffer_head *head;
- end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
+ end = ((loff_t)(block + len - 1) << bd_inode->i_blkbits) / PAGE_SIZE;
folio_batch_init(&fbatch);
while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
count = folio_batch_count(&fbatch);
@@ -1715,7 +1715,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
if (!folio_buffers(folio))
continue;
/*
- * We use folio lock instead of bd_mapping->private_lock
+ * We use folio lock instead of bd_mapping->i_private_lock
* to pin buffers here since we can afford to sleep and
* it scales better than a global spinlock lock.
*/
@@ -1748,19 +1748,6 @@ unlock_page:
}
EXPORT_SYMBOL(clean_bdev_aliases);
-/*
- * Size is a power-of-two in the range 512..PAGE_SIZE,
- * and the case we care about most is PAGE_SIZE.
- *
- * So this *could* possibly be written with those
- * constraints in mind (relevant mostly if some
- * architecture has a slow bit-scan instruction)
- */
-static inline int block_size_bits(unsigned int blocksize)
-{
- return ilog2(blocksize);
-}
-
static struct buffer_head *folio_create_buffers(struct folio *folio,
struct inode *inode,
unsigned int b_state)
@@ -1790,30 +1777,29 @@ static struct buffer_head *folio_create_buffers(struct folio *folio,
*/
/*
- * While block_write_full_page is writing back the dirty buffers under
+ * While block_write_full_folio is writing back the dirty buffers under
* the page lock, whoever dirtied the buffers may decide to clean them
* again at any time. We handle that by only looking at the buffer
* state inside lock_buffer().
*
- * If block_write_full_page() is called for regular writeback
+ * If block_write_full_folio() is called for regular writeback
* (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
* locked buffer. This only can happen if someone has written the buffer
* directly, with submit_bh(). At the address_space level PageWriteback
* prevents this contention from occurring.
*
- * If block_write_full_page() is called with wbc->sync_mode ==
+ * If block_write_full_folio() is called with wbc->sync_mode ==
* WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
* causes the writes to be flagged as synchronous writes.
*/
int __block_write_full_folio(struct inode *inode, struct folio *folio,
- get_block_t *get_block, struct writeback_control *wbc,
- bh_end_io_t *handler)
+ get_block_t *get_block, struct writeback_control *wbc)
{
int err;
sector_t block;
sector_t last_block;
struct buffer_head *bh, *head;
- unsigned int blocksize, bbits;
+ size_t blocksize;
int nr_underway = 0;
blk_opf_t write_flags = wbc_to_write_flags(wbc);
@@ -1832,10 +1818,9 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
bh = head;
blocksize = bh->b_size;
- bbits = block_size_bits(blocksize);
- block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
- last_block = (i_size_read(inode) - 1) >> bbits;
+ block = div_u64(folio_pos(folio), blocksize);
+ last_block = div_u64(i_size_read(inode) - 1, blocksize);
/*
* Get all the dirty buffers mapped to disk addresses and
@@ -1849,7 +1834,7 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
* truncate in progress.
*/
/*
- * The buffer was zeroed by block_write_full_page()
+ * The buffer was zeroed by block_write_full_folio()
*/
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
@@ -1887,7 +1872,8 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
continue;
}
if (test_clear_buffer_dirty(bh)) {
- mark_buffer_async_write_endio(bh, handler);
+ mark_buffer_async_write_endio(bh,
+ end_buffer_async_write);
} else {
unlock_buffer(bh);
}
@@ -1940,7 +1926,8 @@ recover:
if (buffer_mapped(bh) && buffer_dirty(bh) &&
!buffer_delay(bh)) {
lock_buffer(bh);
- mark_buffer_async_write_endio(bh, handler);
+ mark_buffer_async_write_endio(bh,
+ end_buffer_async_write);
} else {
/*
* The buffer may have been set dirty during
@@ -2014,7 +2001,7 @@ static int
iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
const struct iomap *iomap)
{
- loff_t offset = block << inode->i_blkbits;
+ loff_t offset = (loff_t)block << inode->i_blkbits;
bh->b_bdev = iomap->bdev;
@@ -2081,27 +2068,24 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block, const struct iomap *iomap)
{
- unsigned from = pos & (PAGE_SIZE - 1);
- unsigned to = from + len;
+ size_t from = offset_in_folio(folio, pos);
+ size_t to = from + len;
struct inode *inode = folio->mapping->host;
- unsigned block_start, block_end;
+ size_t block_start, block_end;
sector_t block;
int err = 0;
- unsigned blocksize, bbits;
+ size_t blocksize;
struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
BUG_ON(!folio_test_locked(folio));
- BUG_ON(from > PAGE_SIZE);
- BUG_ON(to > PAGE_SIZE);
+ BUG_ON(to > folio_size(folio));
BUG_ON(from > to);
head = folio_create_buffers(folio, inode, 0);
blocksize = head->b_size;
- bbits = block_size_bits(blocksize);
+ block = div_u64(folio_pos(folio), blocksize);
- block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
-
- for(bh = head, block_start = 0; bh != head || !block_start;
+ for (bh = head, block_start = 0; bh != head || !block_start;
block++, block_start=block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
@@ -2364,7 +2348,7 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
struct inode *inode = folio->mapping->host;
sector_t iblock, lblock;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
- unsigned int blocksize, bbits;
+ size_t blocksize;
int nr, i;
int fully_mapped = 1;
bool page_error = false;
@@ -2378,10 +2362,9 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
head = folio_create_buffers(folio, inode, 0);
blocksize = head->b_size;
- bbits = block_size_bits(blocksize);
- iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
- lblock = (limit+blocksize-1) >> bbits;
+ iblock = div_u64(folio_pos(folio), blocksize);
+ lblock = div_u64(limit + blocksize - 1, blocksize);
bh = head;
nr = 0;
i = 0;
@@ -2666,8 +2649,8 @@ int block_truncate_page(struct address_space *mapping,
return 0;
length = blocksize - length;
- iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
-
+ iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
+
folio = filemap_grab_folio(mapping, index);
if (IS_ERR(folio))
return PTR_ERR(folio);
@@ -2720,17 +2703,15 @@ EXPORT_SYMBOL(block_truncate_page);
/*
* The generic ->writepage function for buffer-backed address_spaces
*/
-int block_write_full_page(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc)
+int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
+ void *get_block)
{
- struct folio *folio = page_folio(page);
struct inode * const inode = folio->mapping->host;
loff_t i_size = i_size_read(inode);
/* Is the folio fully inside i_size? */
if (folio_pos(folio) + folio_size(folio) <= i_size)
- return __block_write_full_folio(inode, folio, get_block, wbc,
- end_buffer_async_write);
+ return __block_write_full_folio(inode, folio, get_block, wbc);
/* Is the folio fully outside i_size? (truncate in progress) */
if (folio_pos(folio) >= i_size) {
@@ -2747,10 +2728,8 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
*/
folio_zero_segment(folio, offset_in_folio(folio, i_size),
folio_size(folio));
- return __block_write_full_folio(inode, folio, get_block, wbc,
- end_buffer_async_write);
+ return __block_write_full_folio(inode, folio, get_block, wbc);
}
-EXPORT_SYMBOL(block_write_full_page);
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
get_block_t *get_block)
@@ -2883,7 +2862,7 @@ EXPORT_SYMBOL(sync_dirty_buffer);
* are unused, and releases them if so.
*
* Exclusion against try_to_free_buffers may be obtained by either
- * locking the folio or by holding its mapping's private_lock.
+ * locking the folio or by holding its mapping's i_private_lock.
*
* If the folio is dirty but all the buffers are clean then we need to
* be sure to mark the folio clean as well. This is because the folio
@@ -2894,7 +2873,7 @@ EXPORT_SYMBOL(sync_dirty_buffer);
* The same applies to regular filesystem folios: if all the buffers are
* clean then we set the folio clean and proceed. To do that, we require
* total exclusion from block_dirty_folio(). That is obtained with
- * private_lock.
+ * i_private_lock.
*
* try_to_free_buffers() is non-blocking.
*/
@@ -2946,7 +2925,7 @@ bool try_to_free_buffers(struct folio *folio)
goto out;
}
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
ret = drop_buffers(folio, &buffers_to_free);
/*
@@ -2959,13 +2938,13 @@ bool try_to_free_buffers(struct folio *folio)
* the folio's buffers clean. We discover that here and clean
* the folio also.
*
- * private_lock must be held over this entire operation in order
+ * i_private_lock must be held over this entire operation in order
* to synchronise against block_dirty_folio and prevent the
* dirty bit from being lost.
*/
if (ret)
folio_cancel_dirty(folio);
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
out:
if (buffers_to_free) {
struct buffer_head *bh = buffers_to_free;
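
Beyond the private_lock/private_list renames, the buffer.c hunks consistently replace shift-based index arithmetic with explicit byte-offset arithmetic: block >> (PAGE_SHIFT - blkbits) becomes ((loff_t)block << blkbits) / PAGE_SIZE, and per-folio block numbers become div_u64(folio_pos(folio), blocksize). The shift form quietly assumes the block size never exceeds the page size (PAGE_SHIFT - blkbits would otherwise be a negative shift count); the byte-offset form stays valid for any block size and for large folios, which appears to be the point of the conversion. A worked example with 4 KiB pages:

	/* 512-byte blocks (blkbits = 9): both forms agree			*/
	/*   block 72:  72 >> (12 - 9) = 9;   (72 << 9) / 4096 = 9		*/
	/* 16 KiB blocks (blkbits = 14): only the byte-offset form is defined	*/
	/*   block 3:   (3 << 14) / 4096 = 12  (folio index 12, byte 49152)	*/
	/*   3 >> (12 - 14) is a shift by a negative count: undefined behaviour	*/
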
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index aa4efcabb5e3..3f24905f4066 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -77,6 +77,7 @@ static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
{ "tag", cachefiles_daemon_tag },
#ifdef CONFIG_CACHEFILES_ONDEMAND
{ "copen", cachefiles_ondemand_copen },
+ { "restore", cachefiles_ondemand_restore },
#endif
{ "", NULL }
};
@@ -355,14 +356,24 @@ static __poll_t cachefiles_daemon_poll(struct file *file,
struct poll_table_struct *poll)
{
struct cachefiles_cache *cache = file->private_data;
+ XA_STATE(xas, &cache->reqs, 0);
+ struct cachefiles_req *req;
__poll_t mask;
poll_wait(file, &cache->daemon_pollwq, poll);
mask = 0;
if (cachefiles_in_ondemand_mode(cache)) {
- if (!xa_empty(&cache->reqs))
- mask |= EPOLLIN;
+ if (!xa_empty(&cache->reqs)) {
+ rcu_read_lock();
+ xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
+ if (!cachefiles_ondemand_is_reopening_read(req)) {
+ mask |= EPOLLIN;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ }
} else {
if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
mask |= EPOLLIN;
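
The cachefiles_daemon_poll() change above stops waking the daemon for READ requests whose object is still being reopened: instead of treating any pending request as readable, it walks only the entries marked CACHEFILES_REQ_NEW, under rcu_read_lock() and without taking xa_lock, and raises EPOLLIN as soon as one of them is not a reopening read. Walking marked entries under RCU is a standard XArray pattern; a generic sketch of it (the mark and types are placeholders, not cachefiles names):

#include <linux/rcupdate.h>
#include <linux/xarray.h>

static bool any_marked_entry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	bool found = false;

	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
		found = true;		/* 'entry' may be inspected here under RCU */
		break;
	}
	rcu_read_unlock();
	return found;
}
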
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 40052bdb3365..35ba2117a6f6 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -31,6 +31,11 @@ struct cachefiles_object *cachefiles_alloc_object(struct fscache_cookie *cookie)
if (!object)
return NULL;
+ if (cachefiles_ondemand_init_obj_info(object, volume)) {
+ kmem_cache_free(cachefiles_object_jar, object);
+ return NULL;
+ }
+
refcount_set(&object->ref, 1);
spin_lock_init(&object->lock);
@@ -88,7 +93,7 @@ void cachefiles_put_object(struct cachefiles_object *object,
ASSERTCMP(object->file, ==, NULL);
kfree(object->d_name);
-
+ cachefiles_ondemand_deinit_obj_info(object);
cache = object->volume->cache->cache;
fscache_put_cookie(object->cookie, fscache_cookie_put_object);
object->cookie = NULL;
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 2ad58c465208..4a87c9d714a9 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -44,6 +44,19 @@ struct cachefiles_volume {
struct dentry *fanout[256]; /* Fanout subdirs */
};
+enum cachefiles_object_state {
+ CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */
+ CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */
+ CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. */
+};
+
+struct cachefiles_ondemand_info {
+ struct work_struct ondemand_work;
+ int ondemand_id;
+ enum cachefiles_object_state state;
+ struct cachefiles_object *object;
+};
+
/*
* Backing file state.
*/
@@ -61,7 +74,7 @@ struct cachefiles_object {
unsigned long flags;
#define CACHEFILES_OBJECT_USING_TMPFILE 0 /* Have an unlinked tmpfile */
#ifdef CONFIG_CACHEFILES_ONDEMAND
- int ondemand_id;
+ struct cachefiles_ondemand_info *ondemand;
#endif
};
@@ -290,12 +303,42 @@ extern ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
extern int cachefiles_ondemand_copen(struct cachefiles_cache *cache,
char *args);
+extern int cachefiles_ondemand_restore(struct cachefiles_cache *cache,
+ char *args);
+
extern int cachefiles_ondemand_init_object(struct cachefiles_object *object);
extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object);
extern int cachefiles_ondemand_read(struct cachefiles_object *object,
loff_t pos, size_t len);
+extern int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj,
+ struct cachefiles_volume *volume);
+extern void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj);
+
+#define CACHEFILES_OBJECT_STATE_FUNCS(_state, _STATE) \
+static inline bool \
+cachefiles_ondemand_object_is_##_state(const struct cachefiles_object *object) \
+{ \
+ return object->ondemand->state == CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
+} \
+ \
+static inline void \
+cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
+{ \
+ object->ondemand->state = CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
+}
+
+CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
+CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE);
+CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING);
+
+static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
+{
+ return cachefiles_ondemand_object_is_reopening(req->object) &&
+ req->msg.opcode == CACHEFILES_OP_READ;
+}
+
#else
static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
char __user *_buffer, size_t buflen)
@@ -317,6 +360,20 @@ static inline int cachefiles_ondemand_read(struct cachefiles_object *object,
{
return -EOPNOTSUPP;
}
+
+static inline int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj,
+ struct cachefiles_volume *volume)
+{
+ return 0;
+}
+static inline void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj)
+{
+}
+
+static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
+{
+ return false;
+}
#endif
/*
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index 009d23cd435b..5857241c5918 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -259,7 +259,8 @@ static void cachefiles_write_complete(struct kiocb *iocb, long ret)
_enter("%ld", ret);
- kiocb_end_write(iocb);
+ if (ki->was_async)
+ kiocb_end_write(iocb);
if (ret < 0)
trace_cachefiles_io_error(object, inode, ret,
@@ -319,8 +320,6 @@ int __cachefiles_write(struct cachefiles_object *object,
ki->iocb.ki_complete = cachefiles_write_complete;
atomic_long_add(ki->b_writing, &cache->b_writing);
- kiocb_start_write(&ki->iocb);
-
get_file(ki->iocb.ki_filp);
cachefiles_grab_object(object, cachefiles_obj_get_ioreq);
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index 0254ed39f68c..b8fbbb1961bb 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -9,21 +9,19 @@ static int cachefiles_ondemand_fd_release(struct inode *inode,
{
struct cachefiles_object *object = file->private_data;
struct cachefiles_cache *cache = object->volume->cache;
- int object_id = object->ondemand_id;
+ struct cachefiles_ondemand_info *info = object->ondemand;
+ int object_id = info->ondemand_id;
struct cachefiles_req *req;
XA_STATE(xas, &cache->reqs, 0);
xa_lock(&cache->reqs);
- object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
+ info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
+ cachefiles_ondemand_set_object_close(object);
- /*
- * Flush all pending READ requests since their completion depends on
- * anon_fd.
- */
- xas_for_each(&xas, req, ULONG_MAX) {
+ /* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */
+ xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
if (req->msg.object_id == object_id &&
- req->msg.opcode == CACHEFILES_OP_READ) {
- req->error = -EIO;
+ req->msg.opcode == CACHEFILES_OP_CLOSE) {
complete(&req->done);
xas_store(&xas, NULL);
}
@@ -176,11 +174,37 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
trace_cachefiles_ondemand_copen(req->object, id, size);
+ cachefiles_ondemand_set_object_open(req->object);
+ wake_up_all(&cache->daemon_pollwq);
+
out:
complete(&req->done);
return ret;
}
+int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
+{
+ struct cachefiles_req *req;
+
+ XA_STATE(xas, &cache->reqs, 0);
+
+ if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+ return -EOPNOTSUPP;
+
+ /*
+ * Reset the requests to CACHEFILES_REQ_NEW state, so that requests
+ * that were processed halfway before the user daemon crashed can be
+ * reprocessed after recovery.
+ */
+ xas_lock(&xas);
+ xas_for_each(&xas, req, ULONG_MAX)
+ xas_set_mark(&xas, CACHEFILES_REQ_NEW);
+ xas_unlock(&xas);
+
+ wake_up_all(&cache->daemon_pollwq);
+ return 0;
+}
+
static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
{
struct cachefiles_object *object;
@@ -218,8 +242,7 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
load = (void *)req->msg.data;
load->fd = fd;
- req->msg.object_id = object_id;
- object->ondemand_id = object_id;
+ object->ondemand->ondemand_id = object_id;
cachefiles_get_unbind_pincount(cache);
trace_cachefiles_ondemand_open(object, &req->msg, load);
@@ -234,6 +257,43 @@ err:
return ret;
}
+static void ondemand_object_worker(struct work_struct *work)
+{
+ struct cachefiles_ondemand_info *info =
+ container_of(work, struct cachefiles_ondemand_info, ondemand_work);
+
+ cachefiles_ondemand_init_object(info->object);
+}
+
+/*
+ * If there are any inflight or subsequent READ requests on the
+ * closed object, reopen it.
+ * Skip read requests whose related object is reopening.
+ */
+static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
+ unsigned long xa_max)
+{
+ struct cachefiles_req *req;
+ struct cachefiles_object *object;
+ struct cachefiles_ondemand_info *info;
+
+ xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) {
+ if (req->msg.opcode != CACHEFILES_OP_READ)
+ return req;
+ object = req->object;
+ info = object->ondemand;
+ if (cachefiles_ondemand_object_is_close(object)) {
+ cachefiles_ondemand_set_object_reopening(object);
+ queue_work(fscache_wq, &info->ondemand_work);
+ continue;
+ }
+ if (cachefiles_ondemand_object_is_reopening(object))
+ continue;
+ return req;
+ }
+ return NULL;
+}
+
ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
char __user *_buffer, size_t buflen)
{
@@ -244,16 +304,16 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
int ret = 0;
XA_STATE(xas, &cache->reqs, cache->req_id_next);
+ xa_lock(&cache->reqs);
/*
* Cyclically search for a request that has not ever been processed,
* to prevent requests from being processed repeatedly, and make
* request distribution fair.
*/
- xa_lock(&cache->reqs);
- req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
+ req = cachefiles_ondemand_select_req(&xas, ULONG_MAX);
if (!req && cache->req_id_next > 0) {
xas_set(&xas, 0);
- req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
+ req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1);
}
if (!req) {
xa_unlock(&cache->reqs);
@@ -273,14 +333,18 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
xa_unlock(&cache->reqs);
id = xas.xa_index;
- msg->msg_id = id;
if (msg->opcode == CACHEFILES_OP_OPEN) {
ret = cachefiles_ondemand_get_fd(req);
- if (ret)
+ if (ret) {
+ cachefiles_ondemand_set_object_close(req->object);
goto error;
+ }
}
+ msg->msg_id = id;
+ msg->object_id = req->object->ondemand->ondemand_id;
+
if (copy_to_user(_buffer, msg, n) != 0) {
ret = -EFAULT;
goto err_put_fd;
@@ -313,19 +377,23 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
void *private)
{
struct cachefiles_cache *cache = object->volume->cache;
- struct cachefiles_req *req;
+ struct cachefiles_req *req = NULL;
XA_STATE(xas, &cache->reqs, 0);
int ret;
if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
return 0;
- if (test_bit(CACHEFILES_DEAD, &cache->flags))
- return -EIO;
+ if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+ ret = -EIO;
+ goto out;
+ }
req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
req->object = object;
init_completion(&req->done);
@@ -363,8 +431,9 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
/* coupled with the barrier in cachefiles_flush_reqs() */
smp_mb();
- if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) {
- WARN_ON_ONCE(object->ondemand_id == 0);
+ if (opcode == CACHEFILES_OP_CLOSE &&
+ !cachefiles_ondemand_object_is_open(object)) {
+ WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
xas_unlock(&xas);
ret = -EIO;
goto out;
@@ -387,7 +456,15 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
wake_up_all(&cache->daemon_pollwq);
wait_for_completion(&req->done);
ret = req->error;
+ kfree(req);
+ return ret;
out:
+ /* Reset the object to close state in the error handling path.
+ * If an error occurs after creating the anonymous fd,
+ * cachefiles_ondemand_fd_release() will set the object to close.
+ */
+ if (opcode == CACHEFILES_OP_OPEN)
+ cachefiles_ondemand_set_object_close(object);
kfree(req);
return ret;
}
@@ -430,18 +507,10 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
void *private)
{
struct cachefiles_object *object = req->object;
- int object_id = object->ondemand_id;
- /*
- * It's possible that object id is still 0 if the cookie looking up
- * phase failed before OPEN request has ever been sent. Also avoid
- * sending CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which means
- * anon_fd has already been closed.
- */
- if (object_id <= 0)
+ if (!cachefiles_ondemand_object_is_open(object))
return -ENOENT;
- req->msg.object_id = object_id;
trace_cachefiles_ondemand_close(object, &req->msg);
return 0;
}
@@ -457,16 +526,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
struct cachefiles_object *object = req->object;
struct cachefiles_read *load = (void *)req->msg.data;
struct cachefiles_read_ctx *read_ctx = private;
- int object_id = object->ondemand_id;
-
- /* Stop enqueuing requests when daemon has closed anon_fd. */
- if (object_id <= 0) {
- WARN_ON_ONCE(object_id == 0);
- pr_info_once("READ: anonymous fd closed prematurely.\n");
- return -EIO;
- }
- req->msg.object_id = object_id;
load->off = read_ctx->off;
load->len = read_ctx->len;
trace_cachefiles_ondemand_read(object, &req->msg, load);
@@ -485,7 +545,7 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
* creating a new tmpfile as the cache file. Reuse the previously
* allocated object ID if any.
*/
- if (object->ondemand_id > 0)
+ if (cachefiles_ondemand_object_is_open(object))
return 0;
volume_key_size = volume->key[0] + 1;
@@ -503,6 +563,28 @@ void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
cachefiles_ondemand_init_close_req, NULL);
}
+int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
+ struct cachefiles_volume *volume)
+{
+ if (!cachefiles_in_ondemand_mode(volume->cache))
+ return 0;
+
+ object->ondemand = kzalloc(sizeof(struct cachefiles_ondemand_info),
+ GFP_KERNEL);
+ if (!object->ondemand)
+ return -ENOMEM;
+
+ object->ondemand->object = object;
+ INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
+ return 0;
+}
+
+void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *object)
+{
+ kfree(object->ondemand);
+ object->ondemand = NULL;
+}
+
int cachefiles_ondemand_read(struct cachefiles_object *object,
loff_t pos, size_t len)
{
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 85be3bf18cdf..13af429ab030 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -907,8 +907,8 @@ static void writepages_finish(struct ceph_osd_request *req)
doutc(cl, "unlocking %p\n", page);
if (remove_page)
- generic_error_remove_page(inode->i_mapping,
- page);
+ generic_error_remove_folio(inode->i_mapping,
+ page_folio(page));
unlock_page(page);
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 3b5aae29e944..d380d9dad0e0 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -12,6 +12,7 @@
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>
+#include <linux/splice.h>
#include "super.h"
#include "mds_client.h"
@@ -3010,8 +3011,8 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
* {read,write}_iter, which will get caps again.
*/
put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
- ret = do_splice_direct(src_file, &src_off, dst_file,
- &dst_off, src_objlen, flags);
+ ret = splice_file_range(src_file, &src_off, dst_file, &dst_off,
+ src_objlen);
/* Abort on short copies or on error */
if (ret < (long)src_objlen) {
doutc(cl, "Failed partial copy (%zd)\n", ret);
@@ -3065,8 +3066,8 @@ out_caps:
*/
if (len && (len < src_ci->i_layout.object_size)) {
doutc(cl, "Final partial copy of %zu bytes\n", len);
- bytes = do_splice_direct(src_file, &src_off, dst_file,
- &dst_off, len, flags);
+ bytes = splice_file_range(src_file, &src_off, dst_file,
+ &dst_off, len);
if (bytes > 0)
ret += bytes;
else
@@ -3089,8 +3090,8 @@ static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
len, flags);
if (ret == -EOPNOTSUPP || ret == -EXDEV)
- ret = generic_copy_file_range(src_file, src_off, dst_file,
- dst_off, len, flags);
+ ret = splice_copy_file_range(src_file, src_off, dst_file,
+ dst_off, len);
return ret;
}
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 16acc58311ea..148856a582a9 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -79,14 +79,12 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
if (ret)
goto finish_write;
- file_start_write(host_file);
inode_lock(coda_inode);
ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0);
coda_inode->i_size = file_inode(host_file)->i_size;
coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
inode_set_mtime_to_ts(coda_inode, inode_set_ctime_current(coda_inode));
inode_unlock(coda_inode);
- file_end_write(host_file);
finish_write:
venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
diff --git a/fs/dax.c b/fs/dax.c
index 3380b43cb6bb..423fc1607dfa 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1128,7 +1128,7 @@ static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size,
/* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */
bool zero_edge = srcmap->flags & IOMAP_F_SHARED ||
srcmap->type == IOMAP_UNWRITTEN;
- void *saddr = 0;
+ void *saddr = NULL;
int ret = 0;
if (!zero_edge) {
diff --git a/fs/dcache.c b/fs/dcache.c
index c82ae731df9a..2ba37643b9c5 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -428,7 +428,8 @@ static void d_lru_add(struct dentry *dentry)
this_cpu_inc(nr_dentry_unused);
if (d_is_negative(dentry))
this_cpu_inc(nr_dentry_negative);
- WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
+ WARN_ON_ONCE(!list_lru_add_obj(
+ &dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
static void d_lru_del(struct dentry *dentry)
@@ -438,7 +439,8 @@ static void d_lru_del(struct dentry *dentry)
this_cpu_dec(nr_dentry_unused);
if (d_is_negative(dentry))
this_cpu_dec(nr_dentry_negative);
- WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
+ WARN_ON_ONCE(!list_lru_del_obj(
+ &dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
static void d_shrink_del(struct dentry *dentry)
@@ -1240,7 +1242,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
*
* This is guaranteed by the fact that all LRU management
* functions are intermediated by the LRU API calls like
- * list_lru_add and list_lru_del. List movement in this file
+ * list_lru_add_obj and list_lru_del_obj. List movement in this file
* only ever occurs through these functions or through callbacks
* like this one, that are called from the LRU API.
*
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 6d7c1a49581f..c6f4a9a98b85 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -1100,17 +1100,35 @@ static ssize_t read_file_blob(struct file *file, char __user *user_buf,
return r;
}
+static ssize_t write_file_blob(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debugfs_blob_wrapper *blob = file->private_data;
+ struct dentry *dentry = F_DENTRY(file);
+ ssize_t r;
+
+ r = debugfs_file_get(dentry);
+ if (unlikely(r))
+ return r;
+ r = simple_write_to_buffer(blob->data, blob->size, ppos, user_buf,
+ count);
+
+ debugfs_file_put(dentry);
+ return r;
+}
+
static const struct file_operations fops_blob = {
.read = read_file_blob,
+ .write = write_file_blob,
.open = simple_open,
.llseek = default_llseek,
};
/**
- * debugfs_create_blob - create a debugfs file that is used to read a binary blob
+ * debugfs_create_blob - create a debugfs file that is used to read and write
+ * a binary blob
* @name: a pointer to a string containing the name of the file to create.
- * @mode: the read permission that the file should have (other permissions are
- * masked out)
+ * @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
@@ -1119,7 +1137,7 @@ static const struct file_operations fops_blob = {
*
* This function creates a file in debugfs with the given name that exports
* @blob->data as a binary blob. If the @mode variable is so set it can be
- * read from. Writing is not supported.
+ * read from and written to.
*
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
@@ -1134,7 +1152,7 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob)
{
- return debugfs_create_file_unsafe(name, mode & 0444, parent, blob, &fops_blob);
+ return debugfs_create_file_unsafe(name, mode & 0644, parent, blob, &fops_blob);
}
EXPORT_SYMBOL_GPL(debugfs_create_blob);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 20533266ade6..60456263a338 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1114,7 +1114,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
loff_t offset = iocb->ki_pos;
const loff_t end = offset + count;
struct dio *dio;
- struct dio_submit sdio = { 0, };
+ struct dio_submit sdio = { NULL, };
struct buffer_head map_bh = { 0, };
struct blk_plug plug;
unsigned long align = offset | iov_iter_alignment(iter);
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 33a918f9566c..ad8186d47ba7 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -43,7 +43,17 @@ struct eventfd_ctx {
int id;
};
-__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask)
+/**
+ * eventfd_signal_mask - Increment the event counter
+ * @ctx: [in] Pointer to the eventfd context.
+ * @mask: [in] poll mask
+ *
+ * This function is supposed to be called by the kernel in paths that do not
+ * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
+ * value, and we signal this as an overflow condition by returning an EPOLLERR
+ * to poll(2).
+ */
+void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
unsigned long flags;
@@ -56,45 +66,23 @@ __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask)
* safe context.
*/
if (WARN_ON_ONCE(current->in_eventfd))
- return 0;
+ return;
spin_lock_irqsave(&ctx->wqh.lock, flags);
current->in_eventfd = 1;
- if (ULLONG_MAX - ctx->count < n)
- n = ULLONG_MAX - ctx->count;
- ctx->count += n;
+ if (ctx->count < ULLONG_MAX)
+ ctx->count++;
if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
current->in_eventfd = 0;
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
-
- return n;
-}
-
-/**
- * eventfd_signal - Adds @n to the eventfd counter.
- * @ctx: [in] Pointer to the eventfd context.
- * @n: [in] Value of the counter to be added to the eventfd internal counter.
- * The value cannot be negative.
- *
- * This function is supposed to be called by the kernel in paths that do not
- * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
- * value, and we signal this as overflow condition by returning a EPOLLERR
- * to poll(2).
- *
- * Returns the amount by which the counter was incremented. This will be less
- * than @n if the counter has overflowed.
- */
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
-{
- return eventfd_signal_mask(ctx, n, 0);
}
-EXPORT_SYMBOL_GPL(eventfd_signal);
+EXPORT_SYMBOL_GPL(eventfd_signal_mask);
static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
if (ctx->id >= 0)
- ida_simple_remove(&eventfd_ida, ctx->id);
+ ida_free(&eventfd_ida, ctx->id);
kfree(ctx);
}
@@ -407,7 +395,7 @@ static int do_eventfd(unsigned int count, int flags)
init_waitqueue_head(&ctx->wqh);
ctx->count = count;
ctx->flags = flags;
- ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);
+ ctx->id = ida_alloc(&eventfd_ida, GFP_KERNEL);
flags &= EFD_SHARED_FCNTL_FLAGS;
flags |= O_RDWR;
diff --git a/fs/exec.c b/fs/exec.c
index 4aa19b24f281..ee43597cb453 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1578,11 +1578,10 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
* will be able to manipulate the current directory, etc.
* It would be nice to force an unshare instead...
*/
- t = p;
n_fs = 1;
spin_lock(&p->fs->lock);
rcu_read_lock();
- while_each_thread(p, t) {
+ for_other_threads(p, t) {
if (t->fs == p->fs)
n_fs++;
}
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 464faf6c217e..5a4272b2c6b0 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -969,7 +969,7 @@ const struct address_space_operations ext2_aops = {
.writepages = ext2_writepages,
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
};
static const struct address_space_operations ext2_dax_aops = {
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 9a84a5f9fef4..d5bd1e3a5d36 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -502,9 +502,8 @@ static int ext4_read_inline_folio(struct inode *inode, struct folio *folio)
BUG_ON(len > PAGE_SIZE);
kaddr = kmap_local_folio(folio, 0);
ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
- flush_dcache_folio(folio);
+ kaddr = folio_zero_tail(folio, len, kaddr + len);
kunmap_local(kaddr);
- folio_zero_segment(folio, len, folio_size(folio));
folio_mark_uptodate(folio);
brelse(iloc.bh);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 61277f7f8722..83ee4e0f46f4 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1261,7 +1261,7 @@ static int write_end_fn(handle_t *handle, struct inode *inode,
* We need to pick up the new inode size which generic_commit_write gave us
* `file' can be NULL - eg, when called from page_symlink().
*
- * ext4 never places buffers on inode->i_mapping->private_list. metadata
+ * ext4 never places buffers on inode->i_mapping->i_private_list. metadata
* buffers are managed internally.
*/
static int ext4_write_end(struct file *file,
@@ -3213,7 +3213,7 @@ static bool ext4_inode_datasync_dirty(struct inode *inode)
}
/* Any metadata buffers to write? */
- if (!list_empty(&inode->i_mapping->private_list))
+ if (!list_empty(&inode->i_mapping->i_private_list))
return true;
return inode->i_state & I_DIRTY_DATASYNC;
}
@@ -3564,7 +3564,7 @@ static const struct address_space_operations ext4_aops = {
.direct_IO = noop_direct_IO,
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
.swap_activate = ext4_iomap_swap_activate,
};
@@ -3581,7 +3581,7 @@ static const struct address_space_operations ext4_journalled_aops = {
.direct_IO = noop_direct_IO,
.migrate_folio = buffer_migrate_folio_norefs,
.is_partially_uptodate = block_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
.swap_activate = ext4_iomap_swap_activate,
};
@@ -3598,7 +3598,7 @@ static const struct address_space_operations ext4_da_aops = {
.direct_IO = noop_direct_IO,
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
.swap_activate = ext4_iomap_swap_activate,
};
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 4f931f80cb34..aa6be510eb8f 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -819,11 +819,11 @@ int ext4_force_shutdown(struct super_block *sb, u32 flags)
switch (flags) {
case EXT4_GOING_FLAGS_DEFAULT:
- ret = freeze_bdev(sb->s_bdev);
+ ret = bdev_freeze(sb->s_bdev);
if (ret)
return ret;
set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
- thaw_bdev(sb->s_bdev);
+ bdev_thaw(sb->s_bdev);
break;
case EXT4_GOING_FLAGS_LOGFLUSH:
set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index dfdd7e5cf038..312bc6813357 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -444,7 +444,7 @@ int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
folio_clear_error(folio);
/*
- * Comments copied from block_write_full_page:
+ * Comments copied from block_write_full_folio:
*
* The folio straddles i_size. It must be zeroed out on each and every
* writepage invocation because it may be mmapped. "A file is mapped
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c5fcf377ab1f..0980845c8b8f 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -5864,11 +5864,9 @@ static struct bdev_handle *ext4_get_journal_blkdev(struct super_block *sb,
struct ext4_super_block *es;
int errno;
- /* see get_tree_bdev why this is needed and safe */
- up_write(&sb->s_umount);
- bdev_handle = bdev_open_by_dev(j_dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
- sb, &fs_holder_ops);
- down_write(&sb->s_umount);
+ bdev_handle = bdev_open_by_dev(j_dev,
+ BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
+ sb, &fs_holder_ops);
if (IS_ERR(bdev_handle)) {
ext4_msg(sb, KERN_ERR,
"failed to open journal device unknown-block(%u,%u) %ld",
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 36e5dab6baae..6b2af514660d 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1944,7 +1944,7 @@ void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
continue;
}
- generic_error_remove_page(mapping, &folio->page);
+ generic_error_remove_folio(mapping, folio);
folio_unlock(folio);
}
folio_batch_release(&fbatch);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index e50363583f01..4580dfefd5e9 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -2239,11 +2239,11 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
switch (in) {
case F2FS_GOING_DOWN_FULLSYNC:
- ret = freeze_bdev(sb->s_bdev);
+ ret = bdev_freeze(sb->s_bdev);
if (ret)
goto out;
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
- thaw_bdev(sb->s_bdev);
+ bdev_thaw(sb->s_bdev);
break;
case F2FS_GOING_DOWN_METASYNC:
/* do checkpoint only */
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 560bfcad1af2..a9eb3891f417 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -600,7 +600,7 @@ make_now:
#ifdef CONFIG_F2FS_FS_COMPRESSION
inode->i_mapping->a_ops = &f2fs_compress_aops;
/*
- * generic_error_remove_page only truncates pages of regular
+ * generic_error_remove_folio only truncates pages of regular
* inode
*/
inode->i_mode |= S_IFREG;
diff --git a/fs/file.c b/fs/file.c
index 5fb0b146e79e..3b683b9101d8 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -629,19 +629,23 @@ void fd_install(unsigned int fd, struct file *file)
EXPORT_SYMBOL(fd_install);
/**
- * pick_file - return file associatd with fd
+ * file_close_fd_locked - return file associated with fd
* @files: file struct to retrieve file from
* @fd: file descriptor to retrieve file for
*
+ * Doesn't take a separate reference count.
+ *
* Context: files_lock must be held.
*
* Returns: The file associated with @fd (NULL if @fd is not open)
*/
-static struct file *pick_file(struct files_struct *files, unsigned fd)
+struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
{
struct fdtable *fdt = files_fdtable(files);
struct file *file;
+ lockdep_assert_held(&files->file_lock);
+
if (fd >= fdt->max_fds)
return NULL;
@@ -660,7 +664,7 @@ int close_fd(unsigned fd)
struct file *file;
spin_lock(&files->file_lock);
- file = pick_file(files, fd);
+ file = file_close_fd_locked(files, fd);
spin_unlock(&files->file_lock);
if (!file)
return -EBADF;
@@ -707,7 +711,7 @@ static inline void __range_close(struct files_struct *files, unsigned int fd,
max_fd = min(max_fd, n);
for (; fd <= max_fd; fd++) {
- file = pick_file(files, fd);
+ file = file_close_fd_locked(files, fd);
if (file) {
spin_unlock(&files->file_lock);
filp_close(file, files);
@@ -795,26 +799,21 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
return 0;
}
-/*
- * See close_fd_get_file() below, this variant assumes current->files->file_lock
- * is held.
- */
-struct file *__close_fd_get_file(unsigned int fd)
-{
- return pick_file(current->files, fd);
-}
-
-/*
- * variant of close_fd that gets a ref on the file for later fput.
- * The caller must ensure that filp_close() called on the file.
+/**
+ * file_close_fd - return file associated with fd
+ * @fd: file descriptor to retrieve file for
+ *
+ * Doesn't take a separate reference count.
+ *
+ * Returns: The file associated with @fd (NULL if @fd is not open)
*/
-struct file *close_fd_get_file(unsigned int fd)
+struct file *file_close_fd(unsigned int fd)
{
struct files_struct *files = current->files;
struct file *file;
spin_lock(&files->file_lock);
- file = pick_file(files, fd);
+ file = file_close_fd_locked(files, fd);
spin_unlock(&files->file_lock);
return file;
@@ -959,31 +958,45 @@ static inline struct file *__fget_files_rcu(struct files_struct *files,
struct file *file;
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
struct file __rcu **fdentry;
+ unsigned long nospec_mask;
- if (unlikely(fd >= fdt->max_fds))
- return NULL;
-
- fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
+ /* Mask is a 0 for invalid fd's, ~0 for valid ones */
+ nospec_mask = array_index_mask_nospec(fd, fdt->max_fds);
/*
- * Ok, we have a file pointer. However, because we do
- * this all locklessly under RCU, we may be racing with
- * that file being closed.
- *
- * Such a race can take two forms:
- *
- * (a) the file ref already went down to zero and the
- * file hasn't been reused yet or the file count
- * isn't zero but the file has already been reused.
+ * fdentry points to the 'fd' offset, or fdt->fd[0].
+ * Loading from fdt->fd[0] is always safe, because the
+ * array always exists.
*/
- file = __get_file_rcu(fdentry);
+ fdentry = fdt->fd + (fd & nospec_mask);
+
+ /* Do the load, then mask any invalid result */
+ file = rcu_dereference_raw(*fdentry);
+ file = (void *)(nospec_mask & (unsigned long)file);
if (unlikely(!file))
return NULL;
- if (unlikely(IS_ERR(file)))
+ /*
+ * Ok, we have a file pointer that was valid at
+ * some point, but it might have become stale since.
+ *
+ * We need to confirm it by incrementing the refcount
+ * and then check the lookup again.
+ *
+ * atomic_long_inc_not_zero() gives us a full memory
+ * barrier. We only really need an 'acquire' one to
+ * protect the loads below, but we don't have that.
+ */
+ if (unlikely(!atomic_long_inc_not_zero(&file->f_count)))
continue;
/*
+ * Such a race can take two forms:
+ *
+ * (a) the file ref already went down to zero and the
+ * file hasn't been reused yet or the file count
+ * isn't zero but the file has already been reused.
+ *
* (b) the file table entry has changed under us.
* Note that we don't need to re-check the 'fdt->fd'
* pointer having changed, because it always goes
@@ -991,7 +1004,8 @@ static inline struct file *__fget_files_rcu(struct files_struct *files,
*
* If so, we need to put our ref and try again.
*/
- if (unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
+ if (unlikely(file != rcu_dereference_raw(*fdentry)) ||
+ unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
fput(file);
continue;
}
@@ -1128,13 +1142,13 @@ static unsigned long __fget_light(unsigned int fd, fmode_t mask)
* atomic_read_acquire() pairs with atomic_dec_and_test() in
* put_files_struct().
*/
- if (atomic_read_acquire(&files->count) == 1) {
+ if (likely(atomic_read_acquire(&files->count) == 1)) {
file = files_lookup_fd_raw(files, fd);
if (!file || unlikely(file->f_mode & mask))
return 0;
return (unsigned long)file;
} else {
- file = __fget(fd, mask);
+ file = __fget_files(files, fd, mask);
if (!file)
return 0;
return FDPUT_FPUT | (unsigned long)file;
@@ -1282,7 +1296,7 @@ out_unlock:
}
/**
- * __receive_fd() - Install received file into file descriptor table
+ * receive_fd() - Install received file into file descriptor table
* @file: struct file that was received from another process
* @ufd: __user pointer to write new fd number to
* @o_flags: the O_* flags to apply to the new fd entry
@@ -1296,7 +1310,7 @@ out_unlock:
*
* Returns newly installed fd or -ve on error.
*/
-int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
+int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
int new_fd;
int error;
@@ -1321,6 +1335,7 @@ int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
__receive_sock(file);
return new_fd;
}
+EXPORT_SYMBOL_GPL(receive_fd);
int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
@@ -1336,12 +1351,6 @@ int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
return new_fd;
}
-int receive_fd(struct file *file, unsigned int o_flags)
-{
- return __receive_fd(file, NULL, o_flags);
-}
-EXPORT_SYMBOL_GPL(receive_fd);
-
static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
int err = -EBADF;
diff --git a/fs/file_table.c b/fs/file_table.c
index de4a2915bfd4..3ba764d73fc9 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -75,18 +75,6 @@ static inline void file_free(struct file *f)
}
}
-void release_empty_file(struct file *f)
-{
- WARN_ON_ONCE(f->f_mode & (FMODE_BACKING | FMODE_OPENED));
- if (atomic_long_dec_and_test(&f->f_count)) {
- security_file_free(f);
- put_cred(f->f_cred);
- if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
- percpu_counter_dec(&nr_files);
- kmem_cache_free(filp_cachep, f);
- }
-}
-
/*
* Return the total number of open files in the system
*/
@@ -419,7 +407,7 @@ static void delayed_fput(struct work_struct *unused)
static void ____fput(struct callback_head *work)
{
- __fput(container_of(work, struct file, f_rcuhead));
+ __fput(container_of(work, struct file, f_task_work));
}
/*
@@ -445,9 +433,13 @@ void fput(struct file *file)
if (atomic_long_dec_and_test(&file->f_count)) {
struct task_struct *task = current;
+ if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) {
+ file_free(file);
+ return;
+ }
if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
- init_task_work(&file->f_rcuhead, ____fput);
- if (!task_work_add(task, &file->f_rcuhead, TWA_RESUME))
+ init_task_work(&file->f_task_work, ____fput);
+ if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
return;
/*
* After this task has run exit_task_work(),
diff --git a/fs/freevxfs/vxfs_bmap.c b/fs/freevxfs/vxfs_bmap.c
index de2a5bccb930..26d367e3668d 100644
--- a/fs/freevxfs/vxfs_bmap.c
+++ b/fs/freevxfs/vxfs_bmap.c
@@ -29,7 +29,7 @@ vxfs_typdump(struct vxfs_typed *typ)
/**
* vxfs_bmap_ext4 - do bmap for ext4 extents
* @ip: pointer to the inode we do bmap for
- * @iblock: logical block.
+ * @bn: logical block.
*
* Description:
* vxfs_bmap_ext4 performs the bmap operation for inodes with
@@ -97,7 +97,7 @@ fail_buf:
* vxfs_bmap_indir reads a &struct vxfs_typed at @indir
* and performs the type-defined action.
*
- * Return Value:
+ * Returns:
* The physical block number on success, else Zero.
*
* Note:
@@ -179,7 +179,7 @@ out:
* Description:
* Performs the bmap operation for typed extents.
*
- * Return Value:
+ * Returns:
* The physical block number on success, else Zero.
*/
static daddr_t
@@ -243,7 +243,7 @@ vxfs_bmap_typed(struct inode *ip, long iblock)
* vxfs_bmap1 performs a logical to physical block mapping
* for vxfs-internal purposes.
*
- * Return Value:
+ * Returns:
* The physical block number on success, else Zero.
*/
daddr_t
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index 9b49ec36e667..ed51fcd34757 100644
--- a/fs/freevxfs/vxfs_immed.c
+++ b/fs/freevxfs/vxfs_immed.c
@@ -15,7 +15,7 @@
/**
* vxfs_immed_read_folio - read part of an immed inode into pagecache
- * @file: file context (unused)
+ * @fp: file context (unused)
* @folio: folio to fill in.
*
* Description:
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index f04ba2ed1e1a..1b0bca8b4cc6 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -177,8 +177,7 @@ vxfs_lookup(struct inode *dip, struct dentry *dp, unsigned int flags)
/**
* vxfs_readdir - read a directory
* @fp: the directory to read
- * @retp: return buffer
- * @filler: filldir callback
+ * @ctx: dir_context for filldir/readdir
*
* Description:
* vxfs_readdir fills @retp with directory entries from @fp
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index a660f1f21540..148a71b8b4d0 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -19,6 +19,7 @@
#include <linux/uio.h>
#include <linux/fs.h>
#include <linux/filelock.h>
+#include <linux/splice.h>
static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
unsigned int open_flags, int opcode,
@@ -3195,8 +3196,8 @@ static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
len, flags);
if (ret == -EOPNOTSUPP || ret == -EXDEV)
- ret = generic_copy_file_range(src_file, src_off, dst_file,
- dst_off, len, flags);
+ ret = splice_copy_file_range(src_file, src_off, dst_file,
+ dst_off, len);
return ret;
}
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 9611bfceda4b..9914d7f54f7d 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -82,11 +82,11 @@ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
}
/**
- * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_page
+ * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
* @folio: The folio to write
* @wbc: The writeback control
*
- * This is the same as calling block_write_full_page, but it also
+ * This is the same as calling block_write_full_folio, but it also
* writes pages outside of i_size
*/
static int gfs2_write_jdata_folio(struct folio *folio,
@@ -108,7 +108,7 @@ static int gfs2_write_jdata_folio(struct folio *folio,
folio_size(folio));
return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
- wbc, end_buffer_async_write);
+ wbc);
}
/**
@@ -403,18 +403,18 @@ static int gfs2_jdata_writepages(struct address_space *mapping,
}
/**
- * stuffed_readpage - Fill in a Linux folio with stuffed file data
+ * stuffed_read_folio - Fill in a Linux folio with stuffed file data
* @ip: the inode
* @folio: the folio
*
* Returns: errno
*/
-static int stuffed_readpage(struct gfs2_inode *ip, struct folio *folio)
+static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio)
{
- struct buffer_head *dibh;
- size_t i_size = i_size_read(&ip->i_inode);
- void *data;
- int error;
+ struct buffer_head *dibh = NULL;
+ size_t dsize = i_size_read(&ip->i_inode);
+ void *from = NULL;
+ int error = 0;
/*
* Due to the order of unstuffing files and ->fault(), we can be
@@ -422,22 +422,20 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct folio *folio)
* so we need to supply one here. It doesn't happen often.
*/
if (unlikely(folio->index)) {
- folio_zero_range(folio, 0, folio_size(folio));
- folio_mark_uptodate(folio);
- return 0;
+ dsize = 0;
+ } else {
+ error = gfs2_meta_inode_buffer(ip, &dibh);
+ if (error)
+ goto out;
+ from = dibh->b_data + sizeof(struct gfs2_dinode);
}
- error = gfs2_meta_inode_buffer(ip, &dibh);
- if (error)
- return error;
-
- data = dibh->b_data + sizeof(struct gfs2_dinode);
- memcpy_to_folio(folio, 0, data, i_size);
- folio_zero_range(folio, i_size, folio_size(folio) - i_size);
+ folio_fill_tail(folio, 0, from, dsize);
brelse(dibh);
- folio_mark_uptodate(folio);
+out:
+ folio_end_read(folio, error == 0);
- return 0;
+ return error;
}
/**
@@ -456,8 +454,7 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
(i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
error = iomap_read_folio(folio, &gfs2_iomap_ops);
} else if (gfs2_is_stuffed(ip)) {
- error = stuffed_readpage(ip, folio);
- folio_unlock(folio);
+ error = stuffed_read_folio(ip, folio);
} else {
error = mpage_read_folio(folio, gfs2_block_map);
}
@@ -748,7 +745,7 @@ static const struct address_space_operations gfs2_aops = {
.bmap = gfs2_bmap,
.migrate_folio = filemap_migrate_folio,
.is_partially_uptodate = iomap_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
};
static const struct address_space_operations gfs2_jdata_aops = {
@@ -761,7 +758,7 @@ static const struct address_space_operations gfs2_jdata_aops = {
.invalidate_folio = gfs2_invalidate_folio,
.release_folio = gfs2_release_folio,
.is_partially_uptodate = block_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
};
void gfs2_set_aops(struct inode *inode)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index d6bf1f8c25dc..d8b619ed2f1e 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1213,7 +1213,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
mapping->host = s->s_bdev->bd_inode;
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS);
- mapping->private_data = NULL;
+ mapping->i_private_data = NULL;
mapping->writeback_index = 0;
}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index b108c5d26839..00ce89bdf32c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -117,7 +117,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
mapping->host = sb->s_bdev->bd_inode;
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS);
- mapping->private_data = NULL;
+ mapping->i_private_data = NULL;
mapping->writeback_index = 0;
spin_lock_init(&sdp->sd_log_lock);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 95dae7838b4e..b57f8c7b35be 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -271,7 +271,7 @@ static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
if (qd->qd_sbd != sdp)
continue;
if (lockref_get_not_dead(&qd->qd_lockref)) {
- list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
+ list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
return qd;
}
}
@@ -344,7 +344,7 @@ static void qd_put(struct gfs2_quota_data *qd)
}
qd->qd_lockref.count = 0;
- list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
+ list_lru_add_obj(&gfs2_qd_lru, &qd->qd_lru);
spin_unlock(&qd->qd_lockref.lock);
}
@@ -1517,7 +1517,7 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
lockref_mark_dead(&qd->qd_lockref);
spin_unlock(&qd->qd_lockref.lock);
- list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
+ list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
list_add(&qd->qd_lru, &dispose);
}
spin_unlock(&qd_lock);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index a7bc4690a780..8c34798a0715 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -29,11 +29,6 @@ static const struct inode_operations hfs_file_inode_operations;
#define HFS_VALID_MODE_BITS (S_IFREG | S_IFDIR | S_IRWXUGO)
-static int hfs_writepage(struct page *page, struct writeback_control *wbc)
-{
- return block_write_full_page(page, hfs_get_block, wbc);
-}
-
static int hfs_read_folio(struct file *file, struct folio *folio)
{
return block_read_full_folio(folio, hfs_get_block);
@@ -162,9 +157,10 @@ const struct address_space_operations hfs_btree_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = hfs_read_folio,
- .writepage = hfs_writepage,
+ .writepages = hfs_writepages,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,
+ .migrate_folio = buffer_migrate_folio,
.bmap = hfs_bmap,
.release_folio = hfs_release_folio,
};
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 702a0663b1d8..3d326926c195 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -28,11 +28,6 @@ static int hfsplus_read_folio(struct file *file, struct folio *folio)
return block_read_full_folio(folio, hfsplus_get_block);
}
-static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
-{
- return block_write_full_page(page, hfsplus_get_block, wbc);
-}
-
static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
@@ -159,9 +154,10 @@ const struct address_space_operations hfsplus_btree_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = hfsplus_read_folio,
- .writepage = hfsplus_writepage,
+ .writepages = hfsplus_writepages,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,
+ .migrate_folio = buffer_migrate_folio,
.bmap = hfsplus_bmap,
.release_folio = hfsplus_release_folio,
};
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 0b791adf02e5..b0cb70400996 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -30,8 +30,7 @@ struct hfsplus_wd {
* @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
* @buf: buffer for I/O
* @data: output pointer for location of requested data
- * @op: direction of I/O
- * @op_flags: request op flags
+ * @opf: request op flags
*
* The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
* HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
@@ -43,6 +42,8 @@ struct hfsplus_wd {
* that starts at the rounded-down address. As long as the data was
* read using hfsplus_submit_bio() and the same buffer is used things
* will work correctly.
+ *
+ * Returns: %0 on success else -errno code
*/
int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
void *buf, void **data, blk_opf_t opf)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index f757d4f7ad98..ea5b8e57d904 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -686,7 +686,7 @@ static void hugetlbfs_evict_inode(struct inode *inode)
* at inode creation time. If this is a device special inode,
* i_mapping may not point to the original address space.
*/
- resv_map = (struct resv_map *)(&inode->i_data)->private_data;
+ resv_map = (struct resv_map *)(&inode->i_data)->i_private_data;
/* Only regular and link inodes have associated reserve maps */
if (resv_map)
resv_map_release(&resv_map->refs);
@@ -1000,7 +1000,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
&hugetlbfs_i_mmap_rwsem_key);
inode->i_mapping->a_ops = &hugetlbfs_aops;
simple_inode_init_ts(inode);
- inode->i_mapping->private_data = resv_map;
+ inode->i_mapping->i_private_data = resv_map;
info->seals = F_SEAL_SEAL;
switch (mode & S_IFMT) {
default:
@@ -1129,8 +1129,8 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
#define hugetlbfs_migrate_folio NULL
#endif
-static int hugetlbfs_error_remove_page(struct address_space *mapping,
- struct page *page)
+static int hugetlbfs_error_remove_folio(struct address_space *mapping,
+ struct folio *folio)
{
return 0;
}
@@ -1277,7 +1277,7 @@ static const struct address_space_operations hugetlbfs_aops = {
.write_end = hugetlbfs_write_end,
.dirty_folio = noop_dirty_folio,
.migrate_folio = hugetlbfs_migrate_folio,
- .error_remove_page = hugetlbfs_error_remove_page,
+ .error_remove_folio = hugetlbfs_error_remove_folio,
};
diff --git a/fs/inode.c b/fs/inode.c
index f238d987dec9..99d8754a74a3 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -209,7 +209,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
atomic_set(&mapping->nr_thps, 0);
#endif
mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
- mapping->private_data = NULL;
+ mapping->i_private_data = NULL;
mapping->writeback_index = 0;
init_rwsem(&mapping->invalidate_lock);
lockdep_set_class_and_name(&mapping->invalidate_lock,
@@ -398,8 +398,8 @@ static void __address_space_init_once(struct address_space *mapping)
{
xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
init_rwsem(&mapping->i_mmap_rwsem);
- INIT_LIST_HEAD(&mapping->private_list);
- spin_lock_init(&mapping->private_lock);
+ INIT_LIST_HEAD(&mapping->i_private_list);
+ spin_lock_init(&mapping->i_private_lock);
mapping->i_mmap = RB_ROOT_CACHED;
}
@@ -464,7 +464,7 @@ static void __inode_add_lru(struct inode *inode, bool rotate)
if (!mapping_shrinkable(&inode->i_data))
return;
- if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
+ if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
this_cpu_inc(nr_unused);
else if (rotate)
inode->i_state |= I_REFERENCED;
@@ -482,7 +482,7 @@ void inode_add_lru(struct inode *inode)
static void inode_lru_list_del(struct inode *inode)
{
- if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
+ if (list_lru_del_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
this_cpu_dec(nr_unused);
}
@@ -620,7 +620,7 @@ void clear_inode(struct inode *inode)
* nor even WARN_ON(!mapping_empty).
*/
xa_unlock_irq(&inode->i_data.i_pages);
- BUG_ON(!list_empty(&inode->i_data.private_list));
+ BUG_ON(!list_empty(&inode->i_data.i_private_list));
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(inode->i_state & I_CLEAR);
BUG_ON(!list_empty(&inode->i_wb_list));
@@ -1836,37 +1836,37 @@ EXPORT_SYMBOL(bmap);
* earlier than or equal to either the ctime or mtime,
* or if at least a day has passed since the last atime update.
*/
-static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
+static bool relatime_need_update(struct vfsmount *mnt, struct inode *inode,
struct timespec64 now)
{
struct timespec64 atime, mtime, ctime;
if (!(mnt->mnt_flags & MNT_RELATIME))
- return 1;
+ return true;
/*
* Is mtime younger than or equal to atime? If yes, update atime:
*/
atime = inode_get_atime(inode);
mtime = inode_get_mtime(inode);
if (timespec64_compare(&mtime, &atime) >= 0)
- return 1;
+ return true;
/*
* Is ctime younger than or equal to atime? If yes, update atime:
*/
ctime = inode_get_ctime(inode);
if (timespec64_compare(&ctime, &atime) >= 0)
- return 1;
+ return true;
/*
* Is the previous atime value older than a day? If yes,
* update atime:
*/
if ((long)(now.tv_sec - atime.tv_sec) >= 24*60*60)
- return 1;
+ return true;
/*
* Good, we can skip the atime update:
*/
- return 0;
+ return false;
}
/**
@@ -2404,7 +2404,7 @@ EXPORT_SYMBOL(inode_init_owner);
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs @nop_mnt_idmap.
+ * raw inode simply pass @nop_mnt_idmap.
*/
bool inode_owner_or_capable(struct mnt_idmap *idmap,
const struct inode *inode)
diff --git a/fs/internal.h b/fs/internal.h
index 58e43341aebf..bf2ee2e0d45d 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -83,6 +83,8 @@ int path_mount(const char *dev_name, struct path *path,
const char *type_page, unsigned long flags, void *data_page);
int path_umount(struct path *path, int flags);
+int show_path(struct seq_file *m, struct dentry *root);
+
/*
* fs_struct.c
*/
@@ -94,7 +96,6 @@ extern void chroot_fs_refs(const struct path *, const struct path *);
struct file *alloc_empty_file(int flags, const struct cred *cred);
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred);
struct file *alloc_empty_backing_file(int flags, const struct cred *cred);
-void release_empty_file(struct file *f);
static inline void file_put_write_access(struct file *file)
{
@@ -180,7 +181,7 @@ extern struct file *do_file_open_root(const struct path *,
const char *, const struct open_flags *);
extern struct open_how build_open_how(int flags, umode_t mode);
extern int build_open_flags(const struct open_how *how, struct open_flags *op);
-extern struct file *__close_fd_get_file(unsigned int fd);
+struct file *file_close_fd_locked(struct files_struct *files, unsigned fd);
long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
int chmod_common(const struct path *path, umode_t mode);
@@ -243,10 +244,10 @@ int do_statx(int dfd, struct filename *filename, unsigned int flags,
/*
* fs/splice.c:
*/
-long splice_file_to_pipe(struct file *in,
- struct pipe_inode_info *opipe,
- loff_t *offset,
- size_t len, unsigned int flags);
+ssize_t splice_file_to_pipe(struct file *in,
+ struct pipe_inode_info *opipe,
+ loff_t *offset,
+ size_t len, unsigned int flags);
/*
* fs/xattr.c:
diff --git a/fs/ioctl.c b/fs/ioctl.c
index f5fd99d6b0d4..76cf22ac97d7 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -920,8 +920,7 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
if (!f.file)
return -EBADF;
- /* RED-PEN how should LSM module know it's handling 32bit? */
- error = security_file_ioctl(f.file, cmd, arg);
+ error = security_file_ioctl_compat(f.file, cmd, arg);
if (error)
goto out;
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index f72df2babe56..093c4515b22a 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -305,28 +305,18 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
{
const struct iomap *iomap = iomap_iter_srcmap(iter);
size_t size = i_size_read(iter->inode) - iomap->offset;
- size_t poff = offset_in_page(iomap->offset);
size_t offset = offset_in_folio(folio, iomap->offset);
- void *addr;
if (folio_test_uptodate(folio))
return 0;
- if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
- return -EIO;
- if (WARN_ON_ONCE(size > PAGE_SIZE -
- offset_in_page(iomap->inline_data)))
- return -EIO;
if (WARN_ON_ONCE(size > iomap->length))
return -EIO;
if (offset > 0)
ifs_alloc(iter->inode, folio, iter->flags);
- addr = kmap_local_folio(folio, offset);
- memcpy(addr, iomap->inline_data, size);
- memset(addr + size, 0, PAGE_SIZE - poff - size);
- kunmap_local(addr);
- iomap_set_range_uptodate(folio, offset, PAGE_SIZE - poff);
+ folio_fill_tail(folio, offset, iomap->inline_data, size);
+ iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
return 0;
}
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
index 9d26b1b9fc01..0925caab23c4 100644
--- a/fs/jffs2/debug.c
+++ b/fs/jffs2/debug.c
@@ -157,7 +157,7 @@ __jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c,
kfree(buf);
}
-void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c)
+static void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c)
{
struct jffs2_eraseblock *jeb;
uint32_t free = 0, dirty = 0, used = 0, wasted = 0,
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index f8af6c3ae336..73f37f298087 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/highuid.h>
+#include <linux/mpage.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
@@ -397,9 +398,10 @@ static int minix_get_block(struct inode *inode, sector_t block,
return V2_minix_get_block(inode, block, bh_result, create);
}
-static int minix_writepage(struct page *page, struct writeback_control *wbc)
+static int minix_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- return block_write_full_page(page, minix_get_block, wbc);
+ return mpage_writepages(mapping, wbc, minix_get_block);
}
static int minix_read_folio(struct file *file, struct folio *folio)
@@ -444,9 +446,10 @@ static const struct address_space_operations minix_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = minix_read_folio,
- .writepage = minix_writepage,
+ .writepages = minix_writepages,
.write_begin = minix_write_begin,
.write_end = generic_write_end,
+ .migrate_folio = buffer_migrate_folio,
.bmap = minix_bmap,
.direct_IO = noop_direct_IO
};
diff --git a/fs/mnt_idmapping.c b/fs/mnt_idmapping.c
index 57d1dedf3f8f..64c5205e2b5e 100644
--- a/fs/mnt_idmapping.c
+++ b/fs/mnt_idmapping.c
@@ -9,8 +9,16 @@
#include "internal.h"
+/*
+ * Outside of this file vfs{g,u}id_t are always created from k{g,u}id_t,
+ * never from raw values. These are just internal helpers.
+ */
+#define VFSUIDT_INIT_RAW(val) (vfsuid_t){ val }
+#define VFSGIDT_INIT_RAW(val) (vfsgid_t){ val }
+
struct mnt_idmap {
- struct user_namespace *owner;
+ struct uid_gid_map uid_map;
+ struct uid_gid_map gid_map;
refcount_t count;
};
@@ -20,25 +28,11 @@ struct mnt_idmap {
* mapped to {g,u}id 1, [...], {g,u}id 1000 to {g,u}id 1000, [...].
*/
struct mnt_idmap nop_mnt_idmap = {
- .owner = &init_user_ns,
.count = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL_GPL(nop_mnt_idmap);
/**
- * check_fsmapping - check whether an mount idmapping is allowed
- * @idmap: idmap of the relevent mount
- * @sb: super block of the filesystem
- *
- * Return: true if @idmap is allowed, false if not.
- */
-bool check_fsmapping(const struct mnt_idmap *idmap,
- const struct super_block *sb)
-{
- return idmap->owner != sb->s_user_ns;
-}
-
-/**
* initial_idmapping - check whether this is the initial mapping
* @ns: idmapping to check
*
@@ -53,26 +47,6 @@ static inline bool initial_idmapping(const struct user_namespace *ns)
}
/**
- * no_idmapping - check whether we can skip remapping a kuid/gid
- * @mnt_userns: the mount's idmapping
- * @fs_userns: the filesystem's idmapping
- *
- * This function can be used to check whether a remapping between two
- * idmappings is required.
- * An idmapped mount is a mount that has an idmapping attached to it that
- * is different from the filesystem's idmapping and the initial idmapping.
- * If the initial mapping is used or the idmapping of the mount and the
- * filesystem are identical no remapping is required.
- *
- * Return: true if remapping can be skipped, false if not.
- */
-static inline bool no_idmapping(const struct user_namespace *mnt_userns,
- const struct user_namespace *fs_userns)
-{
- return initial_idmapping(mnt_userns) || mnt_userns == fs_userns;
-}
-
-/**
* make_vfsuid - map a filesystem kuid according to an idmapping
* @idmap: the mount's idmapping
* @fs_userns: the filesystem's idmapping
@@ -81,8 +55,8 @@ static inline bool no_idmapping(const struct user_namespace *mnt_userns,
* Take a @kuid and remap it from @fs_userns into @idmap. Use this
* function when preparing a @kuid to be reported to userspace.
*
- * If no_idmapping() determines that this is not an idmapped mount we can
- * simply return @kuid unchanged.
+ * If initial_idmapping() determines that this is not an idmapped mount
+ * we can simply return @kuid unchanged.
* If initial_idmapping() tells us that the filesystem is not mounted with an
* idmapping we know the value of @kuid won't change when calling
* from_kuid() so we can simply retrieve the value via __kuid_val()
@@ -94,13 +68,12 @@ static inline bool no_idmapping(const struct user_namespace *mnt_userns,
*/
vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
- struct user_namespace *fs_userns,
- kuid_t kuid)
+ struct user_namespace *fs_userns,
+ kuid_t kuid)
{
uid_t uid;
- struct user_namespace *mnt_userns = idmap->owner;
- if (no_idmapping(mnt_userns, fs_userns))
+ if (idmap == &nop_mnt_idmap)
return VFSUIDT_INIT(kuid);
if (initial_idmapping(fs_userns))
uid = __kuid_val(kuid);
@@ -108,7 +81,7 @@ vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
uid = from_kuid(fs_userns, kuid);
if (uid == (uid_t)-1)
return INVALID_VFSUID;
- return VFSUIDT_INIT(make_kuid(mnt_userns, uid));
+ return VFSUIDT_INIT_RAW(map_id_down(&idmap->uid_map, uid));
}
EXPORT_SYMBOL_GPL(make_vfsuid);
@@ -121,8 +94,8 @@ EXPORT_SYMBOL_GPL(make_vfsuid);
* Take a @kgid and remap it from @fs_userns into @idmap. Use this
* function when preparing a @kgid to be reported to userspace.
*
- * If no_idmapping() determines that this is not an idmapped mount we can
- * simply return @kgid unchanged.
+ * If initial_idmapping() determines that this is not an idmapped mount
+ * we can simply return @kgid unchanged.
* If initial_idmapping() tells us that the filesystem is not mounted with an
* idmapping we know the value of @kgid won't change when calling
* from_kgid() so we can simply retrieve the value via __kgid_val()
@@ -136,9 +109,8 @@ vfsgid_t make_vfsgid(struct mnt_idmap *idmap,
struct user_namespace *fs_userns, kgid_t kgid)
{
gid_t gid;
- struct user_namespace *mnt_userns = idmap->owner;
- if (no_idmapping(mnt_userns, fs_userns))
+ if (idmap == &nop_mnt_idmap)
return VFSGIDT_INIT(kgid);
if (initial_idmapping(fs_userns))
gid = __kgid_val(kgid);
@@ -146,7 +118,7 @@ vfsgid_t make_vfsgid(struct mnt_idmap *idmap,
gid = from_kgid(fs_userns, kgid);
if (gid == (gid_t)-1)
return INVALID_VFSGID;
- return VFSGIDT_INIT(make_kgid(mnt_userns, gid));
+ return VFSGIDT_INIT_RAW(map_id_down(&idmap->gid_map, gid));
}
EXPORT_SYMBOL_GPL(make_vfsgid);
@@ -165,11 +137,10 @@ kuid_t from_vfsuid(struct mnt_idmap *idmap,
struct user_namespace *fs_userns, vfsuid_t vfsuid)
{
uid_t uid;
- struct user_namespace *mnt_userns = idmap->owner;
- if (no_idmapping(mnt_userns, fs_userns))
+ if (idmap == &nop_mnt_idmap)
return AS_KUIDT(vfsuid);
- uid = from_kuid(mnt_userns, AS_KUIDT(vfsuid));
+ uid = map_id_up(&idmap->uid_map, __vfsuid_val(vfsuid));
if (uid == (uid_t)-1)
return INVALID_UID;
if (initial_idmapping(fs_userns))
@@ -193,11 +164,10 @@ kgid_t from_vfsgid(struct mnt_idmap *idmap,
struct user_namespace *fs_userns, vfsgid_t vfsgid)
{
gid_t gid;
- struct user_namespace *mnt_userns = idmap->owner;
- if (no_idmapping(mnt_userns, fs_userns))
+ if (idmap == &nop_mnt_idmap)
return AS_KGIDT(vfsgid);
- gid = from_kgid(mnt_userns, AS_KGIDT(vfsgid));
+ gid = map_id_up(&idmap->gid_map, __vfsgid_val(vfsgid));
if (gid == (gid_t)-1)
return INVALID_GID;
if (initial_idmapping(fs_userns))
@@ -228,16 +198,91 @@ int vfsgid_in_group_p(vfsgid_t vfsgid)
#endif
EXPORT_SYMBOL_GPL(vfsgid_in_group_p);
+static int copy_mnt_idmap(struct uid_gid_map *map_from,
+ struct uid_gid_map *map_to)
+{
+ struct uid_gid_extent *forward, *reverse;
+ u32 nr_extents = READ_ONCE(map_from->nr_extents);
+ /* Pairs with smp_wmb() when writing the idmapping. */
+ smp_rmb();
+
+ /*
+ * Don't blindly copy @map_from into @map_to if no extents have
+ * been written yet. Since we read @nr_extents without any lock,
+ * someone could be writing an idmapping right now and we might
+ * end up with inconsistent data. So just don't do anything at all.
+ */
+ if (nr_extents == 0)
+ return 0;
+
+ /*
+ * Here we know that nr_extents is greater than zero which means
+ * a map has been written. Since idmappings can't be changed
+ * once they have been written we know that we can safely copy
+ * from @map_from into @map_to.
+ */
+
+ if (nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
+ *map_to = *map_from;
+ return 0;
+ }
+
+ forward = kmemdup(map_from->forward,
+ nr_extents * sizeof(struct uid_gid_extent),
+ GFP_KERNEL_ACCOUNT);
+ if (!forward)
+ return -ENOMEM;
+
+ reverse = kmemdup(map_from->reverse,
+ nr_extents * sizeof(struct uid_gid_extent),
+ GFP_KERNEL_ACCOUNT);
+ if (!reverse) {
+ kfree(forward);
+ return -ENOMEM;
+ }
+
+ /*
+ * The idmapping isn't exposed anywhere so we don't need to care
+ * about ordering between extent pointers and @nr_extents
+ * initialization.
+ */
+ map_to->forward = forward;
+ map_to->reverse = reverse;
+ map_to->nr_extents = nr_extents;
+ return 0;
+}
+
+static void free_mnt_idmap(struct mnt_idmap *idmap)
+{
+ if (idmap->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
+ kfree(idmap->uid_map.forward);
+ kfree(idmap->uid_map.reverse);
+ }
+ if (idmap->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
+ kfree(idmap->gid_map.forward);
+ kfree(idmap->gid_map.reverse);
+ }
+ kfree(idmap);
+}
+
struct mnt_idmap *alloc_mnt_idmap(struct user_namespace *mnt_userns)
{
struct mnt_idmap *idmap;
+ int ret;
idmap = kzalloc(sizeof(struct mnt_idmap), GFP_KERNEL_ACCOUNT);
if (!idmap)
return ERR_PTR(-ENOMEM);
- idmap->owner = get_user_ns(mnt_userns);
refcount_set(&idmap->count, 1);
+ ret = copy_mnt_idmap(&mnt_userns->uid_map, &idmap->uid_map);
+ if (!ret)
+ ret = copy_mnt_idmap(&mnt_userns->gid_map, &idmap->gid_map);
+ if (ret) {
+ free_mnt_idmap(idmap);
+ idmap = ERR_PTR(ret);
+ }
return idmap;
}
@@ -267,9 +312,7 @@ EXPORT_SYMBOL_GPL(mnt_idmap_get);
*/
void mnt_idmap_put(struct mnt_idmap *idmap)
{
- if (idmap != &nop_mnt_idmap && refcount_dec_and_test(&idmap->count)) {
- put_user_ns(idmap->owner);
- kfree(idmap);
- }
+ if (idmap != &nop_mnt_idmap && refcount_dec_and_test(&idmap->count))
+ free_mnt_idmap(idmap);
}
EXPORT_SYMBOL_GPL(mnt_idmap_put);
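
To make the new translation path above concrete, here is a hedged worked example; the mapping values are illustrative and not taken from the patch:

/*
 * Worked example (illustrative): an idmapped mount created from a user
 * namespace with the single extent "0 10000 1000" now carries a private
 * copy of that extent in idmap->uid_map. For a file owned by filesystem
 * uid 0 on a filesystem in init_user_ns, make_vfsuid() does:
 *
 *	uid = from_kuid(&init_user_ns, kuid);	// -> 0
 *	map_id_down(&idmap->uid_map, uid);	// -> 10000
 *
 * so the file is reported to userspace as uid 10000, exactly what
 * make_kuid(mnt_userns, 0) produced before the owning user namespace
 * reference was dropped from struct mnt_idmap.
 */
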
diff --git a/fs/mount.h b/fs/mount.h
index 130c07c2f8d2..4a42fc68f4cc 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -8,19 +8,13 @@
struct mnt_namespace {
struct ns_common ns;
struct mount * root;
- /*
- * Traversal and modification of .list is protected by either
- * - taking namespace_sem for write, OR
- * - taking namespace_sem for read AND taking .ns_lock.
- */
- struct list_head list;
- spinlock_t ns_lock;
+ struct rb_root mounts; /* Protected by namespace_sem */
struct user_namespace *user_ns;
struct ucounts *ucounts;
u64 seq; /* Sequence number to prevent loops */
wait_queue_head_t poll;
u64 event;
- unsigned int mounts; /* # of mounts in the namespace */
+ unsigned int nr_mounts; /* # of mounts in the namespace */
unsigned int pending_mounts;
} __randomize_layout;
@@ -55,7 +49,10 @@ struct mount {
struct list_head mnt_child; /* and going through their mnt_child */
struct list_head mnt_instance; /* mount instance on sb->s_mounts */
const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
- struct list_head mnt_list;
+ union {
+ struct rb_node mnt_node; /* Under ns->mounts */
+ struct list_head mnt_list;
+ };
struct list_head mnt_expire; /* link in fs-specific expiry list */
struct list_head mnt_share; /* circular list of shared mounts */
struct list_head mnt_slave_list;/* list of slave mounts */
@@ -72,7 +69,8 @@ struct mount {
struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks;
__u32 mnt_fsnotify_mask;
#endif
- int mnt_id; /* mount identifier */
+ int mnt_id; /* mount identifier, reused */
+ u64 mnt_id_unique; /* mount ID unique until reboot */
int mnt_group_id; /* peer group identifier */
int mnt_expiry_mark; /* true if marked for expiry */
struct hlist_head mnt_pins;
@@ -127,7 +125,6 @@ struct proc_mounts {
struct mnt_namespace *ns;
struct path root;
int (*show)(struct seq_file *, struct vfsmount *);
- struct mount cursor;
};
extern const struct seq_operations mounts_op;
@@ -146,4 +143,12 @@ static inline bool is_anon_ns(struct mnt_namespace *ns)
return ns->seq == 0;
}
+static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list)
+{
+ WARN_ON(!(mnt->mnt.mnt_flags & MNT_ONRB));
+ mnt->mnt.mnt_flags &= ~MNT_ONRB;
+ rb_erase(&mnt->mnt_node, &mnt->mnt_ns->mounts);
+ list_add_tail(&mnt->mnt_list, dt_list);
+}
+
extern void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor);
diff --git a/fs/mpage.c b/fs/mpage.c
index ffb064ed9d04..738882e0766d 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -166,7 +166,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
sector_t block_in_file;
sector_t last_block;
sector_t last_block_in_file;
- sector_t blocks[MAX_BUF_PER_PAGE];
+ sector_t first_block;
unsigned page_block;
unsigned first_hole = blocks_per_page;
struct block_device *bdev = NULL;
@@ -205,6 +205,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
unsigned map_offset = block_in_file - args->first_logical_block;
unsigned last = nblocks - map_offset;
+ first_block = map_bh->b_blocknr + map_offset;
for (relative_block = 0; ; relative_block++) {
if (relative_block == last) {
clear_buffer_mapped(map_bh);
@@ -212,8 +213,6 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
}
if (page_block == blocks_per_page)
break;
- blocks[page_block] = map_bh->b_blocknr + map_offset +
- relative_block;
page_block++;
block_in_file++;
}
@@ -259,7 +258,9 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
goto confused; /* hole -> non-hole */
/* Contiguous blocks? */
- if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
+ if (!page_block)
+ first_block = map_bh->b_blocknr;
+ else if (first_block + page_block != map_bh->b_blocknr)
goto confused;
nblocks = map_bh->b_size >> blkbits;
for (relative_block = 0; ; relative_block++) {
@@ -268,7 +269,6 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
break;
} else if (page_block == blocks_per_page)
break;
- blocks[page_block] = map_bh->b_blocknr+relative_block;
page_block++;
block_in_file++;
}
@@ -289,7 +289,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
/*
* This folio will go to BIO. Do we need to send this BIO off first?
*/
- if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
+ if (args->bio && (args->last_block_in_bio != first_block - 1))
args->bio = mpage_bio_submit_read(args->bio);
alloc_new:
@@ -298,7 +298,7 @@ alloc_new:
gfp);
if (args->bio == NULL)
goto confused;
- args->bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
+ args->bio->bi_iter.bi_sector = first_block << (blkbits - 9);
}
length = first_hole << blkbits;
@@ -313,7 +313,7 @@ alloc_new:
(first_hole != blocks_per_page))
args->bio = mpage_bio_submit_read(args->bio);
else
- args->last_block_in_bio = blocks[blocks_per_page - 1];
+ args->last_block_in_bio = first_block + blocks_per_page - 1;
out:
return args->bio;
@@ -430,13 +430,13 @@ struct mpage_data {
* We have our BIO, so we can now mark the buffers clean. Make
* sure to only clean buffers which we know we'll be writing.
*/
-static void clean_buffers(struct page *page, unsigned first_unmapped)
+static void clean_buffers(struct folio *folio, unsigned first_unmapped)
{
unsigned buffer_counter = 0;
- struct buffer_head *bh, *head;
- if (!page_has_buffers(page))
+ struct buffer_head *bh, *head = folio_buffers(folio);
+
+ if (!head)
return;
- head = page_buffers(page);
bh = head;
do {
@@ -451,18 +451,8 @@ static void clean_buffers(struct page *page, unsigned first_unmapped)
* read_folio would fail to serialize with the bh and it would read from
* disk before we reach the platter.
*/
- if (buffer_heads_over_limit && PageUptodate(page))
- try_to_free_buffers(page_folio(page));
-}
-
-/*
- * For situations where we want to clean all buffers attached to a page.
- * We don't need to calculate how many buffers are attached to the page,
- * we just need to specify a number larger than the maximum number of buffers.
- */
-void clean_page_buffers(struct page *page)
-{
- clean_buffers(page, ~0U);
+ if (buffer_heads_over_limit && folio_test_uptodate(folio))
+ try_to_free_buffers(folio);
}
static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
@@ -476,7 +466,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
sector_t last_block;
sector_t block_in_file;
- sector_t blocks[MAX_BUF_PER_PAGE];
+ sector_t first_block;
unsigned page_block;
unsigned first_unmapped = blocks_per_page;
struct block_device *bdev = NULL;
@@ -514,10 +504,12 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
if (!buffer_dirty(bh) || !buffer_uptodate(bh))
goto confused;
if (page_block) {
- if (bh->b_blocknr != blocks[page_block-1] + 1)
+ if (bh->b_blocknr != first_block + page_block)
goto confused;
+ } else {
+ first_block = bh->b_blocknr;
}
- blocks[page_block++] = bh->b_blocknr;
+ page_block++;
boundary = buffer_boundary(bh);
if (boundary) {
boundary_block = bh->b_blocknr;
@@ -566,10 +558,12 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
boundary_bdev = map_bh.b_bdev;
}
if (page_block) {
- if (map_bh.b_blocknr != blocks[page_block-1] + 1)
+ if (map_bh.b_blocknr != first_block + page_block)
goto confused;
+ } else {
+ first_block = map_bh.b_blocknr;
}
- blocks[page_block++] = map_bh.b_blocknr;
+ page_block++;
boundary = buffer_boundary(&map_bh);
bdev = map_bh.b_bdev;
if (block_in_file == last_block)
@@ -601,7 +595,7 @@ page_is_mapped:
/*
* This page will go to BIO. Do we need to send this BIO off first?
*/
- if (bio && mpd->last_block_in_bio != blocks[0] - 1)
+ if (bio && mpd->last_block_in_bio != first_block - 1)
bio = mpage_bio_submit_write(bio);
alloc_new:
@@ -609,7 +603,7 @@ alloc_new:
bio = bio_alloc(bdev, BIO_MAX_VECS,
REQ_OP_WRITE | wbc_to_write_flags(wbc),
GFP_NOFS);
- bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
+ bio->bi_iter.bi_sector = first_block << (blkbits - 9);
wbc_init_bio(wbc, bio);
}
@@ -625,7 +619,7 @@ alloc_new:
goto alloc_new;
}
- clean_buffers(&folio->page, first_unmapped);
+ clean_buffers(folio, first_unmapped);
BUG_ON(folio_test_writeback(folio));
folio_start_writeback(folio);
@@ -637,7 +631,7 @@ alloc_new:
boundary_block, 1 << blkbits);
}
} else {
- mpd->last_block_in_bio = blocks[blocks_per_page - 1];
+ mpd->last_block_in_bio = first_block + blocks_per_page - 1;
}
goto out;
@@ -648,7 +642,7 @@ confused:
/*
* The caller has a ref on the inode, so *mapping is stable
*/
- ret = block_write_full_page(&folio->page, mpd->get_block, wbc);
+ ret = block_write_full_folio(folio, wbc, mpd->get_block);
mapping_set_error(mapping, ret);
out:
mpd->bio = bio;
diff --git a/fs/namei.c b/fs/namei.c
index 71c13b2990b4..faae721e4d63 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL(putname);
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs @nop_mnt_idmap.
+ * raw inode simply pass @nop_mnt_idmap.
*/
static int check_acl(struct mnt_idmap *idmap,
struct inode *inode, int mask)
@@ -334,7 +334,7 @@ static int check_acl(struct mnt_idmap *idmap,
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs @nop_mnt_idmap.
+ * raw inode simply pass @nop_mnt_idmap.
*/
static int acl_permission_check(struct mnt_idmap *idmap,
struct inode *inode, int mask)
@@ -395,7 +395,7 @@ static int acl_permission_check(struct mnt_idmap *idmap,
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs @nop_mnt_idmap.
+ * raw inode simply pass @nop_mnt_idmap.
*/
int generic_permission(struct mnt_idmap *idmap, struct inode *inode,
int mask)
@@ -2467,7 +2467,7 @@ static int handle_lookup_down(struct nameidata *nd)
return PTR_ERR(step_into(nd, WALK_NOFOLLOW, nd->path.dentry));
}
-/* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
+/* Returns 0 and nd will be valid on success; Returns error, otherwise. */
static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path)
{
const char *s = path_init(nd, flags);
@@ -2522,7 +2522,7 @@ int filename_lookup(int dfd, struct filename *name, unsigned flags,
return retval;
}
-/* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
+/* Returns 0 and nd will be valid on success; Returns error, otherwise. */
static int path_parentat(struct nameidata *nd, unsigned flags,
struct path *parent)
{
@@ -3158,7 +3158,7 @@ static inline umode_t vfs_prepare_mode(struct mnt_idmap *idmap,
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs @nop_mnt_idmap.
+ * raw inode simply pass @nop_mnt_idmap.
*/
int vfs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool want_excl)
@@ -3646,7 +3646,7 @@ static int do_open(struct nameidata *nd,
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs @nop_mnt_idmap.
+ * raw inode simply pass @nop_mnt_idmap.
*/
static int vfs_tmpfile(struct mnt_idmap *idmap,
const struct path *parentpath,
@@ -3785,10 +3785,7 @@ static struct file *path_openat(struct nameidata *nd,
WARN_ON(1);
error = -EINVAL;
}
- if (unlikely(file->f_mode & FMODE_OPENED))
- fput(file);
- else
- release_empty_file(file);
+ fput(file);
if (error == -EOPENSTALE) {
if (flags & LOOKUP_RCU)
error = -ECHILD;
@@ -3954,7 +3951,7 @@ EXPORT_SYMBOL(user_path_create);
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs @nop_mnt_idmap.
+ * raw inode simply pass @nop_mnt_idmap.
*/
int vfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t dev)
@@ -4080,7 +4077,7 @@ SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, d
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs @nop_mnt_idmap.
+ * raw inode simply pass @nop_mnt_idmap.
*/
int vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
@@ -4161,7 +4158,7 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs @nop_mnt_idmap.
+ * raw inode simply pass @nop_mnt_idmap.
*/
int vfs_rmdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry)
@@ -4290,7 +4287,7 @@ SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs @nop_mnt_idmap.
+ * raw inode simply pass @nop_mnt_idmap.
*/
int vfs_unlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, struct inode **delegated_inode)
@@ -4443,7 +4440,7 @@ SYSCALL_DEFINE1(unlink, const char __user *, pathname)
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs @nop_mnt_idmap.
+ * raw inode simply pass @nop_mnt_idmap.
*/
int vfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *oldname)
@@ -4535,7 +4532,7 @@ SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newn
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs @nop_mnt_idmap.
+ * raw inode simply pass @nop_mnt_idmap.
*/
int vfs_link(struct dentry *old_dentry, struct mnt_idmap *idmap,
struct inode *dir, struct dentry *new_dentry,
diff --git a/fs/namespace.c b/fs/namespace.c
index fbf0e596fcd3..3beda4bb59af 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -32,6 +32,7 @@
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>
+#include <linux/nospec.h>
#include "pnode.h"
#include "internal.h"
@@ -68,6 +69,9 @@ static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
+/* Don't allow confusion with old 32bit mount ID */
+static atomic64_t mnt_id_ctr = ATOMIC64_INIT(1ULL << 32);
+
static struct hlist_head *mount_hashtable __ro_after_init;
static struct hlist_head *mountpoint_hashtable __ro_after_init;
static struct kmem_cache *mnt_cache __ro_after_init;
@@ -131,6 +135,7 @@ static int mnt_alloc_id(struct mount *mnt)
if (res < 0)
return res;
mnt->mnt_id = res;
+ mnt->mnt_id_unique = atomic64_inc_return(&mnt_id_ctr);
return 0;
}
@@ -730,21 +735,6 @@ struct vfsmount *lookup_mnt(const struct path *path)
return m;
}
-static inline void lock_ns_list(struct mnt_namespace *ns)
-{
- spin_lock(&ns->ns_lock);
-}
-
-static inline void unlock_ns_list(struct mnt_namespace *ns)
-{
- spin_unlock(&ns->ns_lock);
-}
-
-static inline bool mnt_is_cursor(struct mount *mnt)
-{
- return mnt->mnt.mnt_flags & MNT_CURSOR;
-}
-
/*
* __is_local_mountpoint - Test to see if dentry is a mountpoint in the
* current mount namespace.
@@ -763,19 +753,15 @@ static inline bool mnt_is_cursor(struct mount *mnt)
bool __is_local_mountpoint(struct dentry *dentry)
{
struct mnt_namespace *ns = current->nsproxy->mnt_ns;
- struct mount *mnt;
+ struct mount *mnt, *n;
bool is_covered = false;
down_read(&namespace_sem);
- lock_ns_list(ns);
- list_for_each_entry(mnt, &ns->list, mnt_list) {
- if (mnt_is_cursor(mnt))
- continue;
+ rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
is_covered = (mnt->mnt_mountpoint == dentry);
if (is_covered)
break;
}
- unlock_ns_list(ns);
up_read(&namespace_sem);
return is_covered;
@@ -1022,6 +1008,30 @@ void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct m
mnt_add_count(old_parent, -1);
}
+static inline struct mount *node_to_mount(struct rb_node *node)
+{
+ return node ? rb_entry(node, struct mount, mnt_node) : NULL;
+}
+
+static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
+{
+ struct rb_node **link = &ns->mounts.rb_node;
+ struct rb_node *parent = NULL;
+
+ WARN_ON(mnt->mnt.mnt_flags & MNT_ONRB);
+ mnt->mnt_ns = ns;
+ while (*link) {
+ parent = *link;
+ if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique)
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+ rb_link_node(&mnt->mnt_node, parent, link);
+ rb_insert_color(&mnt->mnt_node, &ns->mounts);
+ mnt->mnt.mnt_flags |= MNT_ONRB;
+}
+
/*
* vfsmount lock must be held for write
*/
@@ -1035,12 +1045,13 @@ static void commit_tree(struct mount *mnt)
BUG_ON(parent == mnt);
list_add_tail(&head, &mnt->mnt_list);
- list_for_each_entry(m, &head, mnt_list)
- m->mnt_ns = n;
+ while (!list_empty(&head)) {
+ m = list_first_entry(&head, typeof(*m), mnt_list);
+ list_del(&m->mnt_list);
- list_splice(&head, n->list.prev);
-
- n->mounts += n->pending_mounts;
+ mnt_add_to_ns(n, m);
+ }
+ n->nr_mounts += n->pending_mounts;
n->pending_mounts = 0;
__attach_mnt(mnt, parent);
@@ -1188,7 +1199,7 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
}
mnt->mnt.mnt_flags = old->mnt.mnt_flags;
- mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
+ mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL|MNT_ONRB);
atomic_inc(&sb->s_active);
mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));
@@ -1413,65 +1424,57 @@ struct vfsmount *mnt_clone_internal(const struct path *path)
return &p->mnt;
}
-#ifdef CONFIG_PROC_FS
-static struct mount *mnt_list_next(struct mnt_namespace *ns,
- struct list_head *p)
+/*
+ * Returns the mount which either has the specified mnt_id, or has the
+ * smallest id greater than the specified one.
+ */
+static struct mount *mnt_find_id_at(struct mnt_namespace *ns, u64 mnt_id)
{
- struct mount *mnt, *ret = NULL;
+ struct rb_node *node = ns->mounts.rb_node;
+ struct mount *ret = NULL;
- lock_ns_list(ns);
- list_for_each_continue(p, &ns->list) {
- mnt = list_entry(p, typeof(*mnt), mnt_list);
- if (!mnt_is_cursor(mnt)) {
- ret = mnt;
- break;
+ while (node) {
+ struct mount *m = node_to_mount(node);
+
+ if (mnt_id <= m->mnt_id_unique) {
+ ret = node_to_mount(node);
+ if (mnt_id == m->mnt_id_unique)
+ break;
+ node = node->rb_left;
+ } else {
+ node = node->rb_right;
}
}
- unlock_ns_list(ns);
-
return ret;
}
+#ifdef CONFIG_PROC_FS
+
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
struct proc_mounts *p = m->private;
- struct list_head *prev;
down_read(&namespace_sem);
- if (!*pos) {
- prev = &p->ns->list;
- } else {
- prev = &p->cursor.mnt_list;
- /* Read after we'd reached the end? */
- if (list_empty(prev))
- return NULL;
- }
-
- return mnt_list_next(p->ns, prev);
+ return mnt_find_id_at(p->ns, *pos);
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct proc_mounts *p = m->private;
- struct mount *mnt = v;
+ struct mount *next = NULL, *mnt = v;
+ struct rb_node *node = rb_next(&mnt->mnt_node);
++*pos;
- return mnt_list_next(p->ns, &mnt->mnt_list);
+ if (node) {
+ next = node_to_mount(node);
+ *pos = next->mnt_id_unique;
+ }
+ return next;
}
static void m_stop(struct seq_file *m, void *v)
{
- struct proc_mounts *p = m->private;
- struct mount *mnt = v;
-
- lock_ns_list(p->ns);
- if (mnt)
- list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list);
- else
- list_del_init(&p->cursor.mnt_list);
- unlock_ns_list(p->ns);
up_read(&namespace_sem);
}
@@ -1489,14 +1492,6 @@ const struct seq_operations mounts_op = {
.show = m_show,
};
-void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor)
-{
- down_read(&namespace_sem);
- lock_ns_list(ns);
- list_del(&cursor->mnt_list);
- unlock_ns_list(ns);
- up_read(&namespace_sem);
-}
#endif /* CONFIG_PROC_FS */
/**
@@ -1638,7 +1633,10 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
/* Gather the mounts to umount */
for (p = mnt; p; p = next_mnt(p, mnt)) {
p->mnt.mnt_flags |= MNT_UMOUNT;
- list_move(&p->mnt_list, &tmp_list);
+ if (p->mnt.mnt_flags & MNT_ONRB)
+ move_from_ns(p, &tmp_list);
+ else
+ list_move(&p->mnt_list, &tmp_list);
}
/* Hide the mounts from mnt_mounts */
@@ -1658,7 +1656,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
list_del_init(&p->mnt_list);
ns = p->mnt_ns;
if (ns) {
- ns->mounts--;
+ ns->nr_mounts--;
__touch_mnt_namespace(ns);
}
p->mnt_ns = NULL;
@@ -1784,14 +1782,16 @@ static int do_umount(struct mount *mnt, int flags)
event++;
if (flags & MNT_DETACH) {
- if (!list_empty(&mnt->mnt_list))
+ if (mnt->mnt.mnt_flags & MNT_ONRB ||
+ !list_empty(&mnt->mnt_list))
umount_tree(mnt, UMOUNT_PROPAGATE);
retval = 0;
} else {
shrink_submounts(mnt);
retval = -EBUSY;
if (!propagate_mount_busy(mnt, 2)) {
- if (!list_empty(&mnt->mnt_list))
+ if (mnt->mnt.mnt_flags & MNT_ONRB ||
+ !list_empty(&mnt->mnt_list))
umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
retval = 0;
}
@@ -2209,9 +2209,9 @@ int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
unsigned int mounts = 0;
struct mount *p;
- if (ns->mounts >= max)
+ if (ns->nr_mounts >= max)
return -ENOSPC;
- max -= ns->mounts;
+ max -= ns->nr_mounts;
if (ns->pending_mounts >= max)
return -ENOSPC;
max -= ns->pending_mounts;
@@ -2355,8 +2355,12 @@ static int attach_recursive_mnt(struct mount *source_mnt,
touch_mnt_namespace(source_mnt->mnt_ns);
} else {
if (source_mnt->mnt_ns) {
+ LIST_HEAD(head);
+
/* move from anon - the caller will destroy */
- list_del_init(&source_mnt->mnt_ns->list);
+ for (p = source_mnt; p; p = next_mnt(p, source_mnt))
+ move_from_ns(p, &head);
+ list_del_init(&head);
}
if (beneath)
mnt_set_mountpoint_beneath(source_mnt, top_mnt, smp);
@@ -2667,11 +2671,10 @@ static struct file *open_detached_copy(struct path *path, bool recursive)
lock_mount_hash();
for (p = mnt; p; p = next_mnt(p, mnt)) {
- p->mnt_ns = ns;
- ns->mounts++;
+ mnt_add_to_ns(ns, p);
+ ns->nr_mounts++;
}
ns->root = mnt;
- list_add_tail(&ns->list, &mnt->mnt_list);
mntget(&mnt->mnt);
unlock_mount_hash();
namespace_unlock();
@@ -3026,6 +3029,7 @@ static inline bool path_overmounted(const struct path *path)
* can_move_mount_beneath - check that we can mount beneath the top mount
* @from: mount to mount beneath
* @to: mount under which to mount
+ * @mp: mountpoint of @to
*
* - Make sure that @to->dentry is actually the root of a mount under
* which we can mount another mount.
@@ -3734,9 +3738,8 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool a
if (!anon)
new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
refcount_set(&new_ns->ns.count, 1);
- INIT_LIST_HEAD(&new_ns->list);
+ new_ns->mounts = RB_ROOT;
init_waitqueue_head(&new_ns->poll);
- spin_lock_init(&new_ns->ns_lock);
new_ns->user_ns = get_user_ns(user_ns);
new_ns->ucounts = ucounts;
return new_ns;
@@ -3783,7 +3786,6 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
unlock_mount_hash();
}
new_ns->root = new;
- list_add_tail(&new_ns->list, &new->mnt_list);
/*
* Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -3793,8 +3795,8 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
p = old;
q = new;
while (p) {
- q->mnt_ns = new_ns;
- new_ns->mounts++;
+ mnt_add_to_ns(new_ns, q);
+ new_ns->nr_mounts++;
if (new_fs) {
if (&p->mnt == new_fs->root.mnt) {
new_fs->root.mnt = mntget(&q->mnt);
@@ -3836,10 +3838,9 @@ struct dentry *mount_subtree(struct vfsmount *m, const char *name)
mntput(m);
return ERR_CAST(ns);
}
- mnt->mnt_ns = ns;
ns->root = mnt;
- ns->mounts++;
- list_add(&mnt->mnt_list, &ns->list);
+ ns->nr_mounts++;
+ mnt_add_to_ns(ns, mnt);
err = vfs_path_lookup(m->mnt_root, m,
name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
@@ -4017,10 +4018,9 @@ SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
goto err_path;
}
mnt = real_mount(newmount.mnt);
- mnt->mnt_ns = ns;
ns->root = mnt;
- ns->mounts = 1;
- list_add(&mnt->mnt_list, &ns->list);
+ ns->nr_mounts = 1;
+ mnt_add_to_ns(ns, mnt);
mntget(newmount.mnt);
/* Attach to an apparent O_PATH fd with a note that we need to unmount
@@ -4288,7 +4288,7 @@ static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
* Creating an idmapped mount with the filesystem wide idmapping
* doesn't make sense so block that. We don't allow mushy semantics.
*/
- if (!check_fsmapping(kattr->mnt_idmap, m->mnt_sb))
+ if (kattr->mnt_userns == m->mnt_sb->s_user_ns)
return -EINVAL;
/*
@@ -4676,6 +4676,438 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
return err;
}
+int show_path(struct seq_file *m, struct dentry *root)
+{
+ if (root->d_sb->s_op->show_path)
+ return root->d_sb->s_op->show_path(m, root);
+
+ seq_dentry(m, root, " \t\n\\");
+ return 0;
+}
+
+static struct vfsmount *lookup_mnt_in_ns(u64 id, struct mnt_namespace *ns)
+{
+ struct mount *mnt = mnt_find_id_at(ns, id);
+
+ if (!mnt || mnt->mnt_id_unique != id)
+ return NULL;
+
+ return &mnt->mnt;
+}
+
+struct kstatmount {
+ struct statmount __user *buf;
+ size_t bufsize;
+ struct vfsmount *mnt;
+ u64 mask;
+ struct path root;
+ struct statmount sm;
+ struct seq_file seq;
+};
+
+static u64 mnt_to_attr_flags(struct vfsmount *mnt)
+{
+ unsigned int mnt_flags = READ_ONCE(mnt->mnt_flags);
+ u64 attr_flags = 0;
+
+ if (mnt_flags & MNT_READONLY)
+ attr_flags |= MOUNT_ATTR_RDONLY;
+ if (mnt_flags & MNT_NOSUID)
+ attr_flags |= MOUNT_ATTR_NOSUID;
+ if (mnt_flags & MNT_NODEV)
+ attr_flags |= MOUNT_ATTR_NODEV;
+ if (mnt_flags & MNT_NOEXEC)
+ attr_flags |= MOUNT_ATTR_NOEXEC;
+ if (mnt_flags & MNT_NODIRATIME)
+ attr_flags |= MOUNT_ATTR_NODIRATIME;
+ if (mnt_flags & MNT_NOSYMFOLLOW)
+ attr_flags |= MOUNT_ATTR_NOSYMFOLLOW;
+
+ if (mnt_flags & MNT_NOATIME)
+ attr_flags |= MOUNT_ATTR_NOATIME;
+ else if (mnt_flags & MNT_RELATIME)
+ attr_flags |= MOUNT_ATTR_RELATIME;
+ else
+ attr_flags |= MOUNT_ATTR_STRICTATIME;
+
+ if (is_idmapped_mnt(mnt))
+ attr_flags |= MOUNT_ATTR_IDMAP;
+
+ return attr_flags;
+}
+
+static u64 mnt_to_propagation_flags(struct mount *m)
+{
+ u64 propagation = 0;
+
+ if (IS_MNT_SHARED(m))
+ propagation |= MS_SHARED;
+ if (IS_MNT_SLAVE(m))
+ propagation |= MS_SLAVE;
+ if (IS_MNT_UNBINDABLE(m))
+ propagation |= MS_UNBINDABLE;
+ if (!propagation)
+ propagation |= MS_PRIVATE;
+
+ return propagation;
+}
+
+static void statmount_sb_basic(struct kstatmount *s)
+{
+ struct super_block *sb = s->mnt->mnt_sb;
+
+ s->sm.mask |= STATMOUNT_SB_BASIC;
+ s->sm.sb_dev_major = MAJOR(sb->s_dev);
+ s->sm.sb_dev_minor = MINOR(sb->s_dev);
+ s->sm.sb_magic = sb->s_magic;
+ s->sm.sb_flags = sb->s_flags & (SB_RDONLY|SB_SYNCHRONOUS|SB_DIRSYNC|SB_LAZYTIME);
+}
+
+static void statmount_mnt_basic(struct kstatmount *s)
+{
+ struct mount *m = real_mount(s->mnt);
+
+ s->sm.mask |= STATMOUNT_MNT_BASIC;
+ s->sm.mnt_id = m->mnt_id_unique;
+ s->sm.mnt_parent_id = m->mnt_parent->mnt_id_unique;
+ s->sm.mnt_id_old = m->mnt_id;
+ s->sm.mnt_parent_id_old = m->mnt_parent->mnt_id;
+ s->sm.mnt_attr = mnt_to_attr_flags(&m->mnt);
+ s->sm.mnt_propagation = mnt_to_propagation_flags(m);
+ s->sm.mnt_peer_group = IS_MNT_SHARED(m) ? m->mnt_group_id : 0;
+ s->sm.mnt_master = IS_MNT_SLAVE(m) ? m->mnt_master->mnt_group_id : 0;
+}
+
+static void statmount_propagate_from(struct kstatmount *s)
+{
+ struct mount *m = real_mount(s->mnt);
+
+ s->sm.mask |= STATMOUNT_PROPAGATE_FROM;
+ if (IS_MNT_SLAVE(m))
+ s->sm.propagate_from = get_dominating_id(m, &current->fs->root);
+}
+
+static int statmount_mnt_root(struct kstatmount *s, struct seq_file *seq)
+{
+ int ret;
+ size_t start = seq->count;
+
+ ret = show_path(seq, s->mnt->mnt_root);
+ if (ret)
+ return ret;
+
+ if (unlikely(seq_has_overflowed(seq)))
+ return -EAGAIN;
+
+ /*
+ * Unescape the result. It would be better if the supplied string were not
+ * escaped in the first place, but that's a pretty invasive change.
+ */
+ seq->buf[seq->count] = '\0';
+ seq->count = start;
+ seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
+ return 0;
+}
+
+static int statmount_mnt_point(struct kstatmount *s, struct seq_file *seq)
+{
+ struct vfsmount *mnt = s->mnt;
+ struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+ int err;
+
+ err = seq_path_root(seq, &mnt_path, &s->root, "");
+ return err == SEQ_SKIP ? 0 : err;
+}
+
+static int statmount_fs_type(struct kstatmount *s, struct seq_file *seq)
+{
+ struct super_block *sb = s->mnt->mnt_sb;
+
+ seq_puts(seq, sb->s_type->name);
+ return 0;
+}
+
+static int statmount_string(struct kstatmount *s, u64 flag)
+{
+ int ret;
+ size_t kbufsize;
+ struct seq_file *seq = &s->seq;
+ struct statmount *sm = &s->sm;
+
+ switch (flag) {
+ case STATMOUNT_FS_TYPE:
+ sm->fs_type = seq->count;
+ ret = statmount_fs_type(s, seq);
+ break;
+ case STATMOUNT_MNT_ROOT:
+ sm->mnt_root = seq->count;
+ ret = statmount_mnt_root(s, seq);
+ break;
+ case STATMOUNT_MNT_POINT:
+ sm->mnt_point = seq->count;
+ ret = statmount_mnt_point(s, seq);
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ return -EINVAL;
+ }
+
+ if (unlikely(check_add_overflow(sizeof(*sm), seq->count, &kbufsize)))
+ return -EOVERFLOW;
+ if (kbufsize >= s->bufsize)
+ return -EOVERFLOW;
+
+ /* signal a retry */
+ if (unlikely(seq_has_overflowed(seq)))
+ return -EAGAIN;
+
+ if (ret)
+ return ret;
+
+ seq->buf[seq->count++] = '\0';
+ sm->mask |= flag;
+ return 0;
+}
+
+static int copy_statmount_to_user(struct kstatmount *s)
+{
+ struct statmount *sm = &s->sm;
+ struct seq_file *seq = &s->seq;
+ char __user *str = ((char __user *)s->buf) + sizeof(*sm);
+ size_t copysize = min_t(size_t, s->bufsize, sizeof(*sm));
+
+ if (seq->count && copy_to_user(str, seq->buf, seq->count))
+ return -EFAULT;
+
+ /* Return the number of bytes copied to the buffer */
+ sm->size = copysize + seq->count;
+ if (copy_to_user(s->buf, sm, copysize))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int do_statmount(struct kstatmount *s)
+{
+ struct mount *m = real_mount(s->mnt);
+ int err;
+
+ /*
+ * Don't trigger audit denials. We just want to determine what
+ * mounts to show users.
+ */
+ if (!is_path_reachable(m, m->mnt.mnt_root, &s->root) &&
+ !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = security_sb_statfs(s->mnt->mnt_root);
+ if (err)
+ return err;
+
+ if (s->mask & STATMOUNT_SB_BASIC)
+ statmount_sb_basic(s);
+
+ if (s->mask & STATMOUNT_MNT_BASIC)
+ statmount_mnt_basic(s);
+
+ if (s->mask & STATMOUNT_PROPAGATE_FROM)
+ statmount_propagate_from(s);
+
+ if (s->mask & STATMOUNT_FS_TYPE)
+ err = statmount_string(s, STATMOUNT_FS_TYPE);
+
+ if (!err && s->mask & STATMOUNT_MNT_ROOT)
+ err = statmount_string(s, STATMOUNT_MNT_ROOT);
+
+ if (!err && s->mask & STATMOUNT_MNT_POINT)
+ err = statmount_string(s, STATMOUNT_MNT_POINT);
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static inline bool retry_statmount(const long ret, size_t *seq_size)
+{
+ if (likely(ret != -EAGAIN))
+ return false;
+ if (unlikely(check_mul_overflow(*seq_size, 2, seq_size)))
+ return false;
+ if (unlikely(*seq_size > MAX_RW_COUNT))
+ return false;
+ return true;
+}
+
+static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq,
+ struct statmount __user *buf, size_t bufsize,
+ size_t seq_size)
+{
+ if (!access_ok(buf, bufsize))
+ return -EFAULT;
+
+ memset(ks, 0, sizeof(*ks));
+ ks->mask = kreq->param;
+ ks->buf = buf;
+ ks->bufsize = bufsize;
+ ks->seq.size = seq_size;
+ ks->seq.buf = kvmalloc(seq_size, GFP_KERNEL_ACCOUNT);
+ if (!ks->seq.buf)
+ return -ENOMEM;
+ return 0;
+}
+
+static int copy_mnt_id_req(const struct mnt_id_req __user *req,
+ struct mnt_id_req *kreq)
+{
+ int ret;
+ size_t usize;
+
+ BUILD_BUG_ON(sizeof(struct mnt_id_req) != MNT_ID_REQ_SIZE_VER0);
+
+ ret = get_user(usize, &req->size);
+ if (ret)
+ return -EFAULT;
+ if (unlikely(usize > PAGE_SIZE))
+ return -E2BIG;
+ if (unlikely(usize < MNT_ID_REQ_SIZE_VER0))
+ return -EINVAL;
+ memset(kreq, 0, sizeof(*kreq));
+ ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize);
+ if (ret)
+ return ret;
+ if (kreq->spare != 0)
+ return -EINVAL;
+ return 0;
+}
+
+SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
+ struct statmount __user *, buf, size_t, bufsize,
+ unsigned int, flags)
+{
+ struct vfsmount *mnt;
+ struct mnt_id_req kreq;
+ struct kstatmount ks;
+ /* We currently support retrieval of 3 strings. */
+ size_t seq_size = 3 * PATH_MAX;
+ int ret;
+
+ if (flags)
+ return -EINVAL;
+
+ ret = copy_mnt_id_req(req, &kreq);
+ if (ret)
+ return ret;
+
+retry:
+ ret = prepare_kstatmount(&ks, &kreq, buf, bufsize, seq_size);
+ if (ret)
+ return ret;
+
+ down_read(&namespace_sem);
+ mnt = lookup_mnt_in_ns(kreq.mnt_id, current->nsproxy->mnt_ns);
+ if (!mnt) {
+ up_read(&namespace_sem);
+ kvfree(ks.seq.buf);
+ return -ENOENT;
+ }
+
+ ks.mnt = mnt;
+ get_fs_root(current->fs, &ks.root);
+ ret = do_statmount(&ks);
+ path_put(&ks.root);
+ up_read(&namespace_sem);
+
+ if (!ret)
+ ret = copy_statmount_to_user(&ks);
+ kvfree(ks.seq.buf);
+ if (retry_statmount(ret, &seq_size))
+ goto retry;
+ return ret;
+}
+
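
For readers who want to see the new interface from the other side, a hedged userspace sketch follows. The syscall number, the struct mnt_id_req/statmount layout, MNT_ID_REQ_SIZE_VER0 and the STATMOUNT_* constants are assumed to come from the uapi headers added by this series; per the kernel code above, the string fields are byte offsets into the string area that starts right after the fixed-size struct.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>	/* assumed to provide statmount/mnt_id_req */

/* Illustrative only: query basic info and the mountpoint for one mount ID. */
static int show_mount(uint64_t mnt_id)
{
	struct mnt_id_req req = {
		.size	= MNT_ID_REQ_SIZE_VER0,
		.mnt_id	= mnt_id,
		.param	= STATMOUNT_SB_BASIC | STATMOUNT_MNT_BASIC |
			  STATMOUNT_FS_TYPE | STATMOUNT_MNT_POINT,
	};
	char buf[8192];
	struct statmount *sm = (struct statmount *)buf;
	const char *strings = (const char *)sm + sizeof(*sm);

	if (syscall(__NR_statmount, &req, sm, sizeof(buf), 0) < 0)
		return -1;

	printf("id %llu fstype %s mounted on %s\n",
	       (unsigned long long)sm->mnt_id,
	       strings + sm->fs_type, strings + sm->mnt_point);
	return 0;
}

A caller that gets -EAGAIN-style truncation back as a short/failed copy simply retries with a larger buffer, mirroring the in-kernel retry_statmount() loop.
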
+static struct mount *listmnt_next(struct mount *curr)
+{
+ return node_to_mount(rb_next(&curr->mnt_node));
+}
+
+static ssize_t do_listmount(struct mount *first, struct path *orig, u64 mnt_id,
+ u64 __user *buf, size_t bufsize,
+ const struct path *root)
+{
+ struct mount *r;
+ ssize_t ctr;
+ int err;
+
+ /*
+ * Don't trigger audit denials. We just want to determine what
+ * mounts to show users.
+ */
+ if (!is_path_reachable(real_mount(orig->mnt), orig->dentry, root) &&
+ !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = security_sb_statfs(orig->dentry);
+ if (err)
+ return err;
+
+ for (ctr = 0, r = first; r && ctr < bufsize; r = listmnt_next(r)) {
+ if (r->mnt_id_unique == mnt_id)
+ continue;
+ if (!is_path_reachable(r, r->mnt.mnt_root, orig))
+ continue;
+ ctr = array_index_nospec(ctr, bufsize);
+ if (put_user(r->mnt_id_unique, buf + ctr))
+ return -EFAULT;
+ if (check_add_overflow(ctr, 1, &ctr))
+ return -ERANGE;
+ }
+ return ctr;
+}
+
+SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
+ u64 __user *, buf, size_t, bufsize, unsigned int, flags)
+{
+ struct mnt_namespace *ns = current->nsproxy->mnt_ns;
+ struct mnt_id_req kreq;
+ struct mount *first;
+ struct path root, orig;
+ u64 mnt_id, last_mnt_id;
+ ssize_t ret;
+
+ if (flags)
+ return -EINVAL;
+
+ ret = copy_mnt_id_req(req, &kreq);
+ if (ret)
+ return ret;
+ mnt_id = kreq.mnt_id;
+ last_mnt_id = kreq.param;
+
+ down_read(&namespace_sem);
+ get_fs_root(current->fs, &root);
+ if (mnt_id == LSMT_ROOT) {
+ orig = root;
+ } else {
+ ret = -ENOENT;
+ orig.mnt = lookup_mnt_in_ns(mnt_id, ns);
+ if (!orig.mnt)
+ goto err;
+ orig.dentry = orig.mnt->mnt_root;
+ }
+ if (!last_mnt_id)
+ first = node_to_mount(rb_first(&ns->mounts));
+ else
+ first = mnt_find_id_at(ns, last_mnt_id + 1);
+
+ ret = do_listmount(first, &orig, mnt_id, buf, bufsize, &root);
+err:
+ path_put(&root);
+ up_read(&namespace_sem);
+ return ret;
+}
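
A matching hedged sketch for listmount(2): LSMT_ROOT and the continuation convention (kreq.param carries the last mount ID already returned, 0 to start) follow the kernel code above; everything else is assumed from the uapi headers.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>	/* assumed to provide mnt_id_req, LSMT_ROOT */

/* Illustrative only: print the unique IDs of all mounts reachable from
 * the caller's root, resuming from the last ID seen on each call. */
static void list_mounts(void)
{
	struct mnt_id_req req = {
		.size	= MNT_ID_REQ_SIZE_VER0,
		.mnt_id	= LSMT_ROOT,	/* mounts under the current root */
		.param	= 0,		/* no last-seen ID yet */
	};
	uint64_t ids[256];
	long n;

	while ((n = syscall(__NR_listmount, &req, ids, 256, 0)) > 0) {
		for (long i = 0; i < n; i++)
			printf("mount id %llu\n", (unsigned long long)ids[i]);
		req.param = ids[n - 1];	/* continue after the last one */
	}
}
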
+
+
static void __init init_mount_tree(void)
{
struct vfsmount *mnt;
@@ -4691,10 +5123,9 @@ static void __init init_mount_tree(void)
if (IS_ERR(ns))
panic("Can't allocate initial namespace");
m = real_mount(mnt);
- m->mnt_ns = ns;
ns->root = m;
- ns->mounts = 1;
- list_add(&m->mnt_list, &ns->list);
+ ns->nr_mounts = 1;
+ mnt_add_to_ns(ns, m);
init_task.nsproxy->mnt_ns = ns;
get_mnt_ns(ns);
@@ -4821,18 +5252,14 @@ static bool mnt_already_visible(struct mnt_namespace *ns,
int *new_mnt_flags)
{
int new_flags = *new_mnt_flags;
- struct mount *mnt;
+ struct mount *mnt, *n;
bool visible = false;
down_read(&namespace_sem);
- lock_ns_list(ns);
- list_for_each_entry(mnt, &ns->list, mnt_list) {
+ rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
struct mount *child;
int mnt_flags;
- if (mnt_is_cursor(mnt))
- continue;
-
if (mnt->mnt.mnt_sb->s_type != sb->s_type)
continue;
@@ -4880,7 +5307,6 @@ static bool mnt_already_visible(struct mnt_namespace *ns,
next: ;
}
found:
- unlock_ns_list(ns);
up_read(&namespace_sem);
return visible;
}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 3f9768810427..e8cccb94b927 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -567,7 +567,7 @@ const struct address_space_operations nfs_file_aops = {
.migrate_folio = nfs_migrate_folio,
.launder_folio = nfs_launder_folio,
.is_dirty_writeback = nfs_check_dirty_writeback,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
.swap_activate = nfs_swap_activate,
.swap_deactivate = nfs_swap_deactivate,
.swap_rw = nfs_swap_rw,
diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c
index 2ad66a8922f4..49aaf28a6950 100644
--- a/fs/nfs/nfs42xattr.c
+++ b/fs/nfs/nfs42xattr.c
@@ -132,7 +132,7 @@ nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry)
lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
&nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
- return list_lru_add(lru, &entry->lru);
+ return list_lru_add_obj(lru, &entry->lru);
}
static bool
@@ -143,7 +143,7 @@ nfs4_xattr_entry_lru_del(struct nfs4_xattr_entry *entry)
lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
&nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
- return list_lru_del(lru, &entry->lru);
+ return list_lru_del_obj(lru, &entry->lru);
}
/*
@@ -349,7 +349,7 @@ nfs4_xattr_cache_unlink(struct inode *inode)
oldcache = nfsi->xattr_cache;
if (oldcache != NULL) {
- list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru);
+ list_lru_del_obj(&nfs4_xattr_cache_lru, &oldcache->lru);
oldcache->inode = NULL;
}
nfsi->xattr_cache = NULL;
@@ -474,7 +474,7 @@ nfs4_xattr_get_cache(struct inode *inode, int add)
kref_get(&cache->ref);
nfsi->xattr_cache = cache;
cache->inode = inode;
- list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
+ list_lru_add_obj(&nfs4_xattr_cache_lru, &cache->lru);
}
spin_unlock(&inode->i_lock);
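
The nfs and nfsd hunks in this area switch from list_lru_add()/list_lru_del() to the new *_obj variants. As a hedged, simplified sketch of the distinction (the real helpers live in the list_lru code and also handle memcg-unaware LRUs and RCU protection):

/* Simplified illustration: the _obj variants infer the NUMA node and
 * memcg from the object itself, which is what the old two-argument
 * list_lru_add()/list_lru_del() used to do internally, while the plain
 * calls now take an explicit nid and memcg. */
static inline bool list_lru_add_obj_sketch(struct list_lru *lru,
					   struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));

	return list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
}

So the callers above keep their old behaviour; only the helper name changes.
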
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 02788c3c85e5..e238abc78a13 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -10,6 +10,7 @@
#include <linux/mount.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_ssc.h>
+#include <linux/splice.h>
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
@@ -195,8 +196,8 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
ret = __nfs4_copy_file_range(file_in, pos_in, file_out, pos_out, count,
flags);
if (ret == -EOPNOTSUPP || ret == -EXDEV)
- ret = generic_copy_file_range(file_in, pos_in, file_out,
- pos_out, count, flags);
+ ret = splice_copy_file_range(file_in, pos_in, file_out,
+ pos_out, count);
return ret;
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index b664caea8b4e..7248705faef4 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -192,13 +192,13 @@ static struct nfs_page *nfs_folio_find_private_request(struct folio *folio)
if (!folio_test_private(folio))
return NULL;
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
req = nfs_folio_private_request(folio);
if (req) {
WARN_ON_ONCE(req->wb_head != req);
kref_get(&req->wb_kref);
}
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return req;
}
@@ -769,13 +769,13 @@ static void nfs_inode_add_request(struct nfs_page *req)
* Swap-space should not get truncated. Hence no need to plug the race
* with invalidate/truncate.
*/
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
if (likely(!folio_test_swapcache(folio))) {
set_bit(PG_MAPPED, &req->wb_flags);
folio_set_private(folio);
folio->private = req;
}
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
atomic_long_inc(&nfsi->nrequests);
/* this a head request for a page group - mark it as having an
* extra reference so sub groups can follow suit.
@@ -796,13 +796,13 @@ static void nfs_inode_remove_request(struct nfs_page *req)
struct folio *folio = nfs_page_to_folio(req->wb_head);
struct address_space *mapping = folio_file_mapping(folio);
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
if (likely(folio && !folio_test_swapcache(folio))) {
folio->private = NULL;
folio_clear_private(folio);
clear_bit(PG_MAPPED, &req->wb_head->wb_flags);
}
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
}
if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index ef063f93fde9..6c2decfdeb4b 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -322,7 +322,7 @@ nfsd_file_check_writeback(struct nfsd_file *nf)
static bool nfsd_file_lru_add(struct nfsd_file *nf)
{
set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
- if (list_lru_add(&nfsd_file_lru, &nf->nf_lru)) {
+ if (list_lru_add_obj(&nfsd_file_lru, &nf->nf_lru)) {
trace_nfsd_file_lru_add(nf);
return true;
}
@@ -331,7 +331,7 @@ static bool nfsd_file_lru_add(struct nfsd_file *nf)
static bool nfsd_file_lru_remove(struct nfsd_file *nf)
{
- if (list_lru_del(&nfsd_file_lru, &nf->nf_lru)) {
+ if (list_lru_del_obj(&nfsd_file_lru, &nf->nf_lru)) {
trace_nfsd_file_lru_del(nf);
return true;
}
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 7cd513e59305..87fed75808ff 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -693,6 +693,7 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred
char *mesg = buf;
int fd, err;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv;
err = get_int(&mesg, &fd);
if (err != 0 || fd < 0)
@@ -703,15 +704,15 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred
if (err != 0)
return err;
- err = svc_addsock(nn->nfsd_serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
+ serv = nn->nfsd_serv;
+ err = svc_addsock(serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
- if (err < 0 && !nn->nfsd_serv->sv_nrthreads && !nn->keep_active)
+ if (err < 0 && !serv->sv_nrthreads && !nn->keep_active)
nfsd_last_thread(net);
- else if (err >= 0 &&
- !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
- svc_get(nn->nfsd_serv);
+ else if (err >= 0 && !serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+ svc_get(serv);
- nfsd_put(net);
+ svc_put(serv);
return err;
}
@@ -725,6 +726,7 @@ static ssize_t __write_ports_addxprt(char *buf, struct net *net, const struct cr
struct svc_xprt *xprt;
int port, err;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv;
if (sscanf(buf, "%15s %5u", transport, &port) != 2)
return -EINVAL;
@@ -737,32 +739,33 @@ static ssize_t __write_ports_addxprt(char *buf, struct net *net, const struct cr
if (err != 0)
return err;
- err = svc_xprt_create(nn->nfsd_serv, transport, net,
+ serv = nn->nfsd_serv;
+ err = svc_xprt_create(serv, transport, net,
PF_INET, port, SVC_SOCK_ANONYMOUS, cred);
if (err < 0)
goto out_err;
- err = svc_xprt_create(nn->nfsd_serv, transport, net,
+ err = svc_xprt_create(serv, transport, net,
PF_INET6, port, SVC_SOCK_ANONYMOUS, cred);
if (err < 0 && err != -EAFNOSUPPORT)
goto out_close;
- if (!nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
- svc_get(nn->nfsd_serv);
+ if (!serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+ svc_get(serv);
- nfsd_put(net);
+ svc_put(serv);
return 0;
out_close:
- xprt = svc_find_xprt(nn->nfsd_serv, transport, net, PF_INET, port);
+ xprt = svc_find_xprt(serv, transport, net, PF_INET, port);
if (xprt != NULL) {
svc_xprt_close(xprt);
svc_xprt_put(xprt);
}
out_err:
- if (!nn->nfsd_serv->sv_nrthreads && !nn->keep_active)
+ if (!serv->sv_nrthreads && !nn->keep_active)
nfsd_last_thread(net);
- nfsd_put(net);
+ svc_put(serv);
return err;
}
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 3286ffacbc56..9ed0e08d16c2 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -113,13 +113,6 @@ int nfsd_pool_stats_open(struct inode *, struct file *);
int nfsd_pool_stats_release(struct inode *, struct file *);
void nfsd_shutdown_threads(struct net *net);
-static inline void nfsd_put(struct net *net)
-{
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
-
- svc_put(nn->nfsd_serv);
-}
-
bool i_am_nfsd(void);
struct nfsdfs_client {
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index e01e4e2acbd9..707ef21f275b 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1039,7 +1039,10 @@ __be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
ssize_t host_err;
trace_nfsd_read_splice(rqstp, fhp, offset, *count);
- host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor);
+ host_err = rw_verify_area(READ, file, &offset, *count);
+ if (!host_err)
+ host_err = splice_direct_to_actor(file, &sd,
+ nfsd_direct_splice_actor);
return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
}
@@ -1176,9 +1179,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
since = READ_ONCE(file->f_wb_err);
if (verf)
nfsd_copy_write_verifier(verf, nn);
- file_start_write(file);
host_err = vfs_iter_write(file, &iter, &pos, flags);
- file_end_write(file);
if (host_err < 0) {
commit_reset_write_verifier(nn, rqstp, host_err);
goto out_nfserr;
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 5710833ac1cc..0131d83b912d 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -64,8 +64,8 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
set_buffer_mapped(bh);
set_buffer_uptodate(bh);
- unlock_page(bh->b_page);
- put_page(bh->b_page);
+ folio_unlock(bh->b_folio);
+ folio_put(bh->b_folio);
return bh;
}
@@ -75,7 +75,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
{
struct buffer_head *bh;
struct inode *inode = btnc->host;
- struct page *page;
+ struct folio *folio;
int err;
bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
@@ -83,7 +83,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
return -ENOMEM;
err = -EEXIST; /* internal code */
- page = bh->b_page;
+ folio = bh->b_folio;
if (buffer_uptodate(bh) || buffer_dirty(bh))
goto found;
@@ -130,8 +130,8 @@ found:
*pbh = bh;
out_locked:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return err;
}
@@ -145,19 +145,19 @@ out_locked:
void nilfs_btnode_delete(struct buffer_head *bh)
{
struct address_space *mapping;
- struct page *page = bh->b_page;
- pgoff_t index = page_index(page);
+ struct folio *folio = bh->b_folio;
+ pgoff_t index = folio->index;
int still_dirty;
- get_page(page);
- lock_page(page);
- wait_on_page_writeback(page);
+ folio_get(folio);
+ folio_lock(folio);
+ folio_wait_writeback(folio);
nilfs_forget_buffer(bh);
- still_dirty = PageDirty(page);
- mapping = page->mapping;
- unlock_page(page);
- put_page(page);
+ still_dirty = folio_test_dirty(folio);
+ mapping = folio->mapping;
+ folio_unlock(folio);
+ folio_put(folio);
if (!still_dirty && mapping)
invalidate_inode_pages2_range(mapping, index, index);
@@ -185,23 +185,23 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
ctxt->newbh = NULL;
if (inode->i_blkbits == PAGE_SHIFT) {
- struct page *opage = obh->b_page;
- lock_page(opage);
+ struct folio *ofolio = obh->b_folio;
+ folio_lock(ofolio);
retry:
/* BUG_ON(oldkey != obh->b_folio->index); */
- if (unlikely(oldkey != opage->index))
- NILFS_PAGE_BUG(opage,
+ if (unlikely(oldkey != ofolio->index))
+ NILFS_FOLIO_BUG(ofolio,
"invalid oldkey %lld (newkey=%lld)",
(unsigned long long)oldkey,
(unsigned long long)newkey);
xa_lock_irq(&btnc->i_pages);
- err = __xa_insert(&btnc->i_pages, newkey, opage, GFP_NOFS);
+ err = __xa_insert(&btnc->i_pages, newkey, ofolio, GFP_NOFS);
xa_unlock_irq(&btnc->i_pages);
/*
- * Note: page->index will not change to newkey until
+ * Note: folio->index will not change to newkey until
* nilfs_btnode_commit_change_key() will be called.
- * To protect the page in intermediate state, the page lock
+ * To protect the folio in intermediate state, the folio lock
* is held.
*/
if (!err)
@@ -213,7 +213,7 @@ retry:
if (!err)
goto retry;
/* fallback to copy mode */
- unlock_page(opage);
+ folio_unlock(ofolio);
}
nbh = nilfs_btnode_create_block(btnc, newkey);
@@ -225,7 +225,7 @@ retry:
return 0;
failed_unlock:
- unlock_page(obh->b_page);
+ folio_unlock(obh->b_folio);
return err;
}
@@ -238,15 +238,15 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
{
struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
- struct page *opage;
+ struct folio *ofolio;
if (oldkey == newkey)
return;
if (nbh == NULL) { /* blocksize == pagesize */
- opage = obh->b_page;
- if (unlikely(oldkey != opage->index))
- NILFS_PAGE_BUG(opage,
+ ofolio = obh->b_folio;
+ if (unlikely(oldkey != ofolio->index))
+ NILFS_FOLIO_BUG(ofolio,
"invalid oldkey %lld (newkey=%lld)",
(unsigned long long)oldkey,
(unsigned long long)newkey);
@@ -257,8 +257,8 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
__xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);
xa_unlock_irq(&btnc->i_pages);
- opage->index = obh->b_blocknr = newkey;
- unlock_page(opage);
+ ofolio->index = obh->b_blocknr = newkey;
+ folio_unlock(ofolio);
} else {
nilfs_copy_buffer(nbh, obh);
mark_buffer_dirty(nbh);
@@ -284,7 +284,7 @@ void nilfs_btnode_abort_change_key(struct address_space *btnc,
if (nbh == NULL) { /* blocksize == pagesize */
xa_erase_irq(&btnc->i_pages, newkey);
- unlock_page(ctxt->bh->b_page);
+ folio_unlock(ctxt->bh->b_folio);
} else {
/*
* When canceling a buffer that a prepare operation has
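For reference, the btnode.c hunks above all apply the same mechanical substitution; a minimal sketch of that pattern (illustrative only, not part of the patch), assuming @bh came from nilfs_grab_buffer() so its folio is locked and holds an extra reference:

	/* Sketch only: release a locked, referenced folio via its buffer head. */
	static void example_release_locked_folio(struct buffer_head *bh)
	{
		struct folio *folio = bh->b_folio;	/* formerly bh->b_page */

		folio_unlock(folio);			/* formerly unlock_page() */
		folio_put(folio);			/* formerly put_page() */
	}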
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 9ebefb3acb0e..39136637f715 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -552,11 +552,29 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
}
/**
- * nilfs_cpfile_get_cpinfo -
- * @cpfile:
- * @cno:
- * @ci:
- * @nci:
+ * nilfs_cpfile_get_cpinfo - get information on checkpoints
+ * @cpfile: checkpoint file inode
+ * @cnop: place to pass a starting checkpoint number and receive a
+ * checkpoint number to continue the search
+ * @mode: mode of checkpoints that the caller wants to retrieve
+ * @buf: buffer for storing checkpoints' information
+ * @cisz: byte size of one checkpoint info item in array
+ * @nci: number of checkpoint info items to retrieve
+ *
+ * nilfs_cpfile_get_cpinfo() searches for checkpoints in @mode state
+ * starting from the checkpoint number stored in @cnop, and stores
+ * information about found checkpoints in @buf.
+ * The buffer pointed to by @buf must be large enough to store information
+ * for @nci checkpoints. If at least one checkpoint info item is
+ * successfully retrieved, @cnop is updated to point to the checkpoint
+ * number from which to continue searching.
+ *
+ * Return: Count of checkpoint info items stored in the output buffer on
+ * success, or the following negative error code on failure.
+ * * %-EINVAL - Invalid checkpoint mode.
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - Invalid checkpoint number specified.
*/
ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
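The kernel-doc added above implies a simple resumable-search calling convention; a hedged sketch of such a caller follows (the starting checkpoint number, buffer size, and surrounding error handling are assumptions, not taken from the patch):

	__u64 cno = 1;			/* assumed starting checkpoint number */
	struct nilfs_cpinfo ci[16];
	ssize_t n;

	do {
		n = nilfs_cpfile_get_cpinfo(cpfile, &cno, NILFS_CHECKPOINT,
					    ci, sizeof(ci[0]), ARRAY_SIZE(ci));
		if (n < 0)
			break;		/* -EINVAL, -ENOMEM, -EIO or -ENOENT */
		/* process n checkpoint info items; cno now points past them */
	} while (n == ARRAY_SIZE(ci));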
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index de2073c47651..bc846b904b68 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -64,12 +64,6 @@ static inline unsigned int nilfs_chunk_size(struct inode *inode)
return inode->i_sb->s_blocksize;
}
-static inline void nilfs_put_page(struct page *page)
-{
- kunmap(page);
- put_page(page);
-}
-
/*
* Return the offset into page `page_nr' of the last valid
* byte in that page, plus one.
@@ -84,48 +78,46 @@ static unsigned int nilfs_last_byte(struct inode *inode, unsigned long page_nr)
return last_byte;
}
-static int nilfs_prepare_chunk(struct page *page, unsigned int from,
+static int nilfs_prepare_chunk(struct folio *folio, unsigned int from,
unsigned int to)
{
- loff_t pos = page_offset(page) + from;
+ loff_t pos = folio_pos(folio) + from;
- return __block_write_begin(page, pos, to - from, nilfs_get_block);
+ return __block_write_begin(&folio->page, pos, to - from, nilfs_get_block);
}
-static void nilfs_commit_chunk(struct page *page,
- struct address_space *mapping,
- unsigned int from, unsigned int to)
+static void nilfs_commit_chunk(struct folio *folio,
+ struct address_space *mapping, size_t from, size_t to)
{
struct inode *dir = mapping->host;
- loff_t pos = page_offset(page) + from;
- unsigned int len = to - from;
- unsigned int nr_dirty, copied;
+ loff_t pos = folio_pos(folio) + from;
+ size_t copied, len = to - from;
+ unsigned int nr_dirty;
int err;
- nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
- copied = block_write_end(NULL, mapping, pos, len, len, page, NULL);
+ nr_dirty = nilfs_page_count_clean_buffers(&folio->page, from, to);
+ copied = block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
if (pos + copied > dir->i_size)
i_size_write(dir, pos + copied);
if (IS_DIRSYNC(dir))
nilfs_set_transaction_flag(NILFS_TI_SYNC);
err = nilfs_set_file_dirty(dir, nr_dirty);
WARN_ON(err); /* do not happen */
- unlock_page(page);
+ folio_unlock(folio);
}
-static bool nilfs_check_page(struct page *page)
+static bool nilfs_check_folio(struct folio *folio, char *kaddr)
{
- struct inode *dir = page->mapping->host;
+ struct inode *dir = folio->mapping->host;
struct super_block *sb = dir->i_sb;
unsigned int chunk_size = nilfs_chunk_size(dir);
- char *kaddr = page_address(page);
- unsigned int offs, rec_len;
- unsigned int limit = PAGE_SIZE;
+ size_t offs, rec_len;
+ size_t limit = folio_size(folio);
struct nilfs_dir_entry *p;
char *error;
- if ((dir->i_size >> PAGE_SHIFT) == page->index) {
- limit = dir->i_size & ~PAGE_MASK;
+ if (dir->i_size < folio_pos(folio) + limit) {
+ limit = dir->i_size - folio_pos(folio);
if (limit & (chunk_size - 1))
goto Ebadsize;
if (!limit)
@@ -147,7 +139,7 @@ static bool nilfs_check_page(struct page *page)
if (offs != limit)
goto Eend;
out:
- SetPageChecked(page);
+ folio_set_checked(folio);
return true;
/* Too bad, we had an error */
@@ -170,8 +162,8 @@ Espan:
error = "directory entry across blocks";
bad_entry:
nilfs_error(sb,
- "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
- dir->i_ino, error, (page->index << PAGE_SHIFT) + offs,
+ "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%zd, name_len=%d",
+ dir->i_ino, error, (folio->index << PAGE_SHIFT) + offs,
(unsigned long)le64_to_cpu(p->inode),
rec_len, p->name_len);
goto fail;
@@ -179,29 +171,34 @@ Eend:
p = (struct nilfs_dir_entry *)(kaddr + offs);
nilfs_error(sb,
"entry in directory #%lu spans the page boundary offset=%lu, inode=%lu",
- dir->i_ino, (page->index << PAGE_SHIFT) + offs,
+ dir->i_ino, (folio->index << PAGE_SHIFT) + offs,
(unsigned long)le64_to_cpu(p->inode));
fail:
- SetPageError(page);
+ folio_set_error(folio);
return false;
}
-static struct page *nilfs_get_page(struct inode *dir, unsigned long n)
+static void *nilfs_get_folio(struct inode *dir, unsigned long n,
+ struct folio **foliop)
{
struct address_space *mapping = dir->i_mapping;
- struct page *page = read_mapping_page(mapping, n, NULL);
+ struct folio *folio = read_mapping_folio(mapping, n, NULL);
+ void *kaddr;
- if (!IS_ERR(page)) {
- kmap(page);
- if (unlikely(!PageChecked(page))) {
- if (!nilfs_check_page(page))
- goto fail;
- }
+ if (IS_ERR(folio))
+ return folio;
+
+ kaddr = kmap_local_folio(folio, 0);
+ if (unlikely(!folio_test_checked(folio))) {
+ if (!nilfs_check_folio(folio, kaddr))
+ goto fail;
}
- return page;
+
+ *foliop = folio;
+ return kaddr;
fail:
- nilfs_put_page(page);
+ folio_release_kmap(folio, kaddr);
return ERR_PTR(-EIO);
}
@@ -275,21 +272,21 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
for ( ; n < npages; n++, offset = 0) {
char *kaddr, *limit;
struct nilfs_dir_entry *de;
- struct page *page = nilfs_get_page(inode, n);
+ struct folio *folio;
- if (IS_ERR(page)) {
+ kaddr = nilfs_get_folio(inode, n, &folio);
+ if (IS_ERR(kaddr)) {
nilfs_error(sb, "bad page in #%lu", inode->i_ino);
ctx->pos += PAGE_SIZE - offset;
return -EIO;
}
- kaddr = page_address(page);
de = (struct nilfs_dir_entry *)(kaddr + offset);
limit = kaddr + nilfs_last_byte(inode, n) -
NILFS_DIR_REC_LEN(1);
for ( ; (char *)de <= limit; de = nilfs_next_entry(de)) {
if (de->rec_len == 0) {
nilfs_error(sb, "zero-length directory entry");
- nilfs_put_page(page);
+ folio_release_kmap(folio, kaddr);
return -EIO;
}
if (de->inode) {
@@ -302,72 +299,67 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
if (!dir_emit(ctx, de->name, de->name_len,
le64_to_cpu(de->inode), t)) {
- nilfs_put_page(page);
+ folio_release_kmap(folio, kaddr);
return 0;
}
}
ctx->pos += nilfs_rec_len_from_disk(de->rec_len);
}
- nilfs_put_page(page);
+ folio_release_kmap(folio, kaddr);
}
return 0;
}
/*
- * nilfs_find_entry()
+ * nilfs_find_entry()
+ *
+ * Finds an entry in the specified directory with the wanted name. It
+ * returns the folio in which the entry was found, and the entry itself.
+ * The folio is mapped and unlocked. When the caller is finished with
+ * the entry, it should call folio_release_kmap().
*
- * finds an entry in the specified directory with the wanted name. It
- * returns the page in which the entry was found, and the entry itself
- * (as a parameter - res_dir). Page is returned mapped and unlocked.
- * Entry is guaranteed to be valid.
+ * On failure, returns NULL and the caller should ignore foliop.
*/
-struct nilfs_dir_entry *
-nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
- struct page **res_page)
+struct nilfs_dir_entry *nilfs_find_entry(struct inode *dir,
+ const struct qstr *qstr, struct folio **foliop)
{
const unsigned char *name = qstr->name;
int namelen = qstr->len;
unsigned int reclen = NILFS_DIR_REC_LEN(namelen);
unsigned long start, n;
unsigned long npages = dir_pages(dir);
- struct page *page = NULL;
struct nilfs_inode_info *ei = NILFS_I(dir);
struct nilfs_dir_entry *de;
if (npages == 0)
goto out;
- /* OFFSET_CACHE */
- *res_page = NULL;
-
start = ei->i_dir_start_lookup;
if (start >= npages)
start = 0;
n = start;
do {
- char *kaddr;
+ char *kaddr = nilfs_get_folio(dir, n, foliop);
- page = nilfs_get_page(dir, n);
- if (!IS_ERR(page)) {
- kaddr = page_address(page);
+ if (!IS_ERR(kaddr)) {
de = (struct nilfs_dir_entry *)kaddr;
kaddr += nilfs_last_byte(dir, n) - reclen;
while ((char *) de <= kaddr) {
if (de->rec_len == 0) {
nilfs_error(dir->i_sb,
"zero-length directory entry");
- nilfs_put_page(page);
+ folio_release_kmap(*foliop, kaddr);
goto out;
}
if (nilfs_match(namelen, name, de))
goto found;
de = nilfs_next_entry(de);
}
- nilfs_put_page(page);
+ folio_release_kmap(*foliop, kaddr);
}
if (++n >= npages)
n = 0;
- /* next page is past the blocks we've got */
+ /* next folio is past the blocks we've got */
if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
nilfs_error(dir->i_sb,
"dir %lu size %lld exceeds block count %llu",
@@ -380,55 +372,47 @@ out:
return NULL;
found:
- *res_page = page;
ei->i_dir_start_lookup = n;
return de;
}
-struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
+struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct folio **foliop)
{
- struct page *page = nilfs_get_page(dir, 0);
- struct nilfs_dir_entry *de = NULL;
+ struct nilfs_dir_entry *de = nilfs_get_folio(dir, 0, foliop);
- if (!IS_ERR(page)) {
- de = nilfs_next_entry(
- (struct nilfs_dir_entry *)page_address(page));
- *p = page;
- }
- return de;
+ if (IS_ERR(de))
+ return NULL;
+ return nilfs_next_entry(de);
}
ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
{
ino_t res = 0;
struct nilfs_dir_entry *de;
- struct page *page;
+ struct folio *folio;
- de = nilfs_find_entry(dir, qstr, &page);
+ de = nilfs_find_entry(dir, qstr, &folio);
if (de) {
res = le64_to_cpu(de->inode);
- kunmap(page);
- put_page(page);
+ folio_release_kmap(folio, de);
}
return res;
}
-/* Releases the page */
void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
- struct page *page, struct inode *inode)
+ struct folio *folio, struct inode *inode)
{
- unsigned int from = (char *)de - (char *)page_address(page);
- unsigned int to = from + nilfs_rec_len_from_disk(de->rec_len);
- struct address_space *mapping = page->mapping;
+ size_t from = offset_in_folio(folio, de);
+ size_t to = from + nilfs_rec_len_from_disk(de->rec_len);
+ struct address_space *mapping = folio->mapping;
int err;
- lock_page(page);
- err = nilfs_prepare_chunk(page, from, to);
+ folio_lock(folio);
+ err = nilfs_prepare_chunk(folio, from, to);
BUG_ON(err);
de->inode = cpu_to_le64(inode->i_ino);
nilfs_set_de_type(de, inode);
- nilfs_commit_chunk(page, mapping, from, to);
- nilfs_put_page(page);
+ nilfs_commit_chunk(folio, mapping, from, to);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
}
@@ -443,31 +427,28 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
unsigned int chunk_size = nilfs_chunk_size(dir);
unsigned int reclen = NILFS_DIR_REC_LEN(namelen);
unsigned short rec_len, name_len;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct nilfs_dir_entry *de;
unsigned long npages = dir_pages(dir);
unsigned long n;
- char *kaddr;
- unsigned int from, to;
+ size_t from, to;
int err;
/*
* We take care of directory expansion in the same loop.
- * This code plays outside i_size, so it locks the page
+ * This code plays outside i_size, so it locks the folio
* to protect that region.
*/
for (n = 0; n <= npages; n++) {
+ char *kaddr = nilfs_get_folio(dir, n, &folio);
char *dir_end;
- page = nilfs_get_page(dir, n);
- err = PTR_ERR(page);
- if (IS_ERR(page))
- goto out;
- lock_page(page);
- kaddr = page_address(page);
+ if (IS_ERR(kaddr))
+ return PTR_ERR(kaddr);
+ folio_lock(folio);
dir_end = kaddr + nilfs_last_byte(dir, n);
de = (struct nilfs_dir_entry *)kaddr;
- kaddr += PAGE_SIZE - reclen;
+ kaddr += folio_size(folio) - reclen;
while ((char *)de <= kaddr) {
if ((char *)de == dir_end) {
/* We hit i_size */
@@ -494,16 +475,16 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
goto got_it;
de = (struct nilfs_dir_entry *)((char *)de + rec_len);
}
- unlock_page(page);
- nilfs_put_page(page);
+ folio_unlock(folio);
+ folio_release_kmap(folio, kaddr);
}
BUG();
return -EINVAL;
got_it:
- from = (char *)de - (char *)page_address(page);
+ from = offset_in_folio(folio, de);
to = from + rec_len;
- err = nilfs_prepare_chunk(page, from, to);
+ err = nilfs_prepare_chunk(folio, from, to);
if (err)
goto out_unlock;
if (de->inode) {
@@ -518,29 +499,28 @@ got_it:
memcpy(de->name, name, namelen);
de->inode = cpu_to_le64(inode->i_ino);
nilfs_set_de_type(de, inode);
- nilfs_commit_chunk(page, page->mapping, from, to);
+ nilfs_commit_chunk(folio, folio->mapping, from, to);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
nilfs_mark_inode_dirty(dir);
/* OFFSET_CACHE */
out_put:
- nilfs_put_page(page);
-out:
+ folio_release_kmap(folio, de);
return err;
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
goto out_put;
}
/*
* nilfs_delete_entry deletes a directory entry by merging it with the
- * previous entry. Page is up-to-date. Releases the page.
+ * previous entry. Folio is up-to-date.
*/
-int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
+int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct folio *folio)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
struct inode *inode = mapping->host;
- char *kaddr = page_address(page);
- unsigned int from, to;
+ char *kaddr = (char *)((unsigned long)dir & ~(folio_size(folio) - 1));
+ size_t from, to;
struct nilfs_dir_entry *de, *pde = NULL;
int err;
@@ -559,17 +539,16 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
de = nilfs_next_entry(de);
}
if (pde)
- from = (char *)pde - (char *)page_address(page);
- lock_page(page);
- err = nilfs_prepare_chunk(page, from, to);
+ from = (char *)pde - kaddr;
+ folio_lock(folio);
+ err = nilfs_prepare_chunk(folio, from, to);
BUG_ON(err);
if (pde)
pde->rec_len = nilfs_rec_len_to_disk(to - from);
dir->inode = 0;
- nilfs_commit_chunk(page, mapping, from, to);
+ nilfs_commit_chunk(folio, mapping, from, to);
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
out:
- nilfs_put_page(page);
return err;
}
@@ -579,21 +558,21 @@ out:
int nilfs_make_empty(struct inode *inode, struct inode *parent)
{
struct address_space *mapping = inode->i_mapping;
- struct page *page = grab_cache_page(mapping, 0);
+ struct folio *folio = filemap_grab_folio(mapping, 0);
unsigned int chunk_size = nilfs_chunk_size(inode);
struct nilfs_dir_entry *de;
int err;
void *kaddr;
- if (!page)
- return -ENOMEM;
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- err = nilfs_prepare_chunk(page, 0, chunk_size);
+ err = nilfs_prepare_chunk(folio, 0, chunk_size);
if (unlikely(err)) {
- unlock_page(page);
+ folio_unlock(folio);
goto fail;
}
- kaddr = kmap_atomic(page);
+ kaddr = kmap_local_folio(folio, 0);
memset(kaddr, 0, chunk_size);
de = (struct nilfs_dir_entry *)kaddr;
de->name_len = 1;
@@ -608,10 +587,10 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
de->inode = cpu_to_le64(parent->i_ino);
memcpy(de->name, "..\0", 4);
nilfs_set_de_type(de, inode);
- kunmap_atomic(kaddr);
- nilfs_commit_chunk(page, mapping, 0, chunk_size);
+ kunmap_local(kaddr);
+ nilfs_commit_chunk(folio, mapping, 0, chunk_size);
fail:
- put_page(page);
+ folio_put(folio);
return err;
}
@@ -620,18 +599,17 @@ fail:
*/
int nilfs_empty_dir(struct inode *inode)
{
- struct page *page = NULL;
+ struct folio *folio = NULL;
+ char *kaddr;
unsigned long i, npages = dir_pages(inode);
for (i = 0; i < npages; i++) {
- char *kaddr;
struct nilfs_dir_entry *de;
- page = nilfs_get_page(inode, i);
- if (IS_ERR(page))
+ kaddr = nilfs_get_folio(inode, i, &folio);
+ if (IS_ERR(kaddr))
continue;
- kaddr = page_address(page);
de = (struct nilfs_dir_entry *)kaddr;
kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);
@@ -657,12 +635,12 @@ int nilfs_empty_dir(struct inode *inode)
}
de = nilfs_next_entry(de);
}
- nilfs_put_page(page);
+ folio_release_kmap(folio, kaddr);
}
return 1;
not_empty:
- nilfs_put_page(page);
+ folio_release_kmap(folio, kaddr);
return 0;
}
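Taken together, the dir.c hunks converge on a single lookup/release pairing; a short sketch of that idiom (assuming @dir and the page index @n are already validated):

	struct folio *folio;
	char *kaddr = nilfs_get_folio(dir, n, &folio);

	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);
	/* ... walk nilfs_dir_entry records within the mapped area ... */
	folio_release_kmap(folio, kaddr);	/* drops both the kmap and the reference */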
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 740ce26d1e76..bec33b89a075 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -45,34 +45,36 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct inode *inode = file_inode(vma->vm_file);
struct nilfs_transaction_info ti;
+ struct buffer_head *bh, *head;
int ret = 0;
if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
return VM_FAULT_SIGBUS; /* -ENOSPC */
sb_start_pagefault(inode->i_sb);
- lock_page(page);
- if (page->mapping != inode->i_mapping ||
- page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
- unlock_page(page);
+ folio_lock(folio);
+ if (folio->mapping != inode->i_mapping ||
+ folio_pos(folio) >= i_size_read(inode) ||
+ !folio_test_uptodate(folio)) {
+ folio_unlock(folio);
ret = -EFAULT; /* make the VM retry the fault */
goto out;
}
/*
- * check to see if the page is mapped already (no holes)
+ * check to see if the folio is mapped already (no holes)
*/
- if (PageMappedToDisk(page))
+ if (folio_test_mappedtodisk(folio))
goto mapped;
- if (page_has_buffers(page)) {
- struct buffer_head *bh, *head;
+ head = folio_buffers(folio);
+ if (head) {
int fully_mapped = 1;
- bh = head = page_buffers(page);
+ bh = head;
do {
if (!buffer_mapped(bh)) {
fully_mapped = 0;
@@ -81,11 +83,11 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
} while (bh = bh->b_this_page, bh != head);
if (fully_mapped) {
- SetPageMappedToDisk(page);
+ folio_set_mappedtodisk(folio);
goto mapped;
}
}
- unlock_page(page);
+ folio_unlock(folio);
/*
* fill hole blocks
@@ -105,7 +107,7 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
nilfs_transaction_commit(inode->i_sb);
mapped:
- wait_for_stable_page(page);
+ folio_wait_stable(folio);
out:
sb_end_pagefault(inode->i_sb);
return vmf_fs_error(ret);
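The mkwrite hunk above also shows the buffer-walk idiom that replaces the page_has_buffers()/page_buffers() pair; a minimal sketch, assuming @folio is locked:

	struct buffer_head *bh, *head;
	bool fully_mapped = true;

	head = folio_buffers(folio);	/* NULL when the folio has no buffers */
	if (head) {
		bh = head;
		do {
			if (!buffer_mapped(bh)) {
				fully_mapped = false;
				break;
			}
		} while (bh = bh->b_this_page, bh != head);
	}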
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 8beb2730929d..bf9a11d58817 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -98,8 +98,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
*out_bh = bh;
failed:
- unlock_page(bh->b_page);
- put_page(bh->b_page);
+ folio_unlock(bh->b_folio);
+ folio_put(bh->b_folio);
if (unlikely(err))
brelse(bh);
return err;
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index f861f3a0bf5c..9c334c722fc1 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -175,7 +175,8 @@ static int nilfs_writepages(struct address_space *mapping,
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
- struct inode *inode = page->mapping->host;
+ struct folio *folio = page_folio(page);
+ struct inode *inode = folio->mapping->host;
int err;
if (sb_rdonly(inode->i_sb)) {
@@ -185,13 +186,13 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
* have dirty pages that try to be flushed in background.
* So, here we simply discard this dirty page.
*/
- nilfs_clear_dirty_page(page, false);
- unlock_page(page);
+ nilfs_clear_folio_dirty(folio, false);
+ folio_unlock(folio);
return -EROFS;
}
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
+ folio_redirty_for_writepage(wbc, folio);
+ folio_unlock(folio);
if (wbc->sync_mode == WB_SYNC_ALL) {
err = nilfs_construct_segment(inode->i_sb);
@@ -214,7 +215,7 @@ static bool nilfs_dirty_folio(struct address_space *mapping,
/*
* The page may not be locked, eg if called from try_to_unmap_one()
*/
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
head = folio_buffers(folio);
if (head) {
struct buffer_head *bh = head;
@@ -230,7 +231,7 @@ static bool nilfs_dirty_folio(struct address_space *mapping,
} else if (ret) {
nr_dirty = 1 << (folio_shift(folio) - inode->i_blkbits);
}
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
if (nr_dirty)
nilfs_set_file_dirty(inode, nr_dirty);
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 40ffade49f38..cfb6aca5ec38 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -872,16 +872,14 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
nsegs = argv[4].v_nmembs;
if (argv[4].v_size != argsz[4])
goto out;
- if (nsegs > UINT_MAX / sizeof(__u64))
- goto out;
/*
* argv[4] points to segment numbers this ioctl cleans. We
- * use kmalloc() for its buffer because memory used for the
- * segment numbers is enough small.
+ * use kmalloc() for its buffer because the memory used for the
+ * segment numbers is small enough.
*/
- kbufs[4] = memdup_user((void __user *)(unsigned long)argv[4].v_base,
- nsegs * sizeof(__u64));
+ kbufs[4] = memdup_array_user((void __user *)(unsigned long)argv[4].v_base,
+ nsegs, sizeof(__u64));
if (IS_ERR(kbufs[4])) {
ret = PTR_ERR(kbufs[4]);
goto out;
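The ioctl hunk can drop its open-coded "nsegs > UINT_MAX / sizeof(__u64)" guard because memdup_array_user() performs the size multiplication with overflow checking itself; a sketch of the effect (user_ptr and kbuf are placeholder names, error handling abbreviated):

	void *kbuf = memdup_array_user(user_ptr, nsegs, sizeof(__u64));

	if (IS_ERR(kbuf))		/* ERR_PTR(-EOVERFLOW) if nsegs * 8 would wrap */
		return PTR_ERR(kbuf);
	/* ... use the copied segment numbers, then kfree(kbuf) ... */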
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index c97c77a39668..e45c01a559c0 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -97,8 +97,8 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
}
failed_bh:
- unlock_page(bh->b_page);
- put_page(bh->b_page);
+ folio_unlock(bh->b_folio);
+ folio_put(bh->b_folio);
brelse(bh);
failed_unlock:
@@ -158,8 +158,8 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, blk_opf_t opf,
*out_bh = bh;
failed_bh:
- unlock_page(bh->b_page);
- put_page(bh->b_page);
+ folio_unlock(bh->b_folio);
+ folio_put(bh->b_folio);
brelse(bh);
failed:
return ret;
@@ -399,7 +399,8 @@ int nilfs_mdt_fetch_dirty(struct inode *inode)
static int
nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
{
- struct inode *inode = page->mapping->host;
+ struct folio *folio = page_folio(page);
+ struct inode *inode = folio->mapping->host;
struct super_block *sb;
int err = 0;
@@ -407,16 +408,16 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
/*
* It means that filesystem was remounted in read-only
* mode because of error or metadata corruption. But we
- * have dirty pages that try to be flushed in background.
- * So, here we simply discard this dirty page.
+ * have dirty folios that try to be flushed in background.
+ * So, here we simply discard this dirty folio.
*/
- nilfs_clear_dirty_page(page, false);
- unlock_page(page);
+ nilfs_clear_folio_dirty(folio, false);
+ folio_unlock(folio);
return -EROFS;
}
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
+ folio_redirty_for_writepage(wbc, folio);
+ folio_unlock(folio);
if (!inode)
return 0;
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 2a4e7f4a8102..959bd9fb3d81 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -260,11 +260,11 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode;
struct nilfs_dir_entry *de;
- struct page *page;
+ struct folio *folio;
int err;
err = -ENOENT;
- de = nilfs_find_entry(dir, &dentry->d_name, &page);
+ de = nilfs_find_entry(dir, &dentry->d_name, &folio);
if (!de)
goto out;
@@ -279,7 +279,8 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
inode->i_ino, inode->i_nlink);
set_nlink(inode, 1);
}
- err = nilfs_delete_entry(de, page);
+ err = nilfs_delete_entry(de, folio);
+ folio_release_kmap(folio, de);
if (err)
goto out;
@@ -347,9 +348,9 @@ static int nilfs_rename(struct mnt_idmap *idmap,
{
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
- struct page *dir_page = NULL;
+ struct folio *dir_folio = NULL;
struct nilfs_dir_entry *dir_de = NULL;
- struct page *old_page;
+ struct folio *old_folio;
struct nilfs_dir_entry *old_de;
struct nilfs_transaction_info ti;
int err;
@@ -362,19 +363,19 @@ static int nilfs_rename(struct mnt_idmap *idmap,
return err;
err = -ENOENT;
- old_de = nilfs_find_entry(old_dir, &old_dentry->d_name, &old_page);
+ old_de = nilfs_find_entry(old_dir, &old_dentry->d_name, &old_folio);
if (!old_de)
goto out;
if (S_ISDIR(old_inode->i_mode)) {
err = -EIO;
- dir_de = nilfs_dotdot(old_inode, &dir_page);
+ dir_de = nilfs_dotdot(old_inode, &dir_folio);
if (!dir_de)
goto out_old;
}
if (new_inode) {
- struct page *new_page;
+ struct folio *new_folio;
struct nilfs_dir_entry *new_de;
err = -ENOTEMPTY;
@@ -382,10 +383,11 @@ static int nilfs_rename(struct mnt_idmap *idmap,
goto out_dir;
err = -ENOENT;
- new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
+ new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_folio);
if (!new_de)
goto out_dir;
- nilfs_set_link(new_dir, new_de, new_page, old_inode);
+ nilfs_set_link(new_dir, new_de, new_folio, old_inode);
+ folio_release_kmap(new_folio, new_de);
nilfs_mark_inode_dirty(new_dir);
inode_set_ctime_current(new_inode);
if (dir_de)
@@ -408,12 +410,15 @@ static int nilfs_rename(struct mnt_idmap *idmap,
*/
inode_set_ctime_current(old_inode);
- nilfs_delete_entry(old_de, old_page);
+ nilfs_delete_entry(old_de, old_folio);
if (dir_de) {
- nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
+ nilfs_set_link(old_inode, dir_de, dir_folio, new_dir);
+ folio_release_kmap(dir_folio, dir_de);
drop_nlink(old_dir);
}
+ folio_release_kmap(old_folio, old_de);
+
nilfs_mark_inode_dirty(old_dir);
nilfs_mark_inode_dirty(old_inode);
@@ -421,13 +426,10 @@ static int nilfs_rename(struct mnt_idmap *idmap,
return err;
out_dir:
- if (dir_de) {
- kunmap(dir_page);
- put_page(dir_page);
- }
+ if (dir_de)
+ folio_release_kmap(dir_folio, dir_de);
out_old:
- kunmap(old_page);
- put_page(old_page);
+ folio_release_kmap(old_folio, old_de);
out:
nilfs_transaction_abort(old_dir->i_sb);
return err;
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 8046490cd7fe..98cffaf0ac12 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -226,16 +226,16 @@ static inline __u32 nilfs_mask_flags(umode_t mode, __u32 flags)
}
/* dir.c */
-extern int nilfs_add_link(struct dentry *, struct inode *);
-extern ino_t nilfs_inode_by_name(struct inode *, const struct qstr *);
-extern int nilfs_make_empty(struct inode *, struct inode *);
-extern struct nilfs_dir_entry *
-nilfs_find_entry(struct inode *, const struct qstr *, struct page **);
-extern int nilfs_delete_entry(struct nilfs_dir_entry *, struct page *);
-extern int nilfs_empty_dir(struct inode *);
-extern struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct page **);
-extern void nilfs_set_link(struct inode *, struct nilfs_dir_entry *,
- struct page *, struct inode *);
+int nilfs_add_link(struct dentry *, struct inode *);
+ino_t nilfs_inode_by_name(struct inode *, const struct qstr *);
+int nilfs_make_empty(struct inode *, struct inode *);
+struct nilfs_dir_entry *nilfs_find_entry(struct inode *, const struct qstr *,
+ struct folio **);
+int nilfs_delete_entry(struct nilfs_dir_entry *, struct folio *);
+int nilfs_empty_dir(struct inode *);
+struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct folio **);
+void nilfs_set_link(struct inode *, struct nilfs_dir_entry *,
+ struct folio *, struct inode *);
/* file.c */
extern int nilfs_sync_file(struct file *, loff_t, loff_t, int);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 06b04758f289..5c2eba1987bd 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -73,7 +73,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
*/
void nilfs_forget_buffer(struct buffer_head *bh)
{
- struct page *page = bh->b_page;
+ struct folio *folio = bh->b_folio;
const unsigned long clear_bits =
(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
@@ -81,12 +81,12 @@ void nilfs_forget_buffer(struct buffer_head *bh)
lock_buffer(bh);
set_mask_bits(&bh->b_state, clear_bits, 0);
- if (nilfs_page_buffers_clean(page))
- __nilfs_clear_page_dirty(page);
+ if (nilfs_folio_buffers_clean(folio))
+ __nilfs_clear_folio_dirty(folio);
bh->b_blocknr = -1;
- ClearPageUptodate(page);
- ClearPageMappedToDisk(page);
+ folio_clear_uptodate(folio);
+ folio_clear_mappedtodisk(folio);
unlock_buffer(bh);
brelse(bh);
}
@@ -131,48 +131,49 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
}
/**
- * nilfs_page_buffers_clean - check if a page has dirty buffers or not.
- * @page: page to be checked
+ * nilfs_folio_buffers_clean - Check if a folio has dirty buffers or not.
+ * @folio: Folio to be checked.
*
- * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
- * Otherwise, it returns non-zero value.
+ * nilfs_folio_buffers_clean() returns false if the folio has dirty buffers.
+ * Otherwise, it returns true.
*/
-int nilfs_page_buffers_clean(struct page *page)
+bool nilfs_folio_buffers_clean(struct folio *folio)
{
struct buffer_head *bh, *head;
- bh = head = page_buffers(page);
+ bh = head = folio_buffers(folio);
do {
if (buffer_dirty(bh))
- return 0;
+ return false;
bh = bh->b_this_page;
} while (bh != head);
- return 1;
+ return true;
}
-void nilfs_page_bug(struct page *page)
+void nilfs_folio_bug(struct folio *folio)
{
+ struct buffer_head *bh, *head;
struct address_space *m;
unsigned long ino;
- if (unlikely(!page)) {
- printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
+ if (unlikely(!folio)) {
+ printk(KERN_CRIT "NILFS_FOLIO_BUG(NULL)\n");
return;
}
- m = page->mapping;
+ m = folio->mapping;
ino = m ? m->host->i_ino : 0;
- printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
+ printk(KERN_CRIT "NILFS_FOLIO_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
"mapping=%p ino=%lu\n",
- page, page_ref_count(page),
- (unsigned long long)page->index, page->flags, m, ino);
+ folio, folio_ref_count(folio),
+ (unsigned long long)folio->index, folio->flags, m, ino);
- if (page_has_buffers(page)) {
- struct buffer_head *bh, *head;
+ head = folio_buffers(folio);
+ if (head) {
int i = 0;
- bh = head = page_buffers(page);
+ bh = head;
do {
printk(KERN_CRIT
" BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
@@ -258,7 +259,7 @@ repeat:
folio_lock(folio);
if (unlikely(!folio_test_dirty(folio)))
- NILFS_PAGE_BUG(&folio->page, "inconsistent dirty state");
+ NILFS_FOLIO_BUG(folio, "inconsistent dirty state");
dfolio = filemap_grab_folio(dmap, folio->index);
if (unlikely(IS_ERR(dfolio))) {
@@ -268,7 +269,7 @@ repeat:
break;
}
if (unlikely(!folio_buffers(folio)))
- NILFS_PAGE_BUG(&folio->page,
+ NILFS_FOLIO_BUG(folio,
"found empty page in dat page cache");
nilfs_copy_folio(dfolio, folio, true);
@@ -379,7 +380,7 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
* was acquired. Skip processing in that case.
*/
if (likely(folio->mapping == mapping))
- nilfs_clear_dirty_page(&folio->page, silent);
+ nilfs_clear_folio_dirty(folio, silent);
folio_unlock(folio);
}
@@ -389,32 +390,33 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
}
/**
- * nilfs_clear_dirty_page - discard dirty page
- * @page: dirty page that will be discarded
+ * nilfs_clear_folio_dirty - discard dirty folio
+ * @folio: dirty folio that will be discarded
* @silent: suppress [true] or print [false] warning messages
*/
-void nilfs_clear_dirty_page(struct page *page, bool silent)
+void nilfs_clear_folio_dirty(struct folio *folio, bool silent)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb;
+ struct buffer_head *bh, *head;
- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_test_locked(folio));
if (!silent)
nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu",
- page_offset(page), inode->i_ino);
+ folio_pos(folio), inode->i_ino);
- ClearPageUptodate(page);
- ClearPageMappedToDisk(page);
+ folio_clear_uptodate(folio);
+ folio_clear_mappedtodisk(folio);
- if (page_has_buffers(page)) {
- struct buffer_head *bh, *head;
+ head = folio_buffers(folio);
+ if (head) {
const unsigned long clear_bits =
(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));
- bh = head = page_buffers(page);
+ bh = head;
do {
lock_buffer(bh);
if (!silent)
@@ -427,7 +429,7 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
} while (bh = bh->b_this_page, bh != head);
}
- __nilfs_clear_page_dirty(page);
+ __nilfs_clear_folio_dirty(folio);
}
unsigned int nilfs_page_count_clean_buffers(struct page *page,
@@ -457,22 +459,23 @@ unsigned int nilfs_page_count_clean_buffers(struct page *page,
* 2) Some B-tree operations like insertion or deletion may dispose buffers
* in dirty state, and this needs to cancel the dirty state of their pages.
*/
-int __nilfs_clear_page_dirty(struct page *page)
+void __nilfs_clear_folio_dirty(struct folio *folio)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
if (mapping) {
xa_lock_irq(&mapping->i_pages);
- if (test_bit(PG_dirty, &page->flags)) {
- __xa_clear_mark(&mapping->i_pages, page_index(page),
+ if (folio_test_dirty(folio)) {
+ __xa_clear_mark(&mapping->i_pages, folio->index,
PAGECACHE_TAG_DIRTY);
xa_unlock_irq(&mapping->i_pages);
- return clear_page_dirty_for_io(page);
+ folio_clear_dirty_for_io(folio);
+ return;
}
xa_unlock_irq(&mapping->i_pages);
- return 0;
+ return;
}
- return TestClearPageDirty(page);
+ folio_clear_dirty(folio);
}
/**
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index d249ea1cefff..7e1a2c455a10 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -30,18 +30,18 @@ BUFFER_FNS(NILFS_Checked, nilfs_checked) /* buffer is verified */
BUFFER_FNS(NILFS_Redirected, nilfs_redirected) /* redirected to a copy */
-int __nilfs_clear_page_dirty(struct page *);
+void __nilfs_clear_folio_dirty(struct folio *);
struct buffer_head *nilfs_grab_buffer(struct inode *, struct address_space *,
unsigned long, unsigned long);
void nilfs_forget_buffer(struct buffer_head *);
void nilfs_copy_buffer(struct buffer_head *, struct buffer_head *);
-int nilfs_page_buffers_clean(struct page *);
-void nilfs_page_bug(struct page *);
+bool nilfs_folio_buffers_clean(struct folio *);
+void nilfs_folio_bug(struct folio *);
int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
void nilfs_copy_back_pages(struct address_space *, struct address_space *);
-void nilfs_clear_dirty_page(struct page *, bool);
+void nilfs_clear_folio_dirty(struct folio *, bool);
void nilfs_clear_dirty_pages(struct address_space *, bool);
unsigned int nilfs_page_count_clean_buffers(struct page *, unsigned int,
unsigned int);
@@ -49,7 +49,7 @@ unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
sector_t start_blk,
sector_t *blkoff);
-#define NILFS_PAGE_BUG(page, m, a...) \
- do { nilfs_page_bug(page); BUG(); } while (0)
+#define NILFS_FOLIO_BUG(folio, m, a...) \
+ do { nilfs_folio_bug(folio); BUG(); } while (0)
#endif /* _NILFS_PAGE_H */
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 55e31cc903d1..2590a0860eab 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1665,39 +1665,39 @@ static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
return 0;
}
-static void nilfs_begin_page_io(struct page *page)
+static void nilfs_begin_folio_io(struct folio *folio)
{
- if (!page || PageWriteback(page))
+ if (!folio || folio_test_writeback(folio))
/*
* For split b-tree node pages, this function may be called
* twice. We ignore the 2nd or later calls by this check.
*/
return;
- lock_page(page);
- clear_page_dirty_for_io(page);
- set_page_writeback(page);
- unlock_page(page);
+ folio_lock(folio);
+ folio_clear_dirty_for_io(folio);
+ folio_start_writeback(folio);
+ folio_unlock(folio);
}
static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
{
struct nilfs_segment_buffer *segbuf;
- struct page *bd_page = NULL, *fs_page = NULL;
+ struct folio *bd_folio = NULL, *fs_folio = NULL;
list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
struct buffer_head *bh;
list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
b_assoc_buffers) {
- if (bh->b_page != bd_page) {
- if (bd_page) {
- lock_page(bd_page);
- clear_page_dirty_for_io(bd_page);
- set_page_writeback(bd_page);
- unlock_page(bd_page);
+ if (bh->b_folio != bd_folio) {
+ if (bd_folio) {
+ folio_lock(bd_folio);
+ folio_clear_dirty_for_io(bd_folio);
+ folio_start_writeback(bd_folio);
+ folio_unlock(bd_folio);
}
- bd_page = bh->b_page;
+ bd_folio = bh->b_folio;
}
}
@@ -1705,28 +1705,28 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
b_assoc_buffers) {
set_buffer_async_write(bh);
if (bh == segbuf->sb_super_root) {
- if (bh->b_page != bd_page) {
- lock_page(bd_page);
- clear_page_dirty_for_io(bd_page);
- set_page_writeback(bd_page);
- unlock_page(bd_page);
- bd_page = bh->b_page;
+ if (bh->b_folio != bd_folio) {
+ folio_lock(bd_folio);
+ folio_clear_dirty_for_io(bd_folio);
+ folio_start_writeback(bd_folio);
+ folio_unlock(bd_folio);
+ bd_folio = bh->b_folio;
}
break;
}
- if (bh->b_page != fs_page) {
- nilfs_begin_page_io(fs_page);
- fs_page = bh->b_page;
+ if (bh->b_folio != fs_folio) {
+ nilfs_begin_folio_io(fs_folio);
+ fs_folio = bh->b_folio;
}
}
}
- if (bd_page) {
- lock_page(bd_page);
- clear_page_dirty_for_io(bd_page);
- set_page_writeback(bd_page);
- unlock_page(bd_page);
+ if (bd_folio) {
+ folio_lock(bd_folio);
+ folio_clear_dirty_for_io(bd_folio);
+ folio_start_writeback(bd_folio);
+ folio_unlock(bd_folio);
}
- nilfs_begin_page_io(fs_page);
+ nilfs_begin_folio_io(fs_folio);
}
static int nilfs_segctor_write(struct nilfs_sc_info *sci,
@@ -1739,17 +1739,18 @@ static int nilfs_segctor_write(struct nilfs_sc_info *sci,
return ret;
}
-static void nilfs_end_page_io(struct page *page, int err)
+static void nilfs_end_folio_io(struct folio *folio, int err)
{
- if (!page)
+ if (!folio)
return;
- if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
+ if (buffer_nilfs_node(folio_buffers(folio)) &&
+ !folio_test_writeback(folio)) {
/*
* For b-tree node pages, this function may be called twice
* or more because they might be split in a segment.
*/
- if (PageDirty(page)) {
+ if (folio_test_dirty(folio)) {
/*
* For pages holding split b-tree node buffers, dirty
* flag on the buffers may be cleared discretely.
@@ -1757,30 +1758,30 @@ static void nilfs_end_page_io(struct page *page, int err)
* remaining buffers, and it must be cancelled if
* all the buffers get cleaned later.
*/
- lock_page(page);
- if (nilfs_page_buffers_clean(page))
- __nilfs_clear_page_dirty(page);
- unlock_page(page);
+ folio_lock(folio);
+ if (nilfs_folio_buffers_clean(folio))
+ __nilfs_clear_folio_dirty(folio);
+ folio_unlock(folio);
}
return;
}
if (!err) {
- if (!nilfs_page_buffers_clean(page))
- __set_page_dirty_nobuffers(page);
- ClearPageError(page);
+ if (!nilfs_folio_buffers_clean(folio))
+ filemap_dirty_folio(folio->mapping, folio);
+ folio_clear_error(folio);
} else {
- __set_page_dirty_nobuffers(page);
- SetPageError(page);
+ filemap_dirty_folio(folio->mapping, folio);
+ folio_set_error(folio);
}
- end_page_writeback(page);
+ folio_end_writeback(folio);
}
static void nilfs_abort_logs(struct list_head *logs, int err)
{
struct nilfs_segment_buffer *segbuf;
- struct page *bd_page = NULL, *fs_page = NULL;
+ struct folio *bd_folio = NULL, *fs_folio = NULL;
struct buffer_head *bh;
if (list_empty(logs))
@@ -1790,10 +1791,10 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
b_assoc_buffers) {
clear_buffer_uptodate(bh);
- if (bh->b_page != bd_page) {
- if (bd_page)
- end_page_writeback(bd_page);
- bd_page = bh->b_page;
+ if (bh->b_folio != bd_folio) {
+ if (bd_folio)
+ folio_end_writeback(bd_folio);
+ bd_folio = bh->b_folio;
}
}
@@ -1802,22 +1803,22 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
clear_buffer_async_write(bh);
if (bh == segbuf->sb_super_root) {
clear_buffer_uptodate(bh);
- if (bh->b_page != bd_page) {
- end_page_writeback(bd_page);
- bd_page = bh->b_page;
+ if (bh->b_folio != bd_folio) {
+ folio_end_writeback(bd_folio);
+ bd_folio = bh->b_folio;
}
break;
}
- if (bh->b_page != fs_page) {
- nilfs_end_page_io(fs_page, err);
- fs_page = bh->b_page;
+ if (bh->b_folio != fs_folio) {
+ nilfs_end_folio_io(fs_folio, err);
+ fs_folio = bh->b_folio;
}
}
}
- if (bd_page)
- end_page_writeback(bd_page);
+ if (bd_folio)
+ folio_end_writeback(bd_folio);
- nilfs_end_page_io(fs_page, err);
+ nilfs_end_folio_io(fs_folio, err);
}
static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
@@ -1859,7 +1860,7 @@ static void nilfs_set_next_segment(struct the_nilfs *nilfs,
static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
struct nilfs_segment_buffer *segbuf;
- struct page *bd_page = NULL, *fs_page = NULL;
+ struct folio *bd_folio = NULL, *fs_folio = NULL;
struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
int update_sr = false;
@@ -1870,21 +1871,21 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
b_assoc_buffers) {
set_buffer_uptodate(bh);
clear_buffer_dirty(bh);
- if (bh->b_page != bd_page) {
- if (bd_page)
- end_page_writeback(bd_page);
- bd_page = bh->b_page;
+ if (bh->b_folio != bd_folio) {
+ if (bd_folio)
+ folio_end_writeback(bd_folio);
+ bd_folio = bh->b_folio;
}
}
/*
- * We assume that the buffers which belong to the same page
+ * We assume that the buffers which belong to the same folio
* continue over the buffer list.
- * Under this assumption, the last BHs of pages is
- * identifiable by the discontinuity of bh->b_page
- * (page != fs_page).
+ * Under this assumption, the last BHs of folios are
+ * identifiable by the discontinuity of bh->b_folio
+ * (folio != fs_folio).
*
* For B-tree node blocks, however, this assumption is not
- * guaranteed. The cleanup code of B-tree node pages needs
+ * guaranteed. The cleanup code of B-tree node folios needs
* special care.
*/
list_for_each_entry(bh, &segbuf->sb_payload_buffers,
@@ -1897,16 +1898,16 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
set_mask_bits(&bh->b_state, clear_bits, set_bits);
if (bh == segbuf->sb_super_root) {
- if (bh->b_page != bd_page) {
- end_page_writeback(bd_page);
- bd_page = bh->b_page;
+ if (bh->b_folio != bd_folio) {
+ folio_end_writeback(bd_folio);
+ bd_folio = bh->b_folio;
}
update_sr = true;
break;
}
- if (bh->b_page != fs_page) {
- nilfs_end_page_io(fs_page, 0);
- fs_page = bh->b_page;
+ if (bh->b_folio != fs_folio) {
+ nilfs_end_folio_io(fs_folio, 0);
+ fs_folio = bh->b_folio;
}
}
@@ -1920,13 +1921,13 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
}
}
/*
- * Since pages may continue over multiple segment buffers,
- * end of the last page must be checked outside of the loop.
+ * Since folios may continue over multiple segment buffers,
+ * end of the last folio must be checked outside of the loop.
*/
- if (bd_page)
- end_page_writeback(bd_page);
+ if (bd_folio)
+ folio_end_writeback(bd_folio);
- nilfs_end_page_io(fs_page, 0);
+ nilfs_end_folio_io(fs_folio, 0);
nilfs_drop_collected_inodes(&sci->sc_dirty_files);
@@ -2587,6 +2588,7 @@ static int nilfs_segctor_thread(void *arg)
"segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
+ set_freezable();
spin_lock(&sci->sc_state_lock);
loop:
for (;;) {
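The set_freezable() call added at the top of the segctor loop marks the log writer thread as freezable before it starts waiting; a generic sketch of that kthread shape, with example_wq and work_pending assumed rather than taken from nilfs:

	static int example_thread(void *arg)
	{
		set_freezable();	/* allow the freezer to park this kthread */
		while (!kthread_should_stop()) {
			wait_event_freezable(example_wq,
					     work_pending || kthread_should_stop());
			/* ... perform one unit of work ... */
		}
		return 0;
	}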
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 58ca7c936393..0a8119456c21 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -471,10 +471,15 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
kunmap_atomic(kaddr);
return;
}
- WARN_ON(nilfs_segment_usage_error(su));
- WARN_ON(!nilfs_segment_usage_dirty(su));
+ if (unlikely(nilfs_segment_usage_error(su)))
+ nilfs_warn(sufile->i_sb, "free segment %llu marked in error",
+ (unsigned long long)segnum);
sudirty = nilfs_segment_usage_dirty(su);
+ if (unlikely(!sudirty))
+ nilfs_warn(sufile->i_sb, "free unallocated segment %llu",
+ (unsigned long long)segnum);
+
nilfs_segment_usage_set_clean(su);
kunmap_atomic(kaddr);
mark_buffer_dirty(su_bh);
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index a5d1fa4e7552..df8674173b22 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -1314,15 +1314,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
return ERR_CAST(s);
if (!s->s_root) {
- /*
- * We drop s_umount here because we need to open the bdev and
- * bdev->open_mutex ranks above s_umount (blkdev_put() ->
- * __invalidate_device()). It is safe because we have active sb
- * reference and SB_BORN is not set yet.
- */
- up_write(&s->s_umount);
err = setup_bdev_super(s, flags, NULL);
- down_write(&s->s_umount);
if (!err)
err = nilfs_fill_super(s, data,
flags & SB_SILENT ? 1 : 0);
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 71e31e789b29..2d01517a2d59 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -1304,7 +1304,7 @@ done:
* page cleaned. The VM has already locked the page and marked it clean.
*
* For non-resident attributes, ntfs_writepage() writes the @page by calling
- * the ntfs version of the generic block_write_full_page() function,
+ * the ntfs version of the generic block_write_full_folio() function,
* ntfs_write_block(), which in turn if necessary creates and writes the
* buffers associated with the page asynchronously.
*
@@ -1314,7 +1314,7 @@ done:
* vfs inode dirty code path for the inode the mft record belongs to or via the
* vm page dirty code path for the page the mft record is in.
*
- * Based on ntfs_read_folio() and fs/buffer.c::block_write_full_page().
+ * Based on ntfs_read_folio() and fs/buffer.c::block_write_full_folio().
*
* Return 0 on success and -errno on error.
*/
@@ -1644,7 +1644,7 @@ const struct address_space_operations ntfs_normal_aops = {
.bmap = ntfs_bmap,
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
};
/*
@@ -1658,7 +1658,7 @@ const struct address_space_operations ntfs_compressed_aops = {
#endif /* NTFS_RW */
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
};
/*
@@ -1673,7 +1673,7 @@ const struct address_space_operations ntfs_mst_aops = {
#endif /* NTFS_RW */
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
};
#ifdef NTFS_RW
@@ -1690,7 +1690,7 @@ const struct address_space_operations ntfs_mst_aops = {
*
* If the page does not have buffers, we create them and set them uptodate.
* The page may not be locked which is why we need to handle the buffers under
- * the mapping->private_lock. Once the buffers are marked dirty we no longer
+ * the mapping->i_private_lock. Once the buffers are marked dirty we no longer
* need the lock since try_to_free_buffers() does not free dirty buffers.
*/
void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
@@ -1702,11 +1702,11 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
BUG_ON(!PageUptodate(page));
end = ofs + ni->itype.index.block_size;
bh_size = VFS_I(ni)->i_sb->s_blocksize;
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
if (unlikely(!page_has_buffers(page))) {
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
bh = head = alloc_page_buffers(page, bh_size, true);
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
if (likely(!page_has_buffers(page))) {
struct buffer_head *tail;
@@ -1730,7 +1730,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
break;
set_buffer_dirty(bh);
} while ((bh = bh->b_this_page) != head);
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
filemap_dirty_folio(mapping, page_folio(page));
if (unlikely(buffers_to_free)) {
do {
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index 4596c90e7b7c..629723a8d712 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -1462,7 +1462,8 @@ static int ntfs_dir_open(struct inode *vi, struct file *filp)
/**
* ntfs_dir_fsync - sync a directory to disk
* @filp: directory to be synced
- * @dentry: dentry describing the directory to sync
+ * @start: offset in bytes of the beginning of data range to sync
+ * @end: offset in bytes of the end of data range (inclusive)
* @datasync: if non-zero only flush user data and not metadata
*
* Data integrity sync of a directory to disk. Used for fsync, fdatasync, and
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 91b32b2377ac..ea9127ba3208 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6934,7 +6934,7 @@ static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
* nonzero data on subsequent file extends.
*
* We need to call this before i_size is updated on the inode because
- * otherwise block_write_full_page() will skip writeout of pages past
+ * otherwise block_write_full_folio() will skip writeout of pages past
* i_size.
*/
int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index ba790219d528..b82185075de7 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -389,21 +389,18 @@ out_unlock:
/* Note: Because we don't support holes, our allocation has
* already happened (allocation writes zeros to the file data)
* so we don't have to worry about ordered writes in
- * ocfs2_writepage.
+ * ocfs2_writepages.
*
- * ->writepage is called during the process of invalidating the page cache
+ * ->writepages is called during the process of invalidating the page cache
* during blocked lock processing. It can't block on any cluster locks
* to during block mapping. It's relying on the fact that the block
* mapping can't have disappeared under the dirty pages that it is
* being asked to write back.
*/
-static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
+static int ocfs2_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- trace_ocfs2_writepage(
- (unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
- page->index);
-
- return block_write_full_page(page, ocfs2_get_block, wbc);
+ return mpage_writepages(mapping, wbc, ocfs2_get_block);
}
/* Taken from ext3. We don't necessarily need the full blown
@@ -2471,7 +2468,7 @@ const struct address_space_operations ocfs2_aops = {
.dirty_folio = block_dirty_folio,
.read_folio = ocfs2_read_folio,
.readahead = ocfs2_readahead,
- .writepage = ocfs2_writepage,
+ .writepages = ocfs2_writepages,
.write_begin = ocfs2_write_begin,
.write_end = ocfs2_write_end,
.bmap = ocfs2_bmap,
@@ -2480,5 +2477,5 @@ const struct address_space_operations ocfs2_aops = {
.release_folio = ocfs2_release_folio,
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
};
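The ocfs2 conversion above swaps a per-page ->writepage callback for a ->writepages implementation built on mpage_writepages(); the general shape is sketched below, with my_get_block standing in for whatever block-mapping helper a filesystem uses:

	static int example_writepages(struct address_space *mapping,
				      struct writeback_control *wbc)
	{
		return mpage_writepages(mapping, wbc, my_get_block);
	}

	static const struct address_space_operations example_aops = {
		/* .writepage removed; writeback now goes through .writepages */
		.writepages	= example_writepages,
		/* ... remaining callbacks unchanged ... */
	};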
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 94e2a1244442..8b6d15010703 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -818,7 +818,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
/*
* fs-writeback will release the dirty pages without page lock
* whose offset are over inode size, the release happens at
- * block_write_full_page().
+ * block_write_full_folio().
*/
i_size_write(inode, abs_to);
inode->i_blocks = ocfs2_inode_sector_count(inode);
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
index ac4fd1d5b128..9898c11bdfa1 100644
--- a/fs/ocfs2/ocfs2_trace.h
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -1157,8 +1157,6 @@ DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_get_block_end);
DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_readpage);
-DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_writepage);
-
DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_bmap);
TRACE_EVENT(ocfs2_try_to_write_inline_data,
diff --git a/fs/open.c b/fs/open.c
index 3494a9cd8046..a84d21e55c39 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -304,6 +304,10 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (ret)
return ret;
+ ret = fsnotify_file_area_perm(file, MAY_WRITE, &offset, len);
+ if (ret)
+ return ret;
+
if (S_ISFIFO(inode->i_mode))
return -ESPIPE;
@@ -442,7 +446,8 @@ static const struct cred *access_override_creds(void)
* 'get_current_cred()' function), that will clear the
* non_rcu field, because now that other user may be
* expecting RCU freeing. But normal thread-synchronous
- * cred accesses will keep things non-RCY.
+ * cred accesses will keep things non-racy to avoid RCU
+ * freeing.
*/
override_cred->non_rcu = 1;
@@ -1177,44 +1182,6 @@ struct file *kernel_file_open(const struct path *path, int flags,
}
EXPORT_SYMBOL_GPL(kernel_file_open);
-/**
- * backing_file_open - open a backing file for kernel internal use
- * @user_path: path that the user reuqested to open
- * @flags: open flags
- * @real_path: path of the backing file
- * @cred: credentials for open
- *
- * Open a backing file for a stackable filesystem (e.g., overlayfs).
- * @user_path may be on the stackable filesystem and @real_path on the
- * underlying filesystem. In this case, we want to be able to return the
- * @user_path of the stackable filesystem. This is done by embedding the
- * returned file into a container structure that also stores the stacked
- * file's path, which can be retrieved using backing_file_user_path().
- */
-struct file *backing_file_open(const struct path *user_path, int flags,
- const struct path *real_path,
- const struct cred *cred)
-{
- struct file *f;
- int error;
-
- f = alloc_empty_backing_file(flags, cred);
- if (IS_ERR(f))
- return f;
-
- path_get(user_path);
- *backing_file_user_path(f) = *user_path;
- f->f_path = *real_path;
- error = do_dentry_open(f, d_inode(real_path->dentry), NULL);
- if (error) {
- fput(f);
- f = ERR_PTR(error);
- }
-
- return f;
-}
-EXPORT_SYMBOL_GPL(backing_file_open);
-
#define WILL_CREATE(flags) (flags & (O_CREAT | __O_TMPFILE))
#define O_PATH_FLAGS (O_DIRECTORY | O_NOFOLLOW | O_PATH | O_CLOEXEC)
@@ -1574,7 +1541,7 @@ SYSCALL_DEFINE1(close, unsigned int, fd)
int retval;
struct file *file;
- file = close_fd_get_file(fd);
+ file = file_close_fd(fd);
if (!file)
return -EBADF;
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig
index fec5020c3495..2ac67e04a6fb 100644
--- a/fs/overlayfs/Kconfig
+++ b/fs/overlayfs/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config OVERLAY_FS
tristate "Overlay filesystem support"
+ select FS_STACK
select EXPORTFS
help
An overlay filesystem combines two filesystems - an 'upper' filesystem
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 8bea66c97316..45cadc3aed85 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -230,6 +230,19 @@ static int ovl_copy_fileattr(struct inode *inode, const struct path *old,
return ovl_real_fileattr_set(new, &newfa);
}
+static int ovl_verify_area(loff_t pos, loff_t pos2, loff_t len, loff_t totlen)
+{
+ loff_t tmp;
+
+ if (WARN_ON_ONCE(pos != pos2))
+ return -EIO;
+ if (WARN_ON_ONCE(pos < 0 || len < 0 || totlen < 0))
+ return -EIO;
+ if (WARN_ON_ONCE(check_add_overflow(pos, len, &tmp)))
+ return -EIO;
+ return 0;
+}
+
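
ovl_verify_area() added above is essentially a range-sanity check built on check_add_overflow(). A standalone sketch of the same pattern, kept generic (the function name is illustrative only):

	#include <linux/overflow.h>

	/* Sketch: reject a byte range whose end position would overflow loff_t. */
	static int sketch_valid_file_range(loff_t pos, loff_t len)
	{
		loff_t end;

		if (pos < 0 || len < 0)
			return -EINVAL;
		/* check_add_overflow() returns true if pos + len wrapped. */
		if (check_add_overflow(pos, len, &end))
			return -EINVAL;
		return 0;
	}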
static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
struct file *new_file, loff_t len)
{
@@ -244,13 +257,20 @@ static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
int error = 0;
ovl_path_lowerdata(dentry, &datapath);
- if (WARN_ON(datapath.dentry == NULL))
+ if (WARN_ON_ONCE(datapath.dentry == NULL) ||
+ WARN_ON_ONCE(len < 0))
return -EIO;
old_file = ovl_path_open(&datapath, O_LARGEFILE | O_RDONLY);
if (IS_ERR(old_file))
return PTR_ERR(old_file);
+ error = rw_verify_area(READ, old_file, &old_pos, len);
+ if (!error)
+ error = rw_verify_area(WRITE, new_file, &new_pos, len);
+ if (error)
+ goto out_fput;
+
/* Try to use clone_file_range to clone up within the same fs */
ovl_start_write(dentry);
cloned = do_clone_file_range(old_file, 0, new_file, 0, len, 0);
@@ -265,7 +285,7 @@ static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
while (len) {
size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
- long bytes;
+ ssize_t bytes;
if (len < this_len)
this_len = len;
@@ -309,11 +329,13 @@ static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
}
}
- ovl_start_write(dentry);
+ error = ovl_verify_area(old_pos, new_pos, this_len, len);
+ if (error)
+ break;
+
bytes = do_splice_direct(old_file, &old_pos,
new_file, &new_pos,
this_len, SPLICE_F_MOVE);
- ovl_end_write(dentry);
if (bytes <= 0) {
error = bytes;
break;
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 131621daeb13..05536964d37f 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -9,25 +9,11 @@
#include <linux/xattr.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
-#include <linux/splice.h>
#include <linux/security.h>
-#include <linux/mm.h>
#include <linux/fs.h>
+#include <linux/backing-file.h>
#include "overlayfs.h"
-#include "../internal.h" /* for sb_init_dio_done_wq */
-
-struct ovl_aio_req {
- struct kiocb iocb;
- refcount_t ref;
- struct kiocb *orig_iocb;
- /* used for aio completion */
- struct work_struct work;
- long res;
-};
-
-static struct kmem_cache *ovl_aio_request_cachep;
-
static char ovl_whatisit(struct inode *inode, struct inode *realinode)
{
if (realinode != ovl_inode_upper(inode))
@@ -274,83 +260,16 @@ static void ovl_file_accessed(struct file *file)
touch_atime(&file->f_path);
}
-#define OVL_IOCB_MASK \
- (IOCB_NOWAIT | IOCB_HIPRI | IOCB_DSYNC | IOCB_SYNC | IOCB_APPEND)
-
-static rwf_t iocb_to_rw_flags(int flags)
-{
- return (__force rwf_t)(flags & OVL_IOCB_MASK);
-}
-
-static inline void ovl_aio_put(struct ovl_aio_req *aio_req)
-{
- if (refcount_dec_and_test(&aio_req->ref)) {
- fput(aio_req->iocb.ki_filp);
- kmem_cache_free(ovl_aio_request_cachep, aio_req);
- }
-}
-
-static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
-{
- struct kiocb *iocb = &aio_req->iocb;
- struct kiocb *orig_iocb = aio_req->orig_iocb;
-
- if (iocb->ki_flags & IOCB_WRITE) {
- kiocb_end_write(iocb);
- ovl_file_modified(orig_iocb->ki_filp);
- }
-
- orig_iocb->ki_pos = iocb->ki_pos;
- ovl_aio_put(aio_req);
-}
-
-static void ovl_aio_rw_complete(struct kiocb *iocb, long res)
-{
- struct ovl_aio_req *aio_req = container_of(iocb,
- struct ovl_aio_req, iocb);
- struct kiocb *orig_iocb = aio_req->orig_iocb;
-
- ovl_aio_cleanup_handler(aio_req);
- orig_iocb->ki_complete(orig_iocb, res);
-}
-
-static void ovl_aio_complete_work(struct work_struct *work)
-{
- struct ovl_aio_req *aio_req = container_of(work,
- struct ovl_aio_req, work);
-
- ovl_aio_rw_complete(&aio_req->iocb, aio_req->res);
-}
-
-static void ovl_aio_queue_completion(struct kiocb *iocb, long res)
-{
- struct ovl_aio_req *aio_req = container_of(iocb,
- struct ovl_aio_req, iocb);
- struct kiocb *orig_iocb = aio_req->orig_iocb;
-
- /*
- * Punt to a work queue to serialize updates of mtime/size.
- */
- aio_req->res = res;
- INIT_WORK(&aio_req->work, ovl_aio_complete_work);
- queue_work(file_inode(orig_iocb->ki_filp)->i_sb->s_dio_done_wq,
- &aio_req->work);
-}
-
-static int ovl_init_aio_done_wq(struct super_block *sb)
-{
- if (sb->s_dio_done_wq)
- return 0;
-
- return sb_init_dio_done_wq(sb);
-}
-
static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct fd real;
- const struct cred *old_cred;
ssize_t ret;
+ struct backing_file_ctx ctx = {
+ .cred = ovl_creds(file_inode(file)->i_sb),
+ .user_file = file,
+ .accessed = ovl_file_accessed,
+ };
if (!iov_iter_count(iter))
return 0;
@@ -359,37 +278,8 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (ret)
return ret;
- ret = -EINVAL;
- if (iocb->ki_flags & IOCB_DIRECT &&
- !(real.file->f_mode & FMODE_CAN_ODIRECT))
- goto out_fdput;
-
- old_cred = ovl_override_creds(file_inode(file)->i_sb);
- if (is_sync_kiocb(iocb)) {
- rwf_t rwf = iocb_to_rw_flags(iocb->ki_flags);
-
- ret = vfs_iter_read(real.file, iter, &iocb->ki_pos, rwf);
- } else {
- struct ovl_aio_req *aio_req;
-
- ret = -ENOMEM;
- aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL);
- if (!aio_req)
- goto out;
-
- aio_req->orig_iocb = iocb;
- kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
- aio_req->iocb.ki_complete = ovl_aio_rw_complete;
- refcount_set(&aio_req->ref, 2);
- ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter);
- ovl_aio_put(aio_req);
- if (ret != -EIOCBQUEUED)
- ovl_aio_cleanup_handler(aio_req);
- }
-out:
- revert_creds(old_cred);
- ovl_file_accessed(file);
-out_fdput:
+ ret = backing_file_read_iter(real.file, iter, iocb, iocb->ki_flags,
+ &ctx);
fdput(real);
return ret;
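
The struct backing_file_ctx members used here (cred, user_file and the accessed/end_write callbacks) are the hand-off points to the new fs/backing-file.c helpers. A heavily simplified sketch of what a helper like backing_file_read_iter() does with that context, synchronous case only; the real helper also handles async iocbs, rwf flag translation and O_DIRECT validation, so the body below is illustrative rather than the actual implementation:

	static ssize_t sketch_backing_read_iter(struct file *real, struct iov_iter *iter,
						struct kiocb *iocb,
						struct backing_file_ctx *ctx)
	{
		const struct cred *old_cred;
		ssize_t ret;

		/* Do the I/O on the real (backing) file with the stacking
		 * filesystem's creator credentials, as ovl_override_creds() used to. */
		old_cred = override_creds(ctx->cred);
		ret = vfs_iter_read(real, iter, &iocb->ki_pos, 0); /* rwf flags dropped for brevity */
		revert_creds(old_cred);

		/* Let the stacking filesystem update atime on its own inode. */
		if (ctx->accessed)
			ctx->accessed(ctx->user_file);
		return ret;
	}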
@@ -400,9 +290,13 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct fd real;
- const struct cred *old_cred;
ssize_t ret;
int ifl = iocb->ki_flags;
+ struct backing_file_ctx ctx = {
+ .cred = ovl_creds(inode->i_sb),
+ .user_file = file,
+ .end_write = ovl_file_modified,
+ };
if (!iov_iter_count(iter))
return 0;
@@ -410,19 +304,11 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
inode_lock(inode);
/* Update mode */
ovl_copyattr(inode);
- ret = file_remove_privs(file);
- if (ret)
- goto out_unlock;
ret = ovl_real_fdget(file, &real);
if (ret)
goto out_unlock;
- ret = -EINVAL;
- if (iocb->ki_flags & IOCB_DIRECT &&
- !(real.file->f_mode & FMODE_CAN_ODIRECT))
- goto out_fdput;
-
if (!ovl_should_sync(OVL_FS(inode->i_sb)))
ifl &= ~(IOCB_DSYNC | IOCB_SYNC);
@@ -431,42 +317,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
* this property in case it is set by the issuer.
*/
ifl &= ~IOCB_DIO_CALLER_COMP;
-
- old_cred = ovl_override_creds(file_inode(file)->i_sb);
- if (is_sync_kiocb(iocb)) {
- rwf_t rwf = iocb_to_rw_flags(ifl);
-
- file_start_write(real.file);
- ret = vfs_iter_write(real.file, iter, &iocb->ki_pos, rwf);
- file_end_write(real.file);
- /* Update size */
- ovl_file_modified(file);
- } else {
- struct ovl_aio_req *aio_req;
-
- ret = ovl_init_aio_done_wq(inode->i_sb);
- if (ret)
- goto out;
-
- ret = -ENOMEM;
- aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL);
- if (!aio_req)
- goto out;
-
- aio_req->orig_iocb = iocb;
- kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
- aio_req->iocb.ki_flags = ifl;
- aio_req->iocb.ki_complete = ovl_aio_queue_completion;
- refcount_set(&aio_req->ref, 2);
- kiocb_start_write(&aio_req->iocb);
- ret = vfs_iocb_iter_write(real.file, &aio_req->iocb, iter);
- ovl_aio_put(aio_req);
- if (ret != -EIOCBQUEUED)
- ovl_aio_cleanup_handler(aio_req);
- }
-out:
- revert_creds(old_cred);
-out_fdput:
+ ret = backing_file_write_iter(real.file, iter, iocb, ifl, &ctx);
fdput(real);
out_unlock:
@@ -479,20 +330,21 @@ static ssize_t ovl_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
- const struct cred *old_cred;
struct fd real;
ssize_t ret;
+ struct backing_file_ctx ctx = {
+ .cred = ovl_creds(file_inode(in)->i_sb),
+ .user_file = in,
+ .accessed = ovl_file_accessed,
+ };
ret = ovl_real_fdget(in, &real);
if (ret)
return ret;
- old_cred = ovl_override_creds(file_inode(in)->i_sb);
- ret = vfs_splice_read(real.file, ppos, pipe, len, flags);
- revert_creds(old_cred);
- ovl_file_accessed(in);
-
+ ret = backing_file_splice_read(real.file, ppos, pipe, len, flags, &ctx);
fdput(real);
+
return ret;
}
@@ -508,30 +360,23 @@ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags)
{
struct fd real;
- const struct cred *old_cred;
struct inode *inode = file_inode(out);
ssize_t ret;
+ struct backing_file_ctx ctx = {
+ .cred = ovl_creds(inode->i_sb),
+ .user_file = out,
+ .end_write = ovl_file_modified,
+ };
inode_lock(inode);
/* Update mode */
ovl_copyattr(inode);
- ret = file_remove_privs(out);
- if (ret)
- goto out_unlock;
ret = ovl_real_fdget(out, &real);
if (ret)
goto out_unlock;
- old_cred = ovl_override_creds(inode->i_sb);
- file_start_write(real.file);
-
- ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
-
- file_end_write(real.file);
- /* Update size */
- ovl_file_modified(out);
- revert_creds(old_cred);
+ ret = backing_file_splice_write(pipe, real.file, ppos, len, flags, &ctx);
fdput(real);
out_unlock:
@@ -569,23 +414,13 @@ static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
static int ovl_mmap(struct file *file, struct vm_area_struct *vma)
{
struct file *realfile = file->private_data;
- const struct cred *old_cred;
- int ret;
-
- if (!realfile->f_op->mmap)
- return -ENODEV;
+ struct backing_file_ctx ctx = {
+ .cred = ovl_creds(file_inode(file)->i_sb),
+ .user_file = file,
+ .accessed = ovl_file_accessed,
+ };
- if (WARN_ON(file != vma->vm_file))
- return -EIO;
-
- vma_set_file(vma, realfile);
-
- old_cred = ovl_override_creds(file_inode(file)->i_sb);
- ret = call_mmap(vma->vm_file, vma);
- revert_creds(old_cred);
- ovl_file_accessed(file);
-
- return ret;
+ return backing_file_mmap(realfile, vma, &ctx);
}
static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
@@ -778,19 +613,3 @@ const struct file_operations ovl_file_operations = {
.copy_file_range = ovl_copy_file_range,
.remap_file_range = ovl_remap_file_range,
};
-
-int __init ovl_aio_request_cache_init(void)
-{
- ovl_aio_request_cachep = kmem_cache_create("ovl_aio_req",
- sizeof(struct ovl_aio_req),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!ovl_aio_request_cachep)
- return -ENOMEM;
-
- return 0;
-}
-
-void ovl_aio_request_cache_destroy(void)
-{
- kmem_cache_destroy(ovl_aio_request_cachep);
-}
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 05c3dd597fa8..5ba11eb43767 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -425,6 +425,12 @@ int ovl_want_write(struct dentry *dentry);
void ovl_drop_write(struct dentry *dentry);
struct dentry *ovl_workdir(struct dentry *dentry);
const struct cred *ovl_override_creds(struct super_block *sb);
+
+static inline const struct cred *ovl_creds(struct super_block *sb)
+{
+ return OVL_FS(sb)->creator_cred;
+}
+
int ovl_can_decode_fh(struct super_block *sb);
struct dentry *ovl_indexdir(struct super_block *sb);
bool ovl_index_all(struct super_block *sb);
@@ -837,8 +843,6 @@ struct dentry *ovl_create_temp(struct ovl_fs *ofs, struct dentry *workdir,
/* file.c */
extern const struct file_operations ovl_file_operations;
-int __init ovl_aio_request_cache_init(void);
-void ovl_aio_request_cache_destroy(void);
int ovl_real_fileattr_get(const struct path *realpath, struct fileattr *fa);
int ovl_real_fileattr_set(const struct path *realpath, struct fileattr *fa);
int ovl_fileattr_get(struct dentry *dentry, struct fileattr *fa);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index a0967bb25003..7131e3f5b9f5 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1454,6 +1454,7 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
* lead to unexpected results.
*/
sb->s_iflags |= SB_I_NOUMASK;
+ sb->s_iflags |= SB_I_EVM_UNSUPPORTED;
err = -ENOMEM;
root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe);
@@ -1501,14 +1502,10 @@ static int __init ovl_init(void)
if (ovl_inode_cachep == NULL)
return -ENOMEM;
- err = ovl_aio_request_cache_init();
- if (!err) {
- err = register_filesystem(&ovl_fs_type);
- if (!err)
- return 0;
+ err = register_filesystem(&ovl_fs_type);
+ if (!err)
+ return 0;
- ovl_aio_request_cache_destroy();
- }
kmem_cache_destroy(ovl_inode_cachep);
return err;
@@ -1524,7 +1521,6 @@ static void __exit ovl_exit(void)
*/
rcu_barrier();
kmem_cache_destroy(ovl_inode_cachep);
- ovl_aio_request_cache_destroy();
}
module_init(ovl_init);
diff --git a/fs/pipe.c b/fs/pipe.c
index 804a7d789452..8d9286a1f2e8 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -446,6 +446,18 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
bool was_empty = false;
bool wake_next_writer = false;
+ /*
+ * Reject writing to watch queue pipes before the point where we lock
+ * the pipe.
+ * Otherwise, lockdep would be unhappy if the caller already has another
+ * pipe locked.
+ * If we had to support locking a normal pipe and a notification pipe at
+ * the same time, we could set up lockdep annotations for that, but
+ * since we don't actually need that, it's simpler to just bail here.
+ */
+ if (pipe_has_watch_queue(pipe))
+ return -EXDEV;
+
/* Null write succeeds. */
if (unlikely(total_len == 0))
return 0;
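
For context, pipe_has_watch_queue() is a small helper in include/linux/pipe_fs_i.h; roughly (a sketch from memory, gated on CONFIG_WATCH_QUEUE so non-watch-queue kernels compile the new early check away):

	static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe)
	{
	#ifdef CONFIG_WATCH_QUEUE
		return pipe->watch_queue != NULL;
	#else
		return false;
	#endif
	}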
@@ -458,11 +470,6 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
- if (pipe_has_watch_queue(pipe)) {
- ret = -EXDEV;
- goto out;
- }
-
/*
* If it wasn't empty we try to merge new data into
* the last buffer.
@@ -1317,6 +1324,11 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
pipe->tail = tail;
pipe->head = head;
+ if (!pipe_has_watch_queue(pipe)) {
+ pipe->max_usage = nr_slots;
+ pipe->nr_accounted = nr_slots;
+ }
+
spin_unlock_irq(&pipe->rd_wait.lock);
/* This might have made more room for writers */
@@ -1368,8 +1380,6 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
if (ret < 0)
goto out_revert_acct;
- pipe->max_usage = nr_slots;
- pipe->nr_accounted = nr_slots;
return pipe->max_usage * PAGE_SIZE;
out_revert_acct:
diff --git a/fs/pnode.c b/fs/pnode.c
index e4d0340393d5..a799e0315cc9 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -468,7 +468,7 @@ static void umount_one(struct mount *mnt, struct list_head *to_umount)
mnt->mnt.mnt_flags |= MNT_UMOUNT;
list_del_init(&mnt->mnt_child);
list_del_init(&mnt->mnt_umounting);
- list_move_tail(&mnt->mnt_list, to_umount);
+ move_from_ns(mnt, to_umount);
}
/*
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index a05fe94970ce..e1af20893ebe 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -600,7 +600,7 @@ EXPORT_SYMBOL(__posix_acl_chmod);
* the vfsmount must be passed through @idmap. This function will then
* take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
- * performed on the raw inode simply passs @nop_mnt_idmap.
+ * performed on the raw inode simply pass @nop_mnt_idmap.
*/
int
posix_acl_chmod(struct mnt_idmap *idmap, struct dentry *dentry,
@@ -700,7 +700,7 @@ EXPORT_SYMBOL_GPL(posix_acl_create);
* the vfsmount must be passed through @idmap. This function will then
* take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
- * performed on the raw inode simply passs @nop_mnt_idmap.
+ * performed on the raw inode simply pass @nop_mnt_idmap.
*
* Called from set_acl inode operations.
*/
diff --git a/fs/proc/base.c b/fs/proc/base.c
index dd31e3b6bf77..98a031ac2648 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -97,6 +97,7 @@
#include <linux/resctrl.h>
#include <linux/cn_proc.h>
#include <linux/ksm.h>
+#include <uapi/linux/lsm.h>
#include <trace/events/oom.h>
#include "internal.h"
#include "fd.h"
@@ -146,10 +147,10 @@ struct pid_entry {
NOD(NAME, (S_IFREG|(MODE)), \
NULL, &proc_single_file_operations, \
{ .proc_show = show } )
-#define ATTR(LSM, NAME, MODE) \
+#define ATTR(LSMID, NAME, MODE) \
NOD(NAME, (S_IFREG|(MODE)), \
NULL, &proc_pid_attr_operations, \
- { .lsm = LSM })
+ { .lsmid = LSMID })
/*
* Count the number of hardlinks for the pid_entry table, excluding the .
@@ -2726,7 +2727,7 @@ static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
if (!task)
return -ESRCH;
- length = security_getprocattr(task, PROC_I(inode)->op.lsm,
+ length = security_getprocattr(task, PROC_I(inode)->op.lsmid,
file->f_path.dentry->d_name.name,
&p);
put_task_struct(task);
@@ -2784,7 +2785,7 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
if (rv < 0)
goto out_free;
- rv = security_setprocattr(PROC_I(inode)->op.lsm,
+ rv = security_setprocattr(PROC_I(inode)->op.lsmid,
file->f_path.dentry->d_name.name, page,
count);
mutex_unlock(&current->signal->cred_guard_mutex);
@@ -2833,27 +2834,27 @@ static const struct inode_operations proc_##LSM##_attr_dir_inode_ops = { \
#ifdef CONFIG_SECURITY_SMACK
static const struct pid_entry smack_attr_dir_stuff[] = {
- ATTR("smack", "current", 0666),
+ ATTR(LSM_ID_SMACK, "current", 0666),
};
LSM_DIR_OPS(smack);
#endif
#ifdef CONFIG_SECURITY_APPARMOR
static const struct pid_entry apparmor_attr_dir_stuff[] = {
- ATTR("apparmor", "current", 0666),
- ATTR("apparmor", "prev", 0444),
- ATTR("apparmor", "exec", 0666),
+ ATTR(LSM_ID_APPARMOR, "current", 0666),
+ ATTR(LSM_ID_APPARMOR, "prev", 0444),
+ ATTR(LSM_ID_APPARMOR, "exec", 0666),
};
LSM_DIR_OPS(apparmor);
#endif
static const struct pid_entry attr_dir_stuff[] = {
- ATTR(NULL, "current", 0666),
- ATTR(NULL, "prev", 0444),
- ATTR(NULL, "exec", 0666),
- ATTR(NULL, "fscreate", 0666),
- ATTR(NULL, "keycreate", 0666),
- ATTR(NULL, "sockcreate", 0666),
+ ATTR(LSM_ID_UNDEF, "current", 0666),
+ ATTR(LSM_ID_UNDEF, "prev", 0444),
+ ATTR(LSM_ID_UNDEF, "exec", 0666),
+ ATTR(LSM_ID_UNDEF, "fscreate", 0666),
+ ATTR(LSM_ID_UNDEF, "keycreate", 0666),
+ ATTR(LSM_ID_UNDEF, "sockcreate", 0666),
#ifdef CONFIG_SECURITY_SMACK
DIR("smack", 0555,
proc_smack_attr_dir_inode_ops, proc_smack_attr_dir_ops),
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 9a8f32f21ff5..a71ac5379584 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -92,7 +92,7 @@ union proc_op {
int (*proc_show)(struct seq_file *m,
struct pid_namespace *ns, struct pid *pid,
struct task_struct *task);
- const char *lsm;
+ int lsmid;
};
struct proc_inode {
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 435b61054b5b..62b16f42d5d2 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -273,7 +273,8 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
const char *name = NULL;
if (file) {
- struct inode *inode = file_inode(vma->vm_file);
+ const struct inode *inode = file_user_inode(vma->vm_file);
+
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
@@ -865,7 +866,8 @@ static int show_smap(struct seq_file *m, void *v)
__show_smap(m, &mss, false);
seq_printf(m, "THPeligible: %8u\n",
- hugepage_vma_check(vma, vma->vm_flags, true, false, true));
+ !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
+ true, THP_ORDERS_ALL));
if (arch_pkeys_enabled())
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
@@ -1761,7 +1763,7 @@ static int pagemap_release(struct inode *inode, struct file *file)
#define PM_SCAN_CATEGORIES (PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN | \
PAGE_IS_FILE | PAGE_IS_PRESENT | \
PAGE_IS_SWAPPED | PAGE_IS_PFNZERO | \
- PAGE_IS_HUGE)
+ PAGE_IS_HUGE | PAGE_IS_SOFT_DIRTY)
#define PM_SCAN_FLAGS (PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC)
struct pagemap_scan_private {
@@ -1793,6 +1795,8 @@ static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
if (is_zero_pfn(pte_pfn(pte)))
categories |= PAGE_IS_PFNZERO;
+ if (pte_soft_dirty(pte))
+ categories |= PAGE_IS_SOFT_DIRTY;
} else if (is_swap_pte(pte)) {
swp_entry_t swp;
@@ -1806,6 +1810,8 @@ static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
!PageAnon(pfn_swap_entry_to_page(swp)))
categories |= PAGE_IS_FILE;
}
+ if (pte_swp_soft_dirty(pte))
+ categories |= PAGE_IS_SOFT_DIRTY;
}
return categories;
@@ -1853,12 +1859,16 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
if (is_zero_pfn(pmd_pfn(pmd)))
categories |= PAGE_IS_PFNZERO;
+ if (pmd_soft_dirty(pmd))
+ categories |= PAGE_IS_SOFT_DIRTY;
} else if (is_swap_pmd(pmd)) {
swp_entry_t swp;
categories |= PAGE_IS_SWAPPED;
if (!pmd_swp_uffd_wp(pmd))
categories |= PAGE_IS_WRITTEN;
+ if (pmd_swp_soft_dirty(pmd))
+ categories |= PAGE_IS_SOFT_DIRTY;
if (p->masks_of_interest & PAGE_IS_FILE) {
swp = pmd_to_swp_entry(pmd);
@@ -1905,10 +1915,14 @@ static unsigned long pagemap_hugetlb_category(pte_t pte)
categories |= PAGE_IS_FILE;
if (is_zero_pfn(pte_pfn(pte)))
categories |= PAGE_IS_PFNZERO;
+ if (pte_soft_dirty(pte))
+ categories |= PAGE_IS_SOFT_DIRTY;
} else if (is_swap_pte(pte)) {
categories |= PAGE_IS_SWAPPED;
if (!pte_swp_uffd_wp_any(pte))
categories |= PAGE_IS_WRITTEN;
+ if (pte_swp_soft_dirty(pte))
+ categories |= PAGE_IS_SOFT_DIRTY;
}
return categories;
@@ -2007,6 +2021,9 @@ static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
if (wp_allowed)
vma_category |= PAGE_IS_WPALLOWED;
+ if (vma->vm_flags & VM_SOFTDIRTY)
+ vma_category |= PAGE_IS_SOFT_DIRTY;
+
if (!pagemap_scan_is_interesting_vma(vma_category, p))
return 1;
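
With PAGE_IS_SOFT_DIRTY added to PM_SCAN_CATEGORIES, the PAGEMAP_SCAN ioctl can report soft-dirty ranges directly instead of userspace parsing raw pagemap entries. A hedged userspace sketch; struct and constant names are from the 6.7/6.8-era <linux/fs.h> uapi, so verify them against your installed headers:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* struct pm_scan_arg, PAGEMAP_SCAN, PAGE_IS_* */

	/* Print which parts of [start, end) in the current process are soft-dirty. */
	static int print_soft_dirty(unsigned long start, unsigned long end)
	{
		struct page_region regions[32];
		struct pm_scan_arg arg = {
			.size = sizeof(arg),
			.start = start,
			.end = end,
			.vec = (uintptr_t)regions,
			.vec_len = 32,
			.category_mask = PAGE_IS_SOFT_DIRTY,
			.return_mask = PAGE_IS_SOFT_DIRTY,
		};
		int fd = open("/proc/self/pagemap", O_RDONLY);
		long n, i;

		if (fd < 0)
			return -1;
		n = ioctl(fd, PAGEMAP_SCAN, &arg);	/* returns number of regions filled */
		for (i = 0; i < n; i++)
			printf("soft-dirty: %llx-%llx\n",
			       (unsigned long long)regions[i].start,
			       (unsigned long long)regions[i].end);
		close(fd);
		return n < 0 ? -1 : 0;
	}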
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 250eb5bf7b52..0a808951b7d3 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -142,13 +142,9 @@ static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
seq_printf(m, "%i %i %u:%u ", r->mnt_id, r->mnt_parent->mnt_id,
MAJOR(sb->s_dev), MINOR(sb->s_dev));
- if (sb->s_op->show_path) {
- err = sb->s_op->show_path(m, mnt->mnt_root);
- if (err)
- goto out;
- } else {
- seq_dentry(m, mnt->mnt_root, " \t\n\\");
- }
+ err = show_path(m, mnt->mnt_root);
+ if (err)
+ goto out;
seq_putc(m, ' ');
/* mountpoints outside of chroot jail will give SEQ_SKIP on this */
@@ -283,8 +279,6 @@ static int mounts_open_common(struct inode *inode, struct file *file,
p->ns = ns;
p->root = root;
p->show = show;
- INIT_LIST_HEAD(&p->cursor.mnt_list);
- p->cursor.mnt.mnt_flags = MNT_CURSOR;
return 0;
@@ -301,7 +295,6 @@ static int mounts_release(struct inode *inode, struct file *file)
struct seq_file *m = file->private_data;
struct proc_mounts *p = m->private;
path_put(&p->root);
- mnt_cursor_del(p->ns, &p->cursor);
put_mnt_ns(p->ns);
return seq_release_private(inode, file);
}
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index efb1b4c1a0a4..7a6d980e614d 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -70,7 +70,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
/* make various checks */
order = get_order(newsize);
- if (unlikely(order > MAX_ORDER))
+ if (unlikely(order > MAX_PAGE_ORDER))
return -EFBIG;
ret = inode_newsize_ok(inode, newsize);
diff --git a/fs/read_write.c b/fs/read_write.c
index 4771701c896b..d4c036e82b6c 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -354,6 +354,9 @@ out_putf:
int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
{
+ int mask = read_write == READ ? MAY_READ : MAY_WRITE;
+ int ret;
+
if (unlikely((ssize_t) count < 0))
return -EINVAL;
@@ -371,8 +374,11 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
}
}
- return security_file_permission(file,
- read_write == READ ? MAY_READ : MAY_WRITE);
+ ret = security_file_permission(file, mask);
+ if (ret)
+ return ret;
+
+ return fsnotify_file_area_perm(file, mask, ppos, count);
}
EXPORT_SYMBOL(rw_verify_area);
@@ -773,12 +779,14 @@ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
return ret;
}
-static ssize_t do_iter_read(struct file *file, struct iov_iter *iter,
- loff_t *pos, rwf_t flags)
+ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
+ struct iov_iter *iter)
{
size_t tot_len;
ssize_t ret = 0;
+ if (!file->f_op->read_iter)
+ return -EINVAL;
if (!(file->f_mode & FMODE_READ))
return -EBADF;
if (!(file->f_mode & FMODE_CAN_READ))
@@ -787,22 +795,20 @@ static ssize_t do_iter_read(struct file *file, struct iov_iter *iter,
tot_len = iov_iter_count(iter);
if (!tot_len)
goto out;
- ret = rw_verify_area(READ, file, pos, tot_len);
+ ret = rw_verify_area(READ, file, &iocb->ki_pos, tot_len);
if (ret < 0)
return ret;
- if (file->f_op->read_iter)
- ret = do_iter_readv_writev(file, iter, pos, READ, flags);
- else
- ret = do_loop_readv_writev(file, iter, pos, READ, flags);
+ ret = call_read_iter(file, iocb, iter);
out:
if (ret >= 0)
fsnotify_access(file);
return ret;
}
+EXPORT_SYMBOL(vfs_iocb_iter_read);
-ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
- struct iov_iter *iter)
+ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
+ rwf_t flags)
{
size_t tot_len;
ssize_t ret = 0;
@@ -817,33 +823,30 @@ ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
tot_len = iov_iter_count(iter);
if (!tot_len)
goto out;
- ret = rw_verify_area(READ, file, &iocb->ki_pos, tot_len);
+ ret = rw_verify_area(READ, file, ppos, tot_len);
if (ret < 0)
return ret;
- ret = call_read_iter(file, iocb, iter);
+ ret = do_iter_readv_writev(file, iter, ppos, READ, flags);
out:
if (ret >= 0)
fsnotify_access(file);
return ret;
}
-EXPORT_SYMBOL(vfs_iocb_iter_read);
-
-ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
- rwf_t flags)
-{
- if (!file->f_op->read_iter)
- return -EINVAL;
- return do_iter_read(file, iter, ppos, flags);
-}
EXPORT_SYMBOL(vfs_iter_read);
-static ssize_t do_iter_write(struct file *file, struct iov_iter *iter,
- loff_t *pos, rwf_t flags)
+/*
+ * Caller is responsible for calling kiocb_end_write() on completion
+ * if async iocb was queued.
+ */
+ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
+ struct iov_iter *iter)
{
size_t tot_len;
ssize_t ret = 0;
+ if (!file->f_op->write_iter)
+ return -EINVAL;
if (!(file->f_mode & FMODE_WRITE))
return -EBADF;
if (!(file->f_mode & FMODE_CAN_WRITE))
@@ -852,88 +855,127 @@ static ssize_t do_iter_write(struct file *file, struct iov_iter *iter,
tot_len = iov_iter_count(iter);
if (!tot_len)
return 0;
- ret = rw_verify_area(WRITE, file, pos, tot_len);
+ ret = rw_verify_area(WRITE, file, &iocb->ki_pos, tot_len);
if (ret < 0)
return ret;
- if (file->f_op->write_iter)
- ret = do_iter_readv_writev(file, iter, pos, WRITE, flags);
- else
- ret = do_loop_readv_writev(file, iter, pos, WRITE, flags);
+ kiocb_start_write(iocb);
+ ret = call_write_iter(file, iocb, iter);
+ if (ret != -EIOCBQUEUED)
+ kiocb_end_write(iocb);
if (ret > 0)
fsnotify_modify(file);
+
return ret;
}
+EXPORT_SYMBOL(vfs_iocb_iter_write);
-ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
- struct iov_iter *iter)
+ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
+ rwf_t flags)
{
size_t tot_len;
- ssize_t ret = 0;
+ ssize_t ret;
- if (!file->f_op->write_iter)
- return -EINVAL;
if (!(file->f_mode & FMODE_WRITE))
return -EBADF;
if (!(file->f_mode & FMODE_CAN_WRITE))
return -EINVAL;
+ if (!file->f_op->write_iter)
+ return -EINVAL;
tot_len = iov_iter_count(iter);
if (!tot_len)
return 0;
- ret = rw_verify_area(WRITE, file, &iocb->ki_pos, tot_len);
+
+ ret = rw_verify_area(WRITE, file, ppos, tot_len);
if (ret < 0)
return ret;
- ret = call_write_iter(file, iocb, iter);
+ file_start_write(file);
+ ret = do_iter_readv_writev(file, iter, ppos, WRITE, flags);
if (ret > 0)
fsnotify_modify(file);
+ file_end_write(file);
return ret;
}
-EXPORT_SYMBOL(vfs_iocb_iter_write);
-
-ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
- rwf_t flags)
-{
- if (!file->f_op->write_iter)
- return -EINVAL;
- return do_iter_write(file, iter, ppos, flags);
-}
EXPORT_SYMBOL(vfs_iter_write);
static ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
- unsigned long vlen, loff_t *pos, rwf_t flags)
+ unsigned long vlen, loff_t *pos, rwf_t flags)
{
struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
struct iov_iter iter;
- ssize_t ret;
+ size_t tot_len;
+ ssize_t ret = 0;
- ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
- if (ret >= 0) {
- ret = do_iter_read(file, &iter, pos, flags);
- kfree(iov);
- }
+ if (!(file->f_mode & FMODE_READ))
+ return -EBADF;
+ if (!(file->f_mode & FMODE_CAN_READ))
+ return -EINVAL;
+
+ ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov,
+ &iter);
+ if (ret < 0)
+ return ret;
+
+ tot_len = iov_iter_count(&iter);
+ if (!tot_len)
+ goto out;
+ ret = rw_verify_area(READ, file, pos, tot_len);
+ if (ret < 0)
+ goto out;
+
+ if (file->f_op->read_iter)
+ ret = do_iter_readv_writev(file, &iter, pos, READ, flags);
+ else
+ ret = do_loop_readv_writev(file, &iter, pos, READ, flags);
+out:
+ if (ret >= 0)
+ fsnotify_access(file);
+ kfree(iov);
return ret;
}
static ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
- unsigned long vlen, loff_t *pos, rwf_t flags)
+ unsigned long vlen, loff_t *pos, rwf_t flags)
{
struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
struct iov_iter iter;
- ssize_t ret;
+ size_t tot_len;
+ ssize_t ret = 0;
- ret = import_iovec(ITER_SOURCE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
- if (ret >= 0) {
- file_start_write(file);
- ret = do_iter_write(file, &iter, pos, flags);
- file_end_write(file);
- kfree(iov);
- }
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EBADF;
+ if (!(file->f_mode & FMODE_CAN_WRITE))
+ return -EINVAL;
+
+ ret = import_iovec(ITER_SOURCE, vec, vlen, ARRAY_SIZE(iovstack), &iov,
+ &iter);
+ if (ret < 0)
+ return ret;
+
+ tot_len = iov_iter_count(&iter);
+ if (!tot_len)
+ goto out;
+
+ ret = rw_verify_area(WRITE, file, pos, tot_len);
+ if (ret < 0)
+ goto out;
+
+ file_start_write(file);
+ if (file->f_op->write_iter)
+ ret = do_iter_readv_writev(file, &iter, pos, WRITE, flags);
+ else
+ ret = do_loop_readv_writev(file, &iter, pos, WRITE, flags);
+ if (ret > 0)
+ fsnotify_modify(file);
+ file_end_write(file);
+out:
+ kfree(iov);
return ret;
}
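
The comment added above vfs_iocb_iter_write() makes the write-section rule explicit: the helper now brackets synchronous completions with kiocb_start_write()/kiocb_end_write() itself, but an async caller must end the write section from its own completion path. A minimal sketch of such a completion callback (the function name is illustrative):

	/* Sketch: completion callback of a caller that used vfs_iocb_iter_write(). */
	static void sketch_async_write_complete(struct kiocb *iocb, long res)
	{
		/*
		 * vfs_iocb_iter_write() only ends the write section itself when
		 * the write did not return -EIOCBQUEUED, so an async completion
		 * has to do it here before reporting the result upwards.
		 */
		kiocb_end_write(iocb);

		/* ... hand res back to the original submitter ... */
	}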
@@ -1178,7 +1220,7 @@ COMPAT_SYSCALL_DEFINE6(pwritev2, compat_ulong_t, fd,
#endif /* CONFIG_COMPAT */
static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
- size_t count, loff_t max)
+ size_t count, loff_t max)
{
struct fd in, out;
struct inode *in_inode, *out_inode;
@@ -1250,10 +1292,8 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
retval = rw_verify_area(WRITE, out.file, &out_pos, count);
if (retval < 0)
goto fput_out;
- file_start_write(out.file);
retval = do_splice_direct(in.file, &pos, out.file, &out_pos,
count, fl);
- file_end_write(out.file);
} else {
if (out.file->f_flags & O_NONBLOCK)
fl |= SPLICE_F_NONBLOCK;
@@ -1362,38 +1402,6 @@ COMPAT_SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd,
}
#endif
-/**
- * generic_copy_file_range - copy data between two files
- * @file_in: file structure to read from
- * @pos_in: file offset to read from
- * @file_out: file structure to write data to
- * @pos_out: file offset to write data to
- * @len: amount of data to copy
- * @flags: copy flags
- *
- * This is a generic filesystem helper to copy data from one file to another.
- * It has no constraints on the source or destination file owners - the files
- * can belong to different superblocks and different filesystem types. Short
- * copies are allowed.
- *
- * This should be called from the @file_out filesystem, as per the
- * ->copy_file_range() method.
- *
- * Returns the number of bytes copied or a negative error indicating the
- * failure.
- */
-
-ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- size_t len, unsigned int flags)
-{
- lockdep_assert(sb_write_started(file_inode(file_out)->i_sb));
-
- return do_splice_direct(file_in, &pos_in, file_out, &pos_out,
- len > MAX_RW_COUNT ? MAX_RW_COUNT : len, 0);
-}
-EXPORT_SYMBOL(generic_copy_file_range);
-
/*
* Performs necessary checks before doing a file copy
*
@@ -1478,6 +1486,7 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
{
ssize_t ret;
bool splice = flags & COPY_FILE_SPLICE;
+ bool samesb = file_inode(file_in)->i_sb == file_inode(file_out)->i_sb;
if (flags & ~COPY_FILE_SPLICE)
return -EINVAL;
@@ -1509,19 +1518,24 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
ret = file_out->f_op->copy_file_range(file_in, pos_in,
file_out, pos_out,
len, flags);
- goto done;
- }
-
- if (!splice && file_in->f_op->remap_file_range &&
- file_inode(file_in)->i_sb == file_inode(file_out)->i_sb) {
+ } else if (!splice && file_in->f_op->remap_file_range && samesb) {
ret = file_in->f_op->remap_file_range(file_in, pos_in,
file_out, pos_out,
min_t(loff_t, MAX_RW_COUNT, len),
REMAP_FILE_CAN_SHORTEN);
- if (ret > 0)
- goto done;
+ /* fallback to splice */
+ if (ret <= 0)
+ splice = true;
+ } else if (samesb) {
+ /* Fallback to splice for same sb copy for backward compat */
+ splice = true;
}
+ file_end_write(file_out);
+
+ if (!splice)
+ goto done;
+
/*
* We can get here for same sb copy of filesystems that do not implement
* ->copy_file_range() in case filesystem does not support clone or in
@@ -1533,11 +1547,16 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
* and which filesystems do not, that will allow userspace tools to
* make consistent desicions w.r.t using copy_file_range().
*
- * We also get here if caller (e.g. nfsd) requested COPY_FILE_SPLICE.
+ * We also get here if caller (e.g. nfsd) requested COPY_FILE_SPLICE
+ * for server-side-copy between any two sb.
+ *
+ * In any case, we call do_splice_direct() and not splice_file_range(),
+ * without file_start_write() held, to avoid possible deadlocks related
+ * to splicing from input file, while file_start_write() is held on
+ * the output file on a different sb.
*/
- ret = generic_copy_file_range(file_in, pos_in, file_out, pos_out, len,
- flags);
-
+ ret = do_splice_direct(file_in, &pos_in, file_out, &pos_out,
+ min_t(size_t, len, MAX_RW_COUNT), 0);
done:
if (ret > 0) {
fsnotify_access(file_in);
@@ -1549,8 +1568,6 @@ done:
inc_syscr(current);
inc_syscw(current);
- file_end_write(file_out);
-
return ret;
}
EXPORT_SYMBOL(vfs_copy_file_range);
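
Seen from userspace, the rework above preserves the behaviour that copy_file_range(2) succeeds for same-superblock copies even when the filesystem implements neither ->copy_file_range() nor clone, now via a do_splice_direct() fallback taken after file_end_write() on the destination to avoid cross-superblock freeze deadlocks. A minimal usage sketch:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Copy up to len bytes from src to dst; the kernel may clone, use the
	 * filesystem's own copy offload, or fall back to the splice path above. */
	static ssize_t copy_range(const char *src, const char *dst, size_t len)
	{
		int in = open(src, O_RDONLY);
		int out = open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0644);
		ssize_t copied = -1;

		if (in >= 0 && out >= 0)
			copied = copy_file_range(in, NULL, out, NULL, len, 0);
		if (in >= 0)
			close(in);
		if (out >= 0)
			close(out);
		return copied;
	}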
diff --git a/fs/readdir.c b/fs/readdir.c
index c8c46e294431..278bc0254732 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -96,6 +96,10 @@ int iterate_dir(struct file *file, struct dir_context *ctx)
if (res)
goto out;
+ res = fsnotify_file_perm(file, MAY_READ);
+ if (res)
+ goto out;
+
res = down_read_killable(&inode->i_rwsem);
if (res)
goto out;
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 2138ee7d271d..5faf702f8d15 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1407,7 +1407,7 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
INITIALIZE_PATH(path);
int item_len = 0;
int tb_init = 0;
- struct cpu_key cpu_key;
+ struct cpu_key cpu_key = {};
int retval;
int quota_cut_bytes = 0;
diff --git a/fs/remap_range.c b/fs/remap_range.c
index 87ae4f0dc3aa..f8c1120b8311 100644
--- a/fs/remap_range.c
+++ b/fs/remap_range.c
@@ -102,7 +102,9 @@ static int generic_remap_checks(struct file *file_in, loff_t pos_in,
static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
bool write)
{
+ int mask = write ? MAY_WRITE : MAY_READ;
loff_t tmp;
+ int ret;
if (unlikely(pos < 0 || len < 0))
return -EINVAL;
@@ -110,7 +112,11 @@ static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
if (unlikely(check_add_overflow(pos, len, &tmp)))
return -EINVAL;
- return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
+ ret = security_file_permission(file, mask);
+ if (ret)
+ return ret;
+
+ return fsnotify_file_area_perm(file, mask, &pos, len);
}
/*
@@ -385,14 +391,6 @@ loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
if (!file_in->f_op->remap_file_range)
return -EOPNOTSUPP;
- ret = remap_verify_area(file_in, pos_in, len, false);
- if (ret)
- return ret;
-
- ret = remap_verify_area(file_out, pos_out, len, true);
- if (ret)
- return ret;
-
ret = file_in->f_op->remap_file_range(file_in, pos_in,
file_out, pos_out, len, remap_flags);
if (ret < 0)
@@ -410,6 +408,14 @@ loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
{
loff_t ret;
+ ret = remap_verify_area(file_in, pos_in, len, false);
+ if (ret)
+ return ret;
+
+ ret = remap_verify_area(file_out, pos_out, len, true);
+ if (ret)
+ return ret;
+
file_start_write(file_out);
ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len,
remap_flags);
@@ -420,7 +426,7 @@ loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
EXPORT_SYMBOL(vfs_clone_file_range);
/* Check whether we are allowed to dedupe the destination file */
-static bool allow_file_dedupe(struct file *file)
+static bool may_dedupe_file(struct file *file)
{
struct mnt_idmap *idmap = file_mnt_idmap(file);
struct inode *inode = file_inode(file);
@@ -445,24 +451,29 @@ loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
WARN_ON_ONCE(remap_flags & ~(REMAP_FILE_DEDUP |
REMAP_FILE_CAN_SHORTEN));
- ret = mnt_want_write_file(dst_file);
- if (ret)
- return ret;
-
/*
* This is redundant if called from vfs_dedupe_file_range(), but other
* callers need it and it's not performance sesitive...
*/
ret = remap_verify_area(src_file, src_pos, len, false);
if (ret)
- goto out_drop_write;
+ return ret;
ret = remap_verify_area(dst_file, dst_pos, len, true);
if (ret)
- goto out_drop_write;
+ return ret;
+
+ /*
+ * This needs to be called after remap_verify_area() because of
+ * sb_start_write() and before may_dedupe_file() because the mount's
+ * MAY_WRITE need to be checked with mnt_get_write_access_file() held.
+ */
+ ret = mnt_want_write_file(dst_file);
+ if (ret)
+ return ret;
ret = -EPERM;
- if (!allow_file_dedupe(dst_file))
+ if (!may_dedupe_file(dst_file))
goto out_drop_write;
ret = -EXDEV;
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 2131638f26d0..99b0ade833aa 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -25,6 +25,7 @@
#include <linux/freezer.h>
#include <linux/namei.h>
#include <linux/random.h>
+#include <linux/splice.h>
#include <linux/uuid.h>
#include <linux/xattr.h>
#include <uapi/linux/magic.h>
@@ -1506,8 +1507,8 @@ static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
free_xid(xid);
if (rc == -EOPNOTSUPP || rc == -EXDEV)
- rc = generic_copy_file_range(src_file, off, dst_file,
- destoff, len, flags);
+ rc = splice_copy_file_range(src_file, off, dst_file,
+ destoff, len);
return rc;
}
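
splice_copy_file_range(), used here as the cross-server fallback, is the thin wrapper around the new splice_file_range() helper introduced in the fs/splice.c hunks further down. A hedged sketch of the fallback pattern a network filesystem's ->copy_file_range() follows; the native copy helper name is made up:

	/* Sketch: try the filesystem's native server-side copy, fall back to splice. */
	static ssize_t sketch_copy_file_range(struct file *src, loff_t off_in,
					      struct file *dst, loff_t off_out,
					      size_t len, unsigned int flags)
	{
		ssize_t rc = sketch_native_copy(src, off_in, dst, off_out, len);

		if (rc == -EOPNOTSUPP || rc == -EXDEV)
			rc = splice_copy_file_range(src, off_in, dst, off_out, len);
		return rc;
	}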
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 55b3ce944022..5e32c79f03a7 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -994,7 +994,6 @@ release_iface(struct kref *ref)
struct cifs_server_iface *iface = container_of(ref,
struct cifs_server_iface,
refcount);
- list_del_init(&iface->iface_head);
kfree(iface);
}
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index dd2a1fb65e71..dc9b95ca71e6 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -216,22 +216,29 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
/* If server is a channel, select the primary channel */
pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
+ /*
+ * if the server has been marked for termination, there is a
+ * chance that the remaining channels all need reconnect. To be
+ * on the safer side, mark the session and trees for reconnect
+ * for this scenario. This might cause a few redundant session
+ * setup and tree connect requests, but it is better than not doing
+ * a tree connect when needed, and all following requests failing
+ */
+ if (server->terminate) {
+ mark_smb_session = true;
+ server = pserver;
+ }
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
- /*
- * if channel has been marked for termination, nothing to do
- * for the channel. in fact, we cannot find the channel for the
- * server. So safe to exit here
- */
- if (server->terminate)
- break;
-
/* check if iface is still active */
- if (!cifs_chan_is_iface_active(ses, server))
+ spin_lock(&ses->chan_lock);
+ if (!cifs_chan_is_iface_active(ses, server)) {
+ spin_unlock(&ses->chan_lock);
cifs_chan_update_iface(ses, server);
+ spin_lock(&ses->chan_lock);
+ }
- spin_lock(&ses->chan_lock);
if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
spin_unlock(&ses->chan_lock);
continue;
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 32a8525415d9..4e84e88b47e3 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -2706,8 +2706,7 @@ static void cifs_extend_writeback(struct address_space *mapping,
*/
if (!folio_clear_dirty_for_io(folio))
WARN_ON(1);
- if (folio_start_writeback(folio))
- WARN_ON(1);
+ folio_start_writeback(folio);
*_count -= folio_nr_pages(folio);
folio_unlock(folio);
@@ -2742,8 +2741,7 @@ static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
int rc;
/* The folio should be locked, dirty and not undergoing writeback. */
- if (folio_start_writeback(folio))
- WARN_ON(1);
+ folio_start_writeback(folio);
count -= folio_nr_pages(folio);
len = folio_size(folio);
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 66b310208545..14bc745de199 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -595,16 +595,12 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
}
/*
- * Go through iface_list and do kref_put to remove
- * any unused ifaces. ifaces in use will be removed
- * when the last user calls a kref_put on it
+ * Go through iface_list and mark them as inactive
*/
list_for_each_entry_safe(iface, niface, &ses->iface_list,
- iface_head) {
+ iface_head)
iface->is_active = 0;
- kref_put(&iface->refcount, release_iface);
- ses->iface_count--;
- }
+
spin_unlock(&ses->iface_lock);
/*
@@ -678,10 +674,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
iface_head) {
ret = iface_cmp(iface, &tmp_iface);
if (!ret) {
- /* just get a ref so that it doesn't get picked/freed */
iface->is_active = 1;
- kref_get(&iface->refcount);
- ses->iface_count++;
spin_unlock(&ses->iface_lock);
goto next_iface;
} else if (ret < 0) {
@@ -748,6 +741,20 @@ next_iface:
}
out:
+ /*
+ * Go through the list again and put the inactive entries
+ */
+ spin_lock(&ses->iface_lock);
+ list_for_each_entry_safe(iface, niface, &ses->iface_list,
+ iface_head) {
+ if (!iface->is_active) {
+ list_del(&iface->iface_head);
+ kref_put(&iface->refcount, release_iface);
+ ses->iface_count--;
+ }
+ }
+ spin_unlock(&ses->iface_lock);
+
return rc;
}
@@ -784,9 +791,14 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
goto out;
/* check if iface is still active */
+ spin_lock(&ses->chan_lock);
pserver = ses->chans[0].server;
- if (pserver && !cifs_chan_is_iface_active(ses, pserver))
+ if (pserver && !cifs_chan_is_iface_active(ses, pserver)) {
+ spin_unlock(&ses->chan_lock);
cifs_chan_update_iface(ses, pserver);
+ spin_lock(&ses->chan_lock);
+ }
+ spin_unlock(&ses->chan_lock);
out:
kfree(out_buf);
diff --git a/fs/splice.c b/fs/splice.c
index d983d375ff11..218e24b1ac40 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -201,7 +201,8 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
unsigned int tail = pipe->tail;
unsigned int head = pipe->head;
unsigned int mask = pipe->ring_size - 1;
- int ret = 0, page_nr = 0;
+ ssize_t ret = 0;
+ int page_nr = 0;
if (!spd_pages)
return 0;
@@ -673,10 +674,13 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
.u.file = out,
};
int nbufs = pipe->max_usage;
- struct bio_vec *array = kcalloc(nbufs, sizeof(struct bio_vec),
- GFP_KERNEL);
+ struct bio_vec *array;
ssize_t ret;
+ if (!out->f_op->write_iter)
+ return -EINVAL;
+
+ array = kcalloc(nbufs, sizeof(struct bio_vec), GFP_KERNEL);
if (unlikely(!array))
return -ENOMEM;
@@ -684,6 +688,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
splice_from_pipe_begin(&sd);
while (sd.total_len) {
+ struct kiocb kiocb;
struct iov_iter from;
unsigned int head, tail, mask;
size_t left;
@@ -733,7 +738,10 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
}
iov_iter_bvec(&from, ITER_SOURCE, array, n, sd.total_len - left);
- ret = vfs_iter_write(out, &from, &sd.pos, 0);
+ init_sync_kiocb(&kiocb, out);
+ kiocb.ki_pos = sd.pos;
+ ret = call_write_iter(out, &kiocb, &from);
+ sd.pos = kiocb.ki_pos;
if (ret <= 0)
break;
@@ -925,8 +933,8 @@ static int warn_unsupported(struct file *file, const char *op)
/*
* Attempt to initiate a splice from pipe to file.
*/
-static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
- loff_t *ppos, size_t len, unsigned int flags)
+static ssize_t do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags)
{
if (unlikely(!out->f_op->splice_write))
return warn_unsupported(out, "write");
@@ -944,27 +952,15 @@ static void do_splice_eof(struct splice_desc *sd)
sd->splice_eof(sd);
}
-/**
- * vfs_splice_read - Read data from a file and splice it into a pipe
- * @in: File to splice from
- * @ppos: Input file offset
- * @pipe: Pipe to splice to
- * @len: Number of bytes to splice
- * @flags: Splice modifier flags (SPLICE_F_*)
- *
- * Splice the requested amount of data from the input file to the pipe. This
- * is synchronous as the caller must hold the pipe lock across the entire
- * operation.
- *
- * If successful, it returns the amount of data spliced, 0 if it hit the EOF or
- * a hole and a negative error code otherwise.
+/*
+ * Callers already called rw_verify_area() on the entire range.
+ * No need to call it for sub ranges.
*/
-long vfs_splice_read(struct file *in, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags)
+static ssize_t do_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
{
unsigned int p_space;
- int ret;
if (unlikely(!(in->f_mode & FMODE_READ)))
return -EBADF;
@@ -975,10 +971,6 @@ long vfs_splice_read(struct file *in, loff_t *ppos,
p_space = pipe->max_usage - pipe_occupancy(pipe->head, pipe->tail);
len = min_t(size_t, len, p_space << PAGE_SHIFT);
- ret = rw_verify_area(READ, in, ppos, len);
- if (unlikely(ret < 0))
- return ret;
-
if (unlikely(len > MAX_RW_COUNT))
len = MAX_RW_COUNT;
@@ -992,6 +984,34 @@ long vfs_splice_read(struct file *in, loff_t *ppos,
return copy_splice_read(in, ppos, pipe, len, flags);
return in->f_op->splice_read(in, ppos, pipe, len, flags);
}
+
+/**
+ * vfs_splice_read - Read data from a file and splice it into a pipe
+ * @in: File to splice from
+ * @ppos: Input file offset
+ * @pipe: Pipe to splice to
+ * @len: Number of bytes to splice
+ * @flags: Splice modifier flags (SPLICE_F_*)
+ *
+ * Splice the requested amount of data from the input file to the pipe. This
+ * is synchronous as the caller must hold the pipe lock across the entire
+ * operation.
+ *
+ * If successful, it returns the amount of data spliced, 0 if it hit the EOF or
+ * a hole and a negative error code otherwise.
+ */
+ssize_t vfs_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ ssize_t ret;
+
+ ret = rw_verify_area(READ, in, ppos, len);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return do_splice_read(in, ppos, pipe, len, flags);
+}
EXPORT_SYMBOL_GPL(vfs_splice_read);
/**
@@ -1011,7 +1031,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
splice_direct_actor *actor)
{
struct pipe_inode_info *pipe;
- long ret, bytes;
+ ssize_t ret, bytes;
size_t len;
int i, flags, more;
@@ -1066,7 +1086,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
size_t read_len;
loff_t pos = sd->pos, prev_pos = pos;
- ret = vfs_splice_read(in, &pos, pipe, len, flags);
+ ret = do_splice_read(in, &pos, pipe, len, flags);
if (unlikely(ret <= 0))
goto read_failure;
@@ -1138,9 +1158,20 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
struct splice_desc *sd)
{
struct file *file = sd->u.file;
+ long ret;
+
+ file_start_write(file);
+ ret = do_splice_from(pipe, file, sd->opos, sd->total_len, sd->flags);
+ file_end_write(file);
+ return ret;
+}
+
+static int splice_file_range_actor(struct pipe_inode_info *pipe,
+ struct splice_desc *sd)
+{
+ struct file *file = sd->u.file;
- return do_splice_from(pipe, file, sd->opos, sd->total_len,
- sd->flags);
+ return do_splice_from(pipe, file, sd->opos, sd->total_len, sd->flags);
}
static void direct_file_splice_eof(struct splice_desc *sd)
@@ -1151,24 +1182,10 @@ static void direct_file_splice_eof(struct splice_desc *sd)
file->f_op->splice_eof(file);
}
-/**
- * do_splice_direct - splices data directly between two files
- * @in: file to splice from
- * @ppos: input file offset
- * @out: file to splice to
- * @opos: output file offset
- * @len: number of bytes to splice
- * @flags: splice modifier flags
- *
- * Description:
- * For use by do_sendfile(). splice can easily emulate sendfile, but
- * doing it in the application would incur an extra system call
- * (splice in + splice out, as compared to just sendfile()). So this helper
- * can splice directly through a process-private pipe.
- *
- */
-long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
- loff_t *opos, size_t len, unsigned int flags)
+static ssize_t do_splice_direct_actor(struct file *in, loff_t *ppos,
+ struct file *out, loff_t *opos,
+ size_t len, unsigned int flags,
+ splice_direct_actor *actor)
{
struct splice_desc sd = {
.len = len,
@@ -1179,7 +1196,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
.splice_eof = direct_file_splice_eof,
.opos = opos,
};
- long ret;
+ ssize_t ret;
if (unlikely(!(out->f_mode & FMODE_WRITE)))
return -EBADF;
@@ -1187,18 +1204,63 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
if (unlikely(out->f_flags & O_APPEND))
return -EINVAL;
- ret = rw_verify_area(WRITE, out, opos, len);
- if (unlikely(ret < 0))
- return ret;
-
- ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
+ ret = splice_direct_to_actor(in, &sd, actor);
if (ret > 0)
*ppos = sd.pos;
return ret;
}
+/**
+ * do_splice_direct - splices data directly between two files
+ * @in: file to splice from
+ * @ppos: input file offset
+ * @out: file to splice to
+ * @opos: output file offset
+ * @len: number of bytes to splice
+ * @flags: splice modifier flags
+ *
+ * Description:
+ * For use by do_sendfile(). splice can easily emulate sendfile, but
+ * doing it in the application would incur an extra system call
+ * (splice in + splice out, as compared to just sendfile()). So this helper
+ * can splice directly through a process-private pipe.
+ *
+ * Callers already called rw_verify_area() on the entire range.
+ */
+ssize_t do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len, unsigned int flags)
+{
+ return do_splice_direct_actor(in, ppos, out, opos, len, flags,
+ direct_splice_actor);
+}
EXPORT_SYMBOL(do_splice_direct);
+/**
+ * splice_file_range - splices data between two files for copy_file_range()
+ * @in: file to splice from
+ * @ppos: input file offset
+ * @out: file to splice to
+ * @opos: output file offset
+ * @len: number of bytes to splice
+ *
+ * Description:
+ * For use by ->copy_file_range() methods.
+ * Like do_splice_direct(), but vfs_copy_file_range() already holds
+ * start_file_write() on @out file.
+ *
+ * Callers already called rw_verify_area() on the entire range.
+ */
+ssize_t splice_file_range(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len)
+{
+ lockdep_assert(file_write_started(out));
+
+ return do_splice_direct_actor(in, ppos, out, opos,
+ min_t(size_t, len, MAX_RW_COUNT),
+ 0, splice_file_range_actor);
+}
+EXPORT_SYMBOL(splice_file_range);
+
static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
{
for (;;) {
@@ -1220,17 +1282,17 @@ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags);
-long splice_file_to_pipe(struct file *in,
- struct pipe_inode_info *opipe,
- loff_t *offset,
- size_t len, unsigned int flags)
+ssize_t splice_file_to_pipe(struct file *in,
+ struct pipe_inode_info *opipe,
+ loff_t *offset,
+ size_t len, unsigned int flags)
{
- long ret;
+ ssize_t ret;
pipe_lock(opipe);
ret = wait_for_space(opipe, flags);
if (!ret)
- ret = vfs_splice_read(in, offset, opipe, len, flags);
+ ret = do_splice_read(in, offset, opipe, len, flags);
pipe_unlock(opipe);
if (ret > 0)
wakeup_pipe_readers(opipe);
@@ -1240,13 +1302,13 @@ long splice_file_to_pipe(struct file *in,
/*
* Determine where to splice to/from.
*/
-long do_splice(struct file *in, loff_t *off_in, struct file *out,
- loff_t *off_out, size_t len, unsigned int flags)
+ssize_t do_splice(struct file *in, loff_t *off_in, struct file *out,
+ loff_t *off_out, size_t len, unsigned int flags)
{
struct pipe_inode_info *ipipe;
struct pipe_inode_info *opipe;
loff_t offset;
- long ret;
+ ssize_t ret;
if (unlikely(!(in->f_mode & FMODE_READ) ||
!(out->f_mode & FMODE_WRITE)))
@@ -1307,6 +1369,10 @@ long do_splice(struct file *in, loff_t *off_in, struct file *out,
offset = in->f_pos;
}
+ ret = rw_verify_area(READ, in, &offset, len);
+ if (unlikely(ret < 0))
+ return ret;
+
if (out->f_flags & O_NONBLOCK)
flags |= SPLICE_F_NONBLOCK;
@@ -1333,14 +1399,14 @@ long do_splice(struct file *in, loff_t *off_in, struct file *out,
return ret;
}
-static long __do_splice(struct file *in, loff_t __user *off_in,
- struct file *out, loff_t __user *off_out,
- size_t len, unsigned int flags)
+static ssize_t __do_splice(struct file *in, loff_t __user *off_in,
+ struct file *out, loff_t __user *off_out,
+ size_t len, unsigned int flags)
{
struct pipe_inode_info *ipipe;
struct pipe_inode_info *opipe;
loff_t offset, *__off_in = NULL, *__off_out = NULL;
- long ret;
+ ssize_t ret;
ipipe = get_pipe_info(in, true);
opipe = get_pipe_info(out, true);
@@ -1379,16 +1445,16 @@ static long __do_splice(struct file *in, loff_t __user *off_in,
return ret;
}
-static int iter_to_pipe(struct iov_iter *from,
- struct pipe_inode_info *pipe,
- unsigned flags)
+static ssize_t iter_to_pipe(struct iov_iter *from,
+ struct pipe_inode_info *pipe,
+ unsigned int flags)
{
struct pipe_buffer buf = {
.ops = &user_page_pipe_buf_ops,
.flags = flags
};
size_t total = 0;
- int ret = 0;
+ ssize_t ret = 0;
while (iov_iter_count(from)) {
struct page *pages[16];
@@ -1437,8 +1503,8 @@ static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
* For lack of a better implementation, implement vmsplice() to userspace
* as a simple copy of the pipes pages to the user iov.
*/
-static long vmsplice_to_user(struct file *file, struct iov_iter *iter,
- unsigned int flags)
+static ssize_t vmsplice_to_user(struct file *file, struct iov_iter *iter,
+ unsigned int flags)
{
struct pipe_inode_info *pipe = get_pipe_info(file, true);
struct splice_desc sd = {
@@ -1446,7 +1512,7 @@ static long vmsplice_to_user(struct file *file, struct iov_iter *iter,
.flags = flags,
.u.data = iter
};
- long ret = 0;
+ ssize_t ret = 0;
if (!pipe)
return -EBADF;
@@ -1470,11 +1536,11 @@ static long vmsplice_to_user(struct file *file, struct iov_iter *iter,
* as splice-from-memory, where the regular splice is splice-from-file (or
* to file). In both cases the output is a pipe, naturally.
*/
-static long vmsplice_to_pipe(struct file *file, struct iov_iter *iter,
- unsigned int flags)
+static ssize_t vmsplice_to_pipe(struct file *file, struct iov_iter *iter,
+ unsigned int flags)
{
struct pipe_inode_info *pipe;
- long ret = 0;
+ ssize_t ret = 0;
unsigned buf_flag = 0;
if (flags & SPLICE_F_GIFT)
@@ -1570,7 +1636,7 @@ SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
size_t, len, unsigned int, flags)
{
struct fd in, out;
- long error;
+ ssize_t error;
if (unlikely(!len))
return 0;
@@ -1584,7 +1650,7 @@ SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
out = fdget(fd_out);
if (out.file) {
error = __do_splice(in.file, off_in, out.file, off_out,
- len, flags);
+ len, flags);
fdput(out);
}
fdput(in);
@@ -1807,15 +1873,15 @@ retry:
/*
* Link contents of ipipe to opipe.
*/
-static int link_pipe(struct pipe_inode_info *ipipe,
- struct pipe_inode_info *opipe,
- size_t len, unsigned int flags)
+static ssize_t link_pipe(struct pipe_inode_info *ipipe,
+ struct pipe_inode_info *opipe,
+ size_t len, unsigned int flags)
{
struct pipe_buffer *ibuf, *obuf;
unsigned int i_head, o_head;
unsigned int i_tail, o_tail;
unsigned int i_mask, o_mask;
- int ret = 0;
+ ssize_t ret = 0;
/*
* Potential ABBA deadlock, work around it by ordering lock
@@ -1898,11 +1964,12 @@ static int link_pipe(struct pipe_inode_info *ipipe,
* The 'flags' used are the SPLICE_F_* variants, currently the only
* applicable one is SPLICE_F_NONBLOCK.
*/
-long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags)
+ssize_t do_tee(struct file *in, struct file *out, size_t len,
+ unsigned int flags)
{
struct pipe_inode_info *ipipe = get_pipe_info(in, true);
struct pipe_inode_info *opipe = get_pipe_info(out, true);
- int ret = -EINVAL;
+ ssize_t ret = -EINVAL;
if (unlikely(!(in->f_mode & FMODE_READ) ||
!(out->f_mode & FMODE_WRITE)))
@@ -1939,7 +2006,7 @@ long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags)
SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
{
struct fd in, out;
- int error;
+ ssize_t error;
if (unlikely(flags & ~SPLICE_F_ALL))
return -EINVAL;
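A minimal userspace sketch (illustrative only, not part of the patch) of the splice(2)
call these functions back; the input file descriptor and the pipe are assumed to exist
already, and the return value is ssize_t just as on the kernel side above:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>

	static int copy_to_pipe(int fd_in, int pipe_wr, size_t len)
	{
		ssize_t n = splice(fd_in, NULL, pipe_wr, NULL, len,
				   SPLICE_F_MOVE);
		if (n < 0) {
			perror("splice");
			return -1;
		}
		printf("spliced %zd bytes\n", n);
		return 0;
	}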
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 8ba8c4c50770..e8df6430444b 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -544,7 +544,8 @@ static void squashfs_readahead(struct readahead_control *ractl)
struct squashfs_page_actor *actor;
unsigned int nr_pages = 0;
struct page **pages;
- int i, file_end = i_size_read(inode) >> msblk->block_log;
+ int i;
+ loff_t file_end = i_size_read(inode) >> msblk->block_log;
unsigned int max_pages = 1UL << shift;
readahead_expand(ractl, start, (len | mask) + 1);
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index f1ccad519e28..763a3f7a75f6 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -26,10 +26,10 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
struct inode *inode = target_page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
- int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+ loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
- int start_index = target_page->index & ~mask;
- int end_index = start_index | mask;
+ loff_t start_index = target_page->index & ~mask;
+ loff_t end_index = start_index | mask;
int i, n, pages, bytes, res = -ENOMEM;
struct page **page;
struct squashfs_page_actor *actor;
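The switch from int to loff_t matters because a 32-bit page or block index silently
truncates for very large files. A short worked example (illustrative only, assuming
4 KiB pages, i.e. PAGE_SHIFT == 12):

	long long i_size = 9LL << 40;            /* a 9 TiB file */
	int    bad  = (i_size - 1) >> 12;        /* 2415919103 > INT_MAX: overflows */
	loff_t good = (i_size - 1) >> 12;        /* keeps the full 64-bit index */

With 4 KiB pages an int index is only safe up to 2^31 pages, i.e. 8 TiB of file size;
loff_t (64-bit) covers the whole range squashfs can describe.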
diff --git a/fs/stat.c b/fs/stat.c
index f721d26ec3f7..77cdc69eb422 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -41,7 +41,7 @@
* the vfsmount must be passed through @idmap. This function will then
* take care to map the inode according to @idmap before filling in the
* uid and gid fields. On non-idmapped mounts or if permission checking is to be
- * performed on the raw inode simply passs @nop_mnt_idmap.
+ * performed on the raw inode simply pass @nop_mnt_idmap.
*/
void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
struct inode *inode, struct kstat *stat)
@@ -247,8 +247,13 @@ retry:
error = vfs_getattr(&path, stat, request_mask, flags);
- stat->mnt_id = real_mount(path.mnt)->mnt_id;
- stat->result_mask |= STATX_MNT_ID;
+ if (request_mask & STATX_MNT_ID_UNIQUE) {
+ stat->mnt_id = real_mount(path.mnt)->mnt_id_unique;
+ stat->result_mask |= STATX_MNT_ID_UNIQUE;
+ } else {
+ stat->mnt_id = real_mount(path.mnt)->mnt_id;
+ stat->result_mask |= STATX_MNT_ID;
+ }
if (path.mnt->mnt_root == path.dentry)
stat->attributes |= STATX_ATTR_MOUNT_ROOT;
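A userspace sketch (not part of the patch) of requesting the new unique mount ID via
statx(2); it assumes libc/uapi headers that already define STATX_MNT_ID_UNIQUE:

	#define _GNU_SOURCE
	#include <sys/stat.h>
	#include <fcntl.h>
	#include <stdio.h>

	static int print_mnt_id(const char *path)
	{
		struct statx stx;

		if (statx(AT_FDCWD, path, 0, STATX_MNT_ID_UNIQUE, &stx) < 0)
			return -1;
		if (stx.stx_mask & STATX_MNT_ID_UNIQUE)
			printf("unique mount id: %llu\n",
			       (unsigned long long)stx.stx_mnt_id);
		else	/* kernel fell back to the legacy mount id */
			printf("legacy mount id: %llu\n",
			       (unsigned long long)stx.stx_mnt_id);
		return 0;
	}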
diff --git a/fs/super.c b/fs/super.c
index 076392396e72..e35936000408 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -81,16 +81,13 @@ static inline void super_unlock_shared(struct super_block *sb)
super_unlock(sb, false);
}
-static inline bool wait_born(struct super_block *sb)
+static bool super_flags(const struct super_block *sb, unsigned int flags)
{
- unsigned int flags;
-
/*
* Pairs with smp_store_release() in super_wake() and ensures
- * that we see SB_BORN or SB_DYING after we're woken.
+ * that we see @flags after we're woken.
*/
- flags = smp_load_acquire(&sb->s_flags);
- return flags & (SB_BORN | SB_DYING);
+ return smp_load_acquire(&sb->s_flags) & flags;
}
/**
@@ -105,15 +102,21 @@ static inline bool wait_born(struct super_block *sb)
*
* The caller must have acquired a temporary reference on @sb->s_count.
*
- * Return: This returns true if SB_BORN was set, false if SB_DYING was
- * set. The function acquires s_umount and returns with it held.
+ * Return: The function returns true if SB_BORN was set, with
+ * s_umount held. It returns false if SB_DYING was set, in which
+ * case s_umount is not held.
*/
static __must_check bool super_lock(struct super_block *sb, bool excl)
{
-
lockdep_assert_not_held(&sb->s_umount);
-relock:
+ /* wait until the superblock is ready or dying */
+ wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));
+
+ /* Don't pointlessly acquire s_umount. */
+ if (super_flags(sb, SB_DYING))
+ return false;
+
__super_lock(sb, excl);
/*
@@ -121,32 +124,22 @@ relock:
* @sb->s_root is NULL and @sb->s_active is 0. No one needs to
* grab a reference to this. Tell them so.
*/
- if (sb->s_flags & SB_DYING)
+ if (sb->s_flags & SB_DYING) {
+ super_unlock(sb, excl);
return false;
+ }
- /* Has called ->get_tree() successfully. */
- if (sb->s_flags & SB_BORN)
- return true;
-
- super_unlock(sb, excl);
-
- /* wait until the superblock is ready or dying */
- wait_var_event(&sb->s_flags, wait_born(sb));
-
- /*
- * Neither SB_BORN nor SB_DYING are ever unset so we never loop.
- * Just reacquire @sb->s_umount for the caller.
- */
- goto relock;
+ WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
+ return true;
}
-/* wait and acquire read-side of @sb->s_umount */
+/* wait and try to acquire read-side of @sb->s_umount */
static inline bool super_lock_shared(struct super_block *sb)
{
return super_lock(sb, false);
}
-/* wait and acquire write-side of @sb->s_umount */
+/* wait and try to acquire write-side of @sb->s_umount */
static inline bool super_lock_excl(struct super_block *sb)
{
return super_lock(sb, true);
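Under the new contract super_lock() only returns with s_umount held when it returns
true (SB_BORN observed); a false return means SB_DYING and the lock is not held.
A caller sketch (do_work() is a hypothetical helper, not from the patch):

	bool locked = super_lock_shared(sb);

	if (locked) {
		if (sb->s_root)
			do_work(sb);		/* hypothetical */
		super_unlock_shared(sb);
	}
	/* on false: the superblock is dying, s_umount is NOT held, nothing to undo */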
@@ -323,7 +316,7 @@ static void destroy_unused_super(struct super_block *s)
static struct super_block *alloc_super(struct file_system_type *type, int flags,
struct user_namespace *user_ns)
{
- struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
+ struct super_block *s = kzalloc(sizeof(struct super_block), GFP_KERNEL);
static const struct super_operations default_op;
int i;
@@ -521,48 +514,7 @@ void deactivate_super(struct super_block *s)
EXPORT_SYMBOL(deactivate_super);
/**
- * grab_super - acquire an active reference
- * @s: reference we are trying to make active
- *
- * Tries to acquire an active reference. grab_super() is used when we
- * had just found a superblock in super_blocks or fs_type->fs_supers
- * and want to turn it into a full-blown active reference. grab_super()
- * is called with sb_lock held and drops it. Returns 1 in case of
- * success, 0 if we had failed (superblock contents was already dead or
- * dying when grab_super() had been called). Note that this is only
- * called for superblocks not in rundown mode (== ones still on ->fs_supers
- * of their type), so increment of ->s_count is OK here.
- */
-static int grab_super(struct super_block *s) __releases(sb_lock)
-{
- bool born;
-
- s->s_count++;
- spin_unlock(&sb_lock);
- born = super_lock_excl(s);
- if (born && atomic_inc_not_zero(&s->s_active)) {
- put_super(s);
- return 1;
- }
- super_unlock_excl(s);
- put_super(s);
- return 0;
-}
-
-static inline bool wait_dead(struct super_block *sb)
-{
- unsigned int flags;
-
- /*
- * Pairs with memory barrier in super_wake() and ensures
- * that we see SB_DEAD after we're woken.
- */
- flags = smp_load_acquire(&sb->s_flags);
- return flags & SB_DEAD;
-}
-
-/**
- * grab_super_dead - acquire an active reference to a superblock
+ * grab_super - acquire an active reference to a superblock
* @sb: superblock to acquire
*
* Acquire a temporary reference on a superblock and try to trade it for
@@ -573,17 +525,21 @@ static inline bool wait_dead(struct super_block *sb)
* Return: This returns true if an active reference could be acquired,
* false if not.
*/
-static bool grab_super_dead(struct super_block *sb)
+static bool grab_super(struct super_block *sb)
{
+ bool locked;
sb->s_count++;
- if (grab_super(sb)) {
- put_super(sb);
- lockdep_assert_held(&sb->s_umount);
- return true;
+ spin_unlock(&sb_lock);
+ locked = super_lock_excl(sb);
+ if (locked) {
+ if (atomic_inc_not_zero(&sb->s_active)) {
+ put_super(sb);
+ return true;
+ }
+ super_unlock_excl(sb);
}
- wait_var_event(&sb->s_flags, wait_dead(sb));
- lockdep_assert_not_held(&sb->s_umount);
+ wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD));
put_super(sb);
return false;
}
@@ -834,7 +790,7 @@ share_extant_sb:
warnfc(fc, "reusing existing filesystem in another namespace not allowed");
return ERR_PTR(-EBUSY);
}
- if (!grab_super_dead(old))
+ if (!grab_super(old))
goto retry;
destroy_unused_super(s);
return old;
@@ -878,7 +834,7 @@ retry:
destroy_unused_super(s);
return ERR_PTR(-EBUSY);
}
- if (!grab_super_dead(old))
+ if (!grab_super(old))
goto retry;
destroy_unused_super(s);
return old;
@@ -930,8 +886,7 @@ static void __iterate_supers(void (*f)(struct super_block *))
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
- /* Pairs with memory marrier in super_wake(). */
- if (smp_load_acquire(&sb->s_flags) & SB_DYING)
+ if (super_flags(sb, SB_DYING))
continue;
sb->s_count++;
spin_unlock(&sb_lock);
@@ -961,15 +916,17 @@ void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
- bool born;
+ bool locked;
sb->s_count++;
spin_unlock(&sb_lock);
- born = super_lock_shared(sb);
- if (born && sb->s_root)
- f(sb, arg);
- super_unlock_shared(sb);
+ locked = super_lock_shared(sb);
+ if (locked) {
+ if (sb->s_root)
+ f(sb, arg);
+ super_unlock_shared(sb);
+ }
spin_lock(&sb_lock);
if (p)
@@ -997,15 +954,17 @@ void iterate_supers_type(struct file_system_type *type,
spin_lock(&sb_lock);
hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
- bool born;
+ bool locked;
sb->s_count++;
spin_unlock(&sb_lock);
- born = super_lock_shared(sb);
- if (born && sb->s_root)
- f(sb, arg);
- super_unlock_shared(sb);
+ locked = super_lock_shared(sb);
+ if (locked) {
+ if (sb->s_root)
+ f(sb, arg);
+ super_unlock_shared(sb);
+ }
spin_lock(&sb_lock);
if (p)
@@ -1019,34 +978,6 @@ void iterate_supers_type(struct file_system_type *type,
EXPORT_SYMBOL(iterate_supers_type);
-/**
- * get_active_super - get an active reference to the superblock of a device
- * @bdev: device to get the superblock for
- *
- * Scans the superblock list and finds the superblock of the file system
- * mounted on the device given. Returns the superblock with an active
- * reference or %NULL if none was found.
- */
-struct super_block *get_active_super(struct block_device *bdev)
-{
- struct super_block *sb;
-
- if (!bdev)
- return NULL;
-
- spin_lock(&sb_lock);
- list_for_each_entry(sb, &super_blocks, s_list) {
- if (sb->s_bdev == bdev) {
- if (!grab_super(sb))
- return NULL;
- super_unlock_excl(sb);
- return sb;
- }
- }
- spin_unlock(&sb_lock);
- return NULL;
-}
-
struct super_block *user_get_super(dev_t dev, bool excl)
{
struct super_block *sb;
@@ -1054,15 +985,17 @@ struct super_block *user_get_super(dev_t dev, bool excl)
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
if (sb->s_dev == dev) {
- bool born;
+ bool locked;
sb->s_count++;
spin_unlock(&sb_lock);
/* still alive? */
- born = super_lock(sb, excl);
- if (born && sb->s_root)
- return sb;
- super_unlock(sb, excl);
+ locked = super_lock(sb, excl);
+ if (locked) {
+ if (sb->s_root)
+ return sb;
+ super_unlock(sb, excl);
+ }
/* nope, got unmounted */
spin_lock(&sb_lock);
__put_super(sb);
@@ -1173,9 +1106,9 @@ cancel_readonly:
static void do_emergency_remount_callback(struct super_block *sb)
{
- bool born = super_lock_excl(sb);
+ bool locked = super_lock_excl(sb);
- if (born && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
+ if (locked && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
struct fs_context *fc;
fc = fs_context_for_reconfigure(sb->s_root,
@@ -1186,7 +1119,8 @@ static void do_emergency_remount_callback(struct super_block *sb)
put_fs_context(fc);
}
}
- super_unlock_excl(sb);
+ if (locked)
+ super_unlock_excl(sb);
}
static void do_emergency_remount(struct work_struct *work)
@@ -1209,16 +1143,17 @@ void emergency_remount(void)
static void do_thaw_all_callback(struct super_block *sb)
{
- bool born = super_lock_excl(sb);
+ bool locked = super_lock_excl(sb);
- if (born && sb->s_root) {
+ if (locked && sb->s_root) {
if (IS_ENABLED(CONFIG_BLOCK))
- while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
+ while (sb->s_bdev && !bdev_thaw(sb->s_bdev))
pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE);
- } else {
- super_unlock_excl(sb);
+ return;
}
+ if (locked)
+ super_unlock_excl(sb);
}
static void do_thaw_all(struct work_struct *work)
@@ -1428,11 +1363,11 @@ EXPORT_SYMBOL(sget_dev);
*
* The function must be called with bdev->bd_holder_lock and releases it.
*/
-static struct super_block *bdev_super_lock_shared(struct block_device *bdev)
+static struct super_block *bdev_super_lock(struct block_device *bdev, bool excl)
__releases(&bdev->bd_holder_lock)
{
struct super_block *sb = bdev->bd_holder;
- bool born;
+ bool locked;
lockdep_assert_held(&bdev->bd_holder_lock);
lockdep_assert_not_held(&sb->s_umount);
@@ -1442,19 +1377,25 @@ static struct super_block *bdev_super_lock_shared(struct block_device *bdev)
spin_lock(&sb_lock);
sb->s_count++;
spin_unlock(&sb_lock);
+
mutex_unlock(&bdev->bd_holder_lock);
- born = super_lock_shared(sb);
- if (!born || !sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
- super_unlock_shared(sb);
- put_super(sb);
- return NULL;
- }
+ locked = super_lock(sb, excl);
+
/*
- * The superblock is active and we hold s_umount, we can drop our
- * temporary reference now.
- */
+ * If the superblock wasn't already SB_DYING then we hold
+ * s_umount and can safely drop our temporary reference.
+ */
put_super(sb);
+
+ if (!locked)
+ return NULL;
+
+ if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
+ super_unlock(sb, excl);
+ return NULL;
+ }
+
return sb;
}
@@ -1462,7 +1403,7 @@ static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
{
struct super_block *sb;
- sb = bdev_super_lock_shared(bdev);
+ sb = bdev_super_lock(bdev, false);
if (!sb)
return;
@@ -1480,16 +1421,110 @@ static void fs_bdev_sync(struct block_device *bdev)
{
struct super_block *sb;
- sb = bdev_super_lock_shared(bdev);
+ sb = bdev_super_lock(bdev, false);
if (!sb)
return;
+
sync_filesystem(sb);
super_unlock_shared(sb);
}
+static struct super_block *get_bdev_super(struct block_device *bdev)
+{
+ bool active = false;
+ struct super_block *sb;
+
+ sb = bdev_super_lock(bdev, true);
+ if (sb) {
+ active = atomic_inc_not_zero(&sb->s_active);
+ super_unlock_excl(sb);
+ }
+ if (!active)
+ return NULL;
+ return sb;
+}
+
+/**
+ * fs_bdev_freeze - freeze owning filesystem of block device
+ * @bdev: block device
+ *
+ * Freeze the filesystem that owns this block device if it is still
+ * active.
+ *
+ * A filesystem that owns multiple block devices may be frozen from each
+ * block device and won't be unfrozen until all block devices are
+ * unfrozen. Each block device can only freeze the filesystem once as we
+ * nest freezes for block devices in the block layer.
+ *
+ * Return: If the freeze was successful zero is returned. If the freeze
+ * failed a negative error code is returned.
+ */
+static int fs_bdev_freeze(struct block_device *bdev)
+{
+ struct super_block *sb;
+ int error = 0;
+
+ lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
+
+ sb = get_bdev_super(bdev);
+ if (!sb)
+ return -EINVAL;
+
+ if (sb->s_op->freeze_super)
+ error = sb->s_op->freeze_super(sb,
+ FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
+ else
+ error = freeze_super(sb,
+ FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
+ if (!error)
+ error = sync_blockdev(bdev);
+ deactivate_super(sb);
+ return error;
+}
+
+/**
+ * fs_bdev_thaw - thaw owning filesystem of block device
+ * @bdev: block device
+ *
+ * Thaw the filesystem that owns this block device.
+ *
+ * A filesystem that owns multiple block devices may be frozen from each
+ * block device and won't be unfrozen until all block devices are
+ * unfrozen. Each block device can only freeze the filesystem once as we
+ * nest freezes for block devices in the block layer.
+ *
+ * Return: If the thaw was successful zero is returned. If the thaw
+ * failed a negative error code is returned. If this function
+ * returns zero it doesn't mean that the filesystem is unfrozen
+ * as it may have been frozen multiple times (kernel may hold a
+ * freeze or might be frozen from other block devices).
+ */
+static int fs_bdev_thaw(struct block_device *bdev)
+{
+ struct super_block *sb;
+ int error;
+
+ lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
+
+ sb = get_bdev_super(bdev);
+ if (WARN_ON_ONCE(!sb))
+ return -EINVAL;
+
+ if (sb->s_op->thaw_super)
+ error = sb->s_op->thaw_super(sb,
+ FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
+ else
+ error = thaw_super(sb,
+ FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
+ deactivate_super(sb);
+ return error;
+}
+
const struct blk_holder_ops fs_holder_ops = {
.mark_dead = fs_bdev_mark_dead,
.sync = fs_bdev_sync,
+ .freeze = fs_bdev_freeze,
+ .thaw = fs_bdev_thaw,
};
EXPORT_SYMBOL_GPL(fs_holder_ops);
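For these hooks to reach the right superblock, the filesystem has to open its block
devices with fs_holder_ops and the super_block as holder. A sketch (not from the patch)
mirroring how such an open looks with the bdev_handle API:

	struct bdev_handle *handle;

	handle = bdev_open_by_path(path,
			BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
			sb, &fs_holder_ops);
	if (IS_ERR(handle))
		return PTR_ERR(handle);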
@@ -1519,15 +1554,10 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
}
/*
- * Until SB_BORN flag is set, there can be no active superblock
- * references and thus no filesystem freezing. get_active_super() will
- * just loop waiting for SB_BORN so even freeze_bdev() cannot proceed.
- *
- * It is enough to check bdev was not frozen before we set s_bdev.
+ * It is enough to check bdev was not frozen before we set
+ * s_bdev as freezing will wait until SB_BORN is set.
*/
- mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (bdev->bd_fsfreeze_count > 0) {
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
if (fc)
warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
bdev_release(bdev_handle);
@@ -1540,7 +1570,6 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
if (bdev_stable_writes(bdev))
sb->s_iflags |= SB_I_STABLE_WRITES;
spin_unlock(&sb_lock);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
@@ -1585,15 +1614,7 @@ int get_tree_bdev(struct fs_context *fc,
return -EBUSY;
}
} else {
- /*
- * We drop s_umount here because we need to open the bdev and
- * bdev->open_mutex ranks above s_umount (blkdev_put() ->
- * bdev_mark_dead()). It is safe because we have active sb
- * reference and SB_BORN is not set yet.
- */
- super_unlock_excl(s);
error = setup_bdev_super(s, fc->sb_flags, fc);
- __super_lock_excl(s);
if (!error)
error = fill_super(s, fc);
if (error) {
@@ -1637,15 +1658,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
return ERR_PTR(-EBUSY);
}
} else {
- /*
- * We drop s_umount here because we need to open the bdev and
- * bdev->open_mutex ranks above s_umount (blkdev_put() ->
- * bdev_mark_dead()). It is safe because we have active sb
- * reference and SB_BORN is not set yet.
- */
- super_unlock_excl(s);
error = setup_bdev_super(s, flags, NULL);
- __super_lock_excl(s);
if (!error)
error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
if (error) {
@@ -1914,6 +1927,47 @@ static int wait_for_partially_frozen(struct super_block *sb)
return ret;
}
+#define FREEZE_HOLDERS (FREEZE_HOLDER_KERNEL | FREEZE_HOLDER_USERSPACE)
+#define FREEZE_FLAGS (FREEZE_HOLDERS | FREEZE_MAY_NEST)
+
+static inline int freeze_inc(struct super_block *sb, enum freeze_holder who)
+{
+ WARN_ON_ONCE((who & ~FREEZE_FLAGS));
+ WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
+
+ if (who & FREEZE_HOLDER_KERNEL)
+ ++sb->s_writers.freeze_kcount;
+ if (who & FREEZE_HOLDER_USERSPACE)
+ ++sb->s_writers.freeze_ucount;
+ return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
+}
+
+static inline int freeze_dec(struct super_block *sb, enum freeze_holder who)
+{
+ WARN_ON_ONCE((who & ~FREEZE_FLAGS));
+ WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
+
+ if ((who & FREEZE_HOLDER_KERNEL) && sb->s_writers.freeze_kcount)
+ --sb->s_writers.freeze_kcount;
+ if ((who & FREEZE_HOLDER_USERSPACE) && sb->s_writers.freeze_ucount)
+ --sb->s_writers.freeze_ucount;
+ return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
+}
+
+static inline bool may_freeze(struct super_block *sb, enum freeze_holder who)
+{
+ WARN_ON_ONCE((who & ~FREEZE_FLAGS));
+ WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
+
+ if (who & FREEZE_HOLDER_KERNEL)
+ return (who & FREEZE_MAY_NEST) ||
+ sb->s_writers.freeze_kcount == 0;
+ if (who & FREEZE_HOLDER_USERSPACE)
+ return (who & FREEZE_MAY_NEST) ||
+ sb->s_writers.freeze_ucount == 0;
+ return false;
+}
+
/**
* freeze_super - lock the filesystem and force it into a consistent state
* @sb: the super to lock
@@ -1926,6 +1980,7 @@ static int wait_for_partially_frozen(struct super_block *sb)
* @who should be:
* * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
* * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs.
+ * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
*
* The @who argument distinguishes between the kernel and userspace trying to
* freeze the filesystem. Although there cannot be multiple kernel freezes or
@@ -1933,6 +1988,13 @@ static int wait_for_partially_frozen(struct super_block *sb)
* userspace can both hold a filesystem frozen. The filesystem remains frozen
* until there are no kernel or userspace freezes in effect.
*
+ * A filesystem may hold multiple devices and thus a filesystem may be
+ * frozen through the block layer via multiple block devices. In this
+ * case the request is marked as being allowed to nest by passing
+ * FREEZE_MAY_NEST. The filesystem remains frozen until all block
+ * devices are unfrozen. If multiple freezes are attempted without
+ * FREEZE_MAY_NEST -EBUSY will be returned.
+ *
* During this function, sb->s_writers.frozen goes through these values:
*
* SB_UNFROZEN: File system is normal, all writes progress as usual.
@@ -1957,31 +2019,29 @@ static int wait_for_partially_frozen(struct super_block *sb)
* mostly auxiliary for filesystems to verify they do not modify frozen fs.
*
* sb->s_writers.frozen is protected by sb->s_umount.
+ *
+ * Return: If the freeze was successful zero is returned. If the freeze
+ * failed a negative error code is returned.
*/
int freeze_super(struct super_block *sb, enum freeze_holder who)
{
int ret;
+ if (!super_lock_excl(sb)) {
+ WARN_ON_ONCE("Dying superblock while freezing!");
+ return -EINVAL;
+ }
atomic_inc(&sb->s_active);
- if (!super_lock_excl(sb))
- WARN(1, "Dying superblock while freezing!");
retry:
if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
- if (sb->s_writers.freeze_holders & who) {
- deactivate_locked_super(sb);
- return -EBUSY;
- }
-
- WARN_ON(sb->s_writers.freeze_holders == 0);
-
- /*
- * Someone else already holds this type of freeze; share the
- * freeze and assign the active ref to the freeze.
- */
- sb->s_writers.freeze_holders |= who;
- super_unlock_excl(sb);
- return 0;
+ if (may_freeze(sb, who))
+ ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1);
+ else
+ ret = -EBUSY;
+ /* All freezers share a single active reference. */
+ deactivate_locked_super(sb);
+ return ret;
}
if (sb->s_writers.frozen != SB_UNFROZEN) {
@@ -1994,14 +2054,9 @@ retry:
goto retry;
}
- if (!(sb->s_flags & SB_BORN)) {
- super_unlock_excl(sb);
- return 0; /* sic - it's "nothing to do" */
- }
-
if (sb_rdonly(sb)) {
/* Nothing to do really... */
- sb->s_writers.freeze_holders |= who;
+ WARN_ON_ONCE(freeze_inc(sb, who) > 1);
sb->s_writers.frozen = SB_FREEZE_COMPLETE;
wake_up_var(&sb->s_writers.frozen);
super_unlock_excl(sb);
@@ -2012,8 +2067,7 @@ retry:
/* Release s_umount to preserve sb_start_write -> s_umount ordering */
super_unlock_excl(sb);
sb_wait_write(sb, SB_FREEZE_WRITE);
- if (!super_lock_excl(sb))
- WARN(1, "Dying superblock while freezing!");
+ __super_lock_excl(sb);
/* Now we go and block page faults... */
sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
@@ -2049,7 +2103,7 @@ retry:
* For debugging purposes so that fs can warn if it sees write activity
* when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
*/
- sb->s_writers.freeze_holders |= who;
+ WARN_ON_ONCE(freeze_inc(sb, who) > 1);
sb->s_writers.frozen = SB_FREEZE_COMPLETE;
wake_up_var(&sb->s_writers.frozen);
lockdep_sb_freeze_release(sb);
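A behaviour sketch (not from the patch) of the nesting semantics FREEZE_MAY_NEST
enables, e.g. for a filesystem frozen through two of its block devices; error handling
is omitted:

	/* first holder freezes the fs, freeze_ucount becomes 1 */
	freeze_super(sb, FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
	/* second holder nests instead of getting -EBUSY, freeze_ucount = 2 */
	freeze_super(sb, FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
	/* first thaw only drops the count, the fs stays frozen */
	thaw_super(sb, FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
	/* last thaw actually unfreezes */
	thaw_super(sb, FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);

Without FREEZE_MAY_NEST the second freeze_super() call above would return -EBUSY, as
described in the kernel-doc.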
@@ -2066,34 +2120,22 @@ EXPORT_SYMBOL(freeze_super);
*/
static int thaw_super_locked(struct super_block *sb, enum freeze_holder who)
{
- int error;
+ int error = -EINVAL;
- if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
- if (!(sb->s_writers.freeze_holders & who)) {
- super_unlock_excl(sb);
- return -EINVAL;
- }
+ if (sb->s_writers.frozen != SB_FREEZE_COMPLETE)
+ goto out_unlock;
- /*
- * Freeze is shared with someone else. Release our hold and
- * drop the active ref that freeze_super assigned to the
- * freezer.
- */
- if (sb->s_writers.freeze_holders & ~who) {
- sb->s_writers.freeze_holders &= ~who;
- deactivate_locked_super(sb);
- return 0;
- }
- } else {
- super_unlock_excl(sb);
- return -EINVAL;
- }
+ /*
+ * All freezers share a single active reference.
+ * So just unlock in case there are any left.
+ */
+ if (freeze_dec(sb, who))
+ goto out_unlock;
if (sb_rdonly(sb)) {
- sb->s_writers.freeze_holders &= ~who;
sb->s_writers.frozen = SB_UNFROZEN;
wake_up_var(&sb->s_writers.frozen);
- goto out;
+ goto out_deactivate;
}
lockdep_sb_freeze_acquire(sb);
@@ -2101,20 +2143,23 @@ static int thaw_super_locked(struct super_block *sb, enum freeze_holder who)
if (sb->s_op->unfreeze_fs) {
error = sb->s_op->unfreeze_fs(sb);
if (error) {
- printk(KERN_ERR "VFS:Filesystem thaw failed\n");
+ pr_err("VFS: Filesystem thaw failed\n");
+ freeze_inc(sb, who);
lockdep_sb_freeze_release(sb);
- super_unlock_excl(sb);
- return error;
+ goto out_unlock;
}
}
- sb->s_writers.freeze_holders &= ~who;
sb->s_writers.frozen = SB_UNFROZEN;
wake_up_var(&sb->s_writers.frozen);
sb_freeze_unlock(sb, SB_FREEZE_FS);
-out:
+out_deactivate:
deactivate_locked_super(sb);
return 0;
+
+out_unlock:
+ super_unlock_excl(sb);
+ return error;
}
/**
@@ -2128,11 +2173,18 @@ out:
* @who should be:
* * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
* * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs.
+ * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed
+ *
+ * A filesystem may hold multiple devices and thus a filesystem may
+ * have been frozen through the block layer via multiple block devices.
+ * The filesystem remains frozen until all block devices are unfrozen.
*/
int thaw_super(struct super_block *sb, enum freeze_holder who)
{
- if (!super_lock_excl(sb))
- WARN(1, "Dying superblock while thawing!");
+ if (!super_lock_excl(sb)) {
+ WARN_ON_ONCE("Dying superblock while thawing!");
+ return -EINVAL;
+ }
return thaw_super_locked(sb, who);
}
EXPORT_SYMBOL(thaw_super);
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index 725981474e5f..410ab2a44d2f 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -8,6 +8,7 @@
#include <linux/buffer_head.h>
#include <linux/mount.h>
+#include <linux/mpage.h>
#include <linux/string.h>
#include "sysv.h"
@@ -456,9 +457,10 @@ int sysv_getattr(struct mnt_idmap *idmap, const struct path *path,
return 0;
}
-static int sysv_writepage(struct page *page, struct writeback_control *wbc)
+static int sysv_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- return block_write_full_page(page,get_block,wbc);
+ return mpage_writepages(mapping, wbc, get_block);
}
static int sysv_read_folio(struct file *file, struct folio *folio)
@@ -503,8 +505,9 @@ const struct address_space_operations sysv_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = sysv_read_folio,
- .writepage = sysv_writepage,
+ .writepages = sysv_writepages,
.write_begin = sysv_write_begin,
.write_end = generic_write_end,
+ .migrate_folio = buffer_migrate_folio,
.bmap = sysv_bmap
};
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 62524b20964e..bc86ffdb103b 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -215,6 +215,10 @@ resume:
struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
next = tmp->next;
+ /* Note, getdents() can add a cursor dentry with no inode */
+ if (!dentry->d_inode)
+ continue;
+
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
change_gid(dentry, gid);
diff --git a/fs/tracefs/internal.h b/fs/tracefs/internal.h
index 899e447778ac..42bdeb471a07 100644
--- a/fs/tracefs/internal.h
+++ b/fs/tracefs/internal.h
@@ -63,7 +63,7 @@ struct eventfs_inode {
};
unsigned int is_freed:1;
unsigned int is_events:1;
- unsigned int nr_entries:31;
+ unsigned int nr_entries:30;
};
static inline struct tracefs_inode *get_tracefs(const struct inode *inode)
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index ebce93b08281..a7bb2e63cdde 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -35,6 +35,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
+#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/iversion.h>
@@ -390,7 +391,7 @@ out:
/**
* ufs_getfrag_block() - `get_block_t' function, interface between UFS and
- * read_folio, writepage and so on
+ * read_folio, writepages and so on
*/
static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
@@ -467,9 +468,10 @@ done:
return 0;
}
-static int ufs_writepage(struct page *page, struct writeback_control *wbc)
+static int ufs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- return block_write_full_page(page,ufs_getfrag_block,wbc);
+ return mpage_writepages(mapping, wbc, ufs_getfrag_block);
}
static int ufs_read_folio(struct file *file, struct folio *folio)
@@ -528,9 +530,10 @@ const struct address_space_operations ufs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = ufs_read_folio,
- .writepage = ufs_writepage,
+ .writepages = ufs_writepages,
.write_begin = ufs_write_begin,
.write_end = ufs_write_end,
+ .migrate_folio = buffer_migrate_folio,
.bmap = ufs_bmap
};
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index e8af40b05549..6e2a4d6a0d8f 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -2005,6 +2005,75 @@ static inline unsigned int uffd_ctx_features(__u64 user_features)
return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
}
+static int userfaultfd_move(struct userfaultfd_ctx *ctx,
+ unsigned long arg)
+{
+ __s64 ret;
+ struct uffdio_move uffdio_move;
+ struct uffdio_move __user *user_uffdio_move;
+ struct userfaultfd_wake_range range;
+ struct mm_struct *mm = ctx->mm;
+
+ user_uffdio_move = (struct uffdio_move __user *) arg;
+
+ if (atomic_read(&ctx->mmap_changing))
+ return -EAGAIN;
+
+ if (copy_from_user(&uffdio_move, user_uffdio_move,
+ /* don't copy "move" last field */
+ sizeof(uffdio_move)-sizeof(__s64)))
+ return -EFAULT;
+
+ /* Do not allow cross-mm moves. */
+ if (mm != current->mm)
+ return -EINVAL;
+
+ ret = validate_range(mm, uffdio_move.dst, uffdio_move.len);
+ if (ret)
+ return ret;
+
+ ret = validate_range(mm, uffdio_move.src, uffdio_move.len);
+ if (ret)
+ return ret;
+
+ if (uffdio_move.mode & ~(UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES|
+ UFFDIO_MOVE_MODE_DONTWAKE))
+ return -EINVAL;
+
+ if (mmget_not_zero(mm)) {
+ mmap_read_lock(mm);
+
+ /* Re-check after taking mmap_lock */
+ if (likely(!atomic_read(&ctx->mmap_changing)))
+ ret = move_pages(ctx, mm, uffdio_move.dst, uffdio_move.src,
+ uffdio_move.len, uffdio_move.mode);
+ else
+ ret = -EINVAL;
+
+ mmap_read_unlock(mm);
+ mmput(mm);
+ } else {
+ return -ESRCH;
+ }
+
+ if (unlikely(put_user(ret, &user_uffdio_move->move)))
+ return -EFAULT;
+ if (ret < 0)
+ goto out;
+
+ /* len == 0 would wake all */
+ VM_WARN_ON(!ret);
+ range.len = ret;
+ if (!(uffdio_move.mode & UFFDIO_MOVE_MODE_DONTWAKE)) {
+ range.start = uffdio_move.dst;
+ wake_userfault(ctx, &range);
+ }
+ ret = range.len == uffdio_move.len ? 0 : -EAGAIN;
+
+out:
+ return ret;
+}
+
/*
* userland asks for a certain API version and we return which bits
* and ioctl commands are implemented in this kernel for such API
@@ -2097,6 +2166,9 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
case UFFDIO_ZEROPAGE:
ret = userfaultfd_zeropage(ctx, arg);
break;
+ case UFFDIO_MOVE:
+ ret = userfaultfd_move(ctx, arg);
+ break;
case UFFDIO_WRITEPROTECT:
ret = userfaultfd_writeprotect(ctx, arg);
break;
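A userspace sketch (not part of the patch) of driving the new ioctl; it assumes a
userfaultfd that has already been created and registered for the range, and uapi
headers that define UFFDIO_MOVE:

	#include <sys/ioctl.h>
	#include <linux/userfaultfd.h>
	#include <string.h>

	static long move_one_range(int uffd, unsigned long dst,
				   unsigned long src, unsigned long len)
	{
		struct uffdio_move mv;

		memset(&mv, 0, sizeof(mv));
		mv.dst = dst;
		mv.src = src;
		mv.len = len;
		mv.mode = 0;	/* wake waiters on dst when the move completes */

		if (ioctl(uffd, UFFDIO_MOVE, &mv) < 0)
			return -1;	/* mv.move holds bytes moved or -errno */
		return mv.move;
	}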
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 465d7630bb21..813f85156b0c 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -584,7 +584,7 @@ const struct address_space_operations xfs_address_space_operations = {
.bmap = xfs_vm_bmap,
.migrate_folio = filemap_migrate_folio,
.is_partially_uptodate = iomap_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
.swap_activate = xfs_iomap_swapfile_activate,
};
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 545c7991b9b5..669332849680 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -169,7 +169,7 @@ xfs_buf_stale(
atomic_set(&bp->b_lru_ref, 0);
if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
- (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
+ (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
atomic_dec(&bp->b_hold);
ASSERT(atomic_read(&bp->b_hold) >= 1);
@@ -1047,7 +1047,7 @@ xfs_buf_rele(
* buffer for the LRU and clear the (now stale) dispose list
* state flag
*/
- if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
+ if (list_lru_add_obj(&bp->b_target->bt_lru, &bp->b_lru)) {
bp->b_state &= ~XFS_BSTATE_DISPOSE;
atomic_inc(&bp->b_hold);
}
@@ -1060,7 +1060,7 @@ xfs_buf_rele(
* was on was the disposal list
*/
if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
- list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
+ list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru);
} else {
ASSERT(list_empty(&bp->b_lru));
}
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index a013b87ab8d5..61a45a86ffe8 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -1065,7 +1065,7 @@ xfs_qm_dqput(
struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
trace_xfs_dqput_free(dqp);
- if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
+ if (list_lru_add_obj(&qi->qi_lru, &dqp->q_lru))
XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
}
xfs_dqunlock(dqp);
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 7cb75cb6b8e9..57076a25f17d 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -482,9 +482,9 @@ xfs_fs_goingdown(
{
switch (inflags) {
case XFS_FSOP_GOING_FLAGS_DEFAULT: {
- if (!freeze_bdev(mp->m_super->s_bdev)) {
+ if (!bdev_freeze(mp->m_super->s_bdev)) {
xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
- thaw_bdev(mp->m_super->s_bdev);
+ bdev_thaw(mp->m_super->s_bdev);
}
break;
}
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 94a7932ac570..67d0a8564ff3 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -171,7 +171,7 @@ xfs_qm_dqpurge(
* hits zero, so it really should be on the freelist here.
*/
ASSERT(!list_empty(&dqp->q_lru));
- list_lru_del(&qi->qi_lru, &dqp->q_lru);
+ list_lru_del_obj(&qi->qi_lru, &dqp->q_lru);
XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
xfs_qm_dqdestroy(dqp);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 764304595e8b..07857d967ee8 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -366,8 +366,9 @@ xfs_blkdev_get(
{
int error = 0;
- *handlep = bdev_open_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE,
- mp->m_super, &fs_holder_ops);
+ *handlep = bdev_open_by_path(name,
+ BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
+ mp->m_super, &fs_holder_ops);
if (IS_ERR(*handlep)) {
error = PTR_ERR(*handlep);
*handlep = NULL;
@@ -439,18 +440,12 @@ xfs_open_devices(
int error;
/*
- * blkdev_put() can't be called under s_umount, see the comment
- * in get_tree_bdev() for more details
- */
- up_write(&sb->s_umount);
-
- /*
* Open real time and log devices - order is important.
*/
if (mp->m_logname) {
error = xfs_blkdev_get(mp, mp->m_logname, &logdev_handle);
if (error)
- goto out_relock;
+ return error;
}
if (mp->m_rtname) {
@@ -493,10 +488,7 @@ xfs_open_devices(
bdev_release(logdev_handle);
}
- error = 0;
-out_relock:
- down_write(&sb->s_umount);
- return error;
+ return 0;
out_free_rtdev_targ:
if (mp->m_rtdev_targp)
@@ -509,7 +501,7 @@ out_relock:
out_close_logdev:
if (logdev_handle)
bdev_release(logdev_handle);
- goto out_relock;
+ return error;
}
/*
@@ -759,10 +751,6 @@ static void
xfs_mount_free(
struct xfs_mount *mp)
{
- /*
- * Free the buftargs here because blkdev_put needs to be called outside
- * of sb->s_umount, which is held around the call to ->put_super.
- */
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
xfs_free_buftarg(mp->m_logdev_targp);
if (mp->m_rtdev_targp)
diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
index b2c9b35df8f7..6ab2318a9c8e 100644
--- a/fs/zonefs/file.c
+++ b/fs/zonefs/file.c
@@ -180,7 +180,7 @@ const struct address_space_operations zonefs_file_aops = {
.invalidate_folio = iomap_invalidate_folio,
.migrate_folio = filemap_migrate_folio,
.is_partially_uptodate = iomap_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
.swap_activate = zonefs_swap_activate,
};
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 1216d72c650f..2b3ae51f950d 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -14,7 +14,7 @@
struct acpi_handle_list {
u32 count;
- acpi_handle* handles;
+ acpi_handle *handles;
};
/* acpi_utils.h */
@@ -25,16 +25,15 @@ acpi_status
acpi_evaluate_integer(acpi_handle handle,
acpi_string pathname,
struct acpi_object_list *arguments, unsigned long long *data);
-acpi_status
-acpi_evaluate_reference(acpi_handle handle,
- acpi_string pathname,
- struct acpi_object_list *arguments,
- struct acpi_handle_list *list);
+bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname,
+ struct acpi_object_list *arguments,
+ struct acpi_handle_list *list);
bool acpi_handle_list_equal(struct acpi_handle_list *list1,
struct acpi_handle_list *list2);
void acpi_handle_list_replace(struct acpi_handle_list *dst,
struct acpi_handle_list *src);
void acpi_handle_list_free(struct acpi_handle_list *list);
+bool acpi_device_dep(acpi_handle target, acpi_handle match);
acpi_status
acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
struct acpi_buffer *status_buf);
@@ -366,6 +365,98 @@ struct acpi_device_data {
struct acpi_gpio_mapping;
+#define ACPI_DEVICE_SWNODE_ROOT 0
+
+/*
+ * The maximum expected number of CSI-2 data lanes.
+ *
+ * This number is not expected to ever have to be equal to or greater than the
+ * number of bits in an unsigned long variable, but if it needs to be increased
+ * above that limit, code will need to be adjusted accordingly.
+ */
+#define ACPI_DEVICE_CSI2_DATA_LANES 8
+
+#define ACPI_DEVICE_SWNODE_PORT_NAME_LENGTH 8
+
+enum acpi_device_swnode_dev_props {
+ ACPI_DEVICE_SWNODE_DEV_ROTATION,
+ ACPI_DEVICE_SWNODE_DEV_CLOCK_FREQUENCY,
+ ACPI_DEVICE_SWNODE_DEV_LED_MAX_MICROAMP,
+ ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_MICROAMP,
+ ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_TIMEOUT_US,
+ ACPI_DEVICE_SWNODE_DEV_NUM_OF,
+ ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES
+};
+
+enum acpi_device_swnode_port_props {
+ ACPI_DEVICE_SWNODE_PORT_REG,
+ ACPI_DEVICE_SWNODE_PORT_NUM_OF,
+ ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES
+};
+
+enum acpi_device_swnode_ep_props {
+ ACPI_DEVICE_SWNODE_EP_REMOTE_EP,
+ ACPI_DEVICE_SWNODE_EP_BUS_TYPE,
+ ACPI_DEVICE_SWNODE_EP_REG,
+ ACPI_DEVICE_SWNODE_EP_CLOCK_LANES,
+ ACPI_DEVICE_SWNODE_EP_DATA_LANES,
+ ACPI_DEVICE_SWNODE_EP_LANE_POLARITIES,
+ /* TX only */
+ ACPI_DEVICE_SWNODE_EP_LINK_FREQUENCIES,
+ ACPI_DEVICE_SWNODE_EP_NUM_OF,
+ ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES
+};
+
+/*
+ * Each device has a root software node plus two times as many nodes as the
+ * number of CSI-2 ports.
+ */
+#define ACPI_DEVICE_SWNODE_PORT(port) (2 * (port) + 1)
+#define ACPI_DEVICE_SWNODE_EP(endpoint) \
+ (ACPI_DEVICE_SWNODE_PORT(endpoint) + 1)
+
+/**
+ * struct acpi_device_software_node_port - MIPI DisCo for Imaging CSI-2 port
+ * @port_name: Port name.
+ * @data_lanes: "data-lanes" property values.
+ * @lane_polarities: "lane-polarities" property values.
+ * @link_frequencies: "link-frequencies" property values.
+ * @port_nr: Port number.
+ * @crs_csi2_local: _CRS CSI2 record present (i.e. this is a transmitter one).
+ * @port_props: Port properties.
+ * @ep_props: Endpoint properties.
+ * @remote_ep: Reference to the remote endpoint.
+ */
+struct acpi_device_software_node_port {
+ char port_name[ACPI_DEVICE_SWNODE_PORT_NAME_LENGTH + 1];
+ u32 data_lanes[ACPI_DEVICE_CSI2_DATA_LANES];
+ u32 lane_polarities[ACPI_DEVICE_CSI2_DATA_LANES + 1 /* clock lane */];
+ u64 link_frequencies[ACPI_DEVICE_CSI2_DATA_LANES];
+ unsigned int port_nr;
+ bool crs_csi2_local;
+
+ struct property_entry port_props[ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES];
+ struct property_entry ep_props[ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES];
+
+ struct software_node_ref_args remote_ep[1];
+};
+
+/**
+ * struct acpi_device_software_nodes - Software nodes for an ACPI device
+ * @dev_props: Device properties.
+ * @nodes: Software nodes for root as well as ports and endpoints.
+ * @nodeptrs: Array of software node pointers, for (un)registering them.
+ * @ports: Information related to each port and endpoint within a port.
+ * @num_ports: The number of ports.
+ */
+struct acpi_device_software_nodes {
+ struct property_entry dev_props[ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES];
+ struct software_node *nodes;
+ const struct software_node **nodeptrs;
+ struct acpi_device_software_node_port *ports;
+ unsigned int num_ports;
+};
+
/* Device */
struct acpi_device {
u32 pld_crc;
@@ -384,6 +475,7 @@ struct acpi_device {
struct acpi_device_data data;
struct acpi_scan_handler *handler;
struct acpi_hotplug_context *hp;
+ struct acpi_device_software_nodes *swnodes;
const struct acpi_gpio_mapping *driver_gpios;
void *driver_data;
struct device dev;
@@ -764,10 +856,71 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set);
}
-bool acpi_dev_uid_match(struct acpi_device *adev, const char *uid2);
-bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer);
+static inline bool acpi_dev_hid_match(struct acpi_device *adev, const char *hid2)
+{
+ const char *hid1 = acpi_device_hid(adev);
+
+ return hid1 && hid2 && !strcmp(hid1, hid2);
+}
+
+static inline bool acpi_str_uid_match(struct acpi_device *adev, const char *uid2)
+{
+ const char *uid1 = acpi_device_uid(adev);
+
+ return uid1 && uid2 && !strcmp(uid1, uid2);
+}
+
+static inline bool acpi_int_uid_match(struct acpi_device *adev, u64 uid2)
+{
+ u64 uid1;
+
+ return !acpi_dev_uid_to_integer(adev, &uid1) && uid1 == uid2;
+}
+
+#define TYPE_ENTRY(type, x) \
+ const type: x, \
+ type: x
+
+#define ACPI_STR_TYPES(match) \
+ TYPE_ENTRY(unsigned char *, match), \
+ TYPE_ENTRY(signed char *, match), \
+ TYPE_ENTRY(char *, match), \
+ TYPE_ENTRY(void *, match)
+
+/**
+ * acpi_dev_uid_match - Match device by supplied UID
+ * @adev: ACPI device to match.
+ * @uid2: Unique ID of the device.
+ *
+ * Matches UID in @adev with given @uid2.
+ *
+ * Returns: %true if matches, %false otherwise.
+ */
+#define acpi_dev_uid_match(adev, uid2) \
+ _Generic(uid2, \
+ /* Treat @uid2 as a string for acpi string types */ \
+ ACPI_STR_TYPES(acpi_str_uid_match), \
+ /* Treat as an integer otherwise */ \
+ default: acpi_int_uid_match)(adev, uid2)
+
+/**
+ * acpi_dev_hid_uid_match - Match device by supplied HID and UID
+ * @adev: ACPI device to match.
+ * @hid2: Hardware ID of the device.
+ * @uid2: Unique ID of the device, pass 0 or NULL to not check _UID.
+ *
+ * Matches HID and UID in @adev with given @hid2 and @uid2. Absence of @uid2
+ * will be treated as a match. If user wants to validate @uid2, it should be
+ * done before calling this function.
+ *
+ * Returns: %true if matches or @uid2 is 0 or NULL, %false otherwise.
+ */
+#define acpi_dev_hid_uid_match(adev, hid2, uid2) \
+ (acpi_dev_hid_match(adev, hid2) && \
+ (!(uid2) || acpi_dev_uid_match(adev, uid2)))
+
void acpi_dev_clear_dependencies(struct acpi_device *supplier);
bool acpi_dev_ready_for_enumeration(const struct acpi_device *device);
struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
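A usage sketch (not from the patch) showing how the _Generic dispatch lets one macro
match either a string or an integer _UID; the HID and UID values are made up for
illustration:

	static bool example_match(struct acpi_device *adev)
	{
		/* string UID: expands to acpi_str_uid_match() */
		if (acpi_dev_hid_uid_match(adev, "PRP0001", "sensor0"))
			return true;
		/* integer UID: expands to acpi_int_uid_match() */
		return acpi_dev_uid_match(adev, 3);
	}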
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 6126c977ece0..3a0995f8bce8 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -144,6 +144,8 @@ extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
extern bool cppc_perf_ctrs_in_pcc(void);
+extern unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf);
+extern unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq);
extern bool acpi_cpc_valid(void);
extern bool cppc_allow_fast_switch(void);
extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 4230392b5b0b..3d538d4178ab 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -75,6 +75,15 @@ static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
return __acpi_video_get_backlight_type(false, NULL);
}
+/*
+ * This function MUST only be called by GPU drivers to check if the driver
+ * should register a backlight class device. This function not only checks
+ * if a GPU native backlight device should be registered, it *also* tells
+ * the ACPI video-detect code that native GPU backlight control is available.
+ * Therefore calling this from any place other than the GPU driver is wrong!
+ * To check if GPU native backlight control is used in other places instead use:
+ * if (acpi_video_get_backlight_type() == acpi_backlight_native) { ... }
+ */
static inline bool acpi_video_backlight_use_native(void)
{
return __acpi_video_get_backlight_type(true, NULL) == acpi_backlight_native;
diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h
index 1a3ad6d29833..c32e0cf23c90 100644
--- a/include/asm-generic/numa.h
+++ b/include/asm-generic/numa.h
@@ -35,6 +35,7 @@ int __init numa_add_memblk(int nodeid, u64 start, u64 end);
void __init numa_set_distance(int from, int to, int distance);
void __init numa_free_distance(void);
void __init early_map_cpu_to_node(unsigned int cpu, int nid);
+int __init early_cpu_to_node(int cpu);
void numa_store_cpu_info(unsigned int cpu);
void numa_add_cpu(unsigned int cpu);
void numa_remove_cpu(unsigned int cpu);
@@ -46,6 +47,7 @@ static inline void numa_add_cpu(unsigned int cpu) { }
static inline void numa_remove_cpu(unsigned int cpu) { }
static inline void arch_numa_init(void) { }
static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
+static inline int early_cpu_to_node(int cpu) { return 0; }
#endif /* CONFIG_NUMA */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 699650f81970..a84c64e5f11e 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -104,9 +104,9 @@ static inline u32 get_unaligned_le24(const void *p)
static inline void __put_unaligned_be24(const u32 val, u8 *p)
{
- *p++ = val >> 16;
- *p++ = val >> 8;
- *p++ = val;
+ *p++ = (val >> 16) & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = val & 0xff;
}
static inline void put_unaligned_be24(const u32 val, void *p)
@@ -116,9 +116,9 @@ static inline void put_unaligned_be24(const u32 val, void *p)
static inline void __put_unaligned_le24(const u32 val, u8 *p)
{
- *p++ = val;
- *p++ = val >> 8;
- *p++ = val >> 16;
+ *p++ = val & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = (val >> 16) & 0xff;
}
static inline void put_unaligned_le24(const u32 val, void *p)
@@ -128,12 +128,12 @@ static inline void put_unaligned_le24(const u32 val, void *p)
static inline void __put_unaligned_be48(const u64 val, u8 *p)
{
- *p++ = val >> 40;
- *p++ = val >> 32;
- *p++ = val >> 24;
- *p++ = val >> 16;
- *p++ = val >> 8;
- *p++ = val;
+ *p++ = (val >> 40) & 0xff;
+ *p++ = (val >> 32) & 0xff;
+ *p++ = (val >> 24) & 0xff;
+ *p++ = (val >> 16) & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = val & 0xff;
}
static inline void put_unaligned_be48(const u64 val, void *p)
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
index 30a347e5aa11..4490d43c63e3 100644
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -74,7 +74,7 @@ struct ttm_pool {
bool use_dma32;
struct {
- struct ttm_pool_type orders[MAX_ORDER + 1];
+ struct ttm_pool_type orders[NR_PAGE_ORDERS];
} caching[TTM_NUM_CACHING_TYPES];
};
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 4db54e928b36..118a18b7ff84 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -424,6 +424,13 @@ extern int acpi_blacklisted(void);
extern void acpi_osi_setup(char *str);
extern bool acpi_osi_is_win8(void);
+#ifdef CONFIG_ACPI_THERMAL_LIB
+int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp);
+int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp);
+int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
+int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
+#endif
+
#ifdef CONFIG_ACPI_NUMA
int acpi_map_pxm_to_node(int pxm);
int acpi_get_node(acpi_handle handle);
@@ -756,6 +763,10 @@ const char *acpi_get_subsystem_id(acpi_handle handle);
#define ACPI_HANDLE(dev) (NULL)
#define ACPI_HANDLE_FWNODE(fwnode) (NULL)
+/* Get rid of the -Wunused-variable for adev */
+#define acpi_dev_uid_match(adev, uid2) (adev && false)
+#define acpi_dev_hid_uid_match(adev, hid2, uid2) (adev && false)
+
#include <acpi/acpi_numa.h>
struct fwnode_handle;
@@ -772,17 +783,6 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
struct acpi_device;
-static inline bool acpi_dev_uid_match(struct acpi_device *adev, const char *uid2)
-{
- return false;
-}
-
-static inline bool
-acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2)
-{
- return false;
-}
-
static inline int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer)
{
return -ENODEV;
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index a07b510e7dc5..a63d61ca55af 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -27,6 +27,13 @@ static inline unsigned long topology_get_cpu_scale(int cpu)
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
+DECLARE_PER_CPU(unsigned long, capacity_freq_ref);
+
+static inline unsigned long topology_get_freq_ref(int cpu)
+{
+ return per_cpu(capacity_freq_ref, cpu);
+}
+
DECLARE_PER_CPU(unsigned long, arch_freq_scale);
static inline unsigned long topology_get_freq_scale(int cpu)
@@ -92,6 +99,7 @@ void update_siblings_masks(unsigned int cpu);
void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
int parse_acpi_topology(void);
+void freq_inv_set_max_ratio(int cpu, u64 max_rate);
#endif
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
diff --git a/include/linux/async.h b/include/linux/async.h
index cce4ad31e8fc..33c9ff4afb49 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, struct device *dev)
return async_schedule_node(func, dev, dev_to_node(dev));
}
+bool async_schedule_dev_nocall(async_func_t func, struct device *dev);
+
/**
* async_schedule_dev_domain - A device specific version of async_schedule_domain
* @func: function to execute asynchronously
diff --git a/include/linux/backing-file.h b/include/linux/backing-file.h
new file mode 100644
index 000000000000..3f1fe1774f1b
--- /dev/null
+++ b/include/linux/backing-file.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common helpers for stackable filesystems and backing files.
+ *
+ * Copyright (C) 2023 CTERA Networks.
+ */
+
+#ifndef _LINUX_BACKING_FILE_H
+#define _LINUX_BACKING_FILE_H
+
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/fs.h>
+
+struct backing_file_ctx {
+ const struct cred *cred;
+ struct file *user_file;
+ void (*accessed)(struct file *);
+ void (*end_write)(struct file *);
+};
+
+struct file *backing_file_open(const struct path *user_path, int flags,
+ const struct path *real_path,
+ const struct cred *cred);
+ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *out, loff_t *ppos, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx);
+int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
+ struct backing_file_ctx *ctx);
+
+#endif /* _LINUX_BACKING_FILE_H */
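A sketch of how a stacking filesystem might wire this up; the examplefs_* helpers and
callbacks are hypothetical, only the backing_file_read_iter() signature comes from the
header above:

	static ssize_t examplefs_read_iter(struct kiocb *iocb,
					   struct iov_iter *iter)
	{
		struct file *file = iocb->ki_filp;
		struct backing_file_ctx ctx = {
			.cred      = examplefs_creds(file),	/* hypothetical */
			.user_file = file,
			.accessed  = examplefs_file_accessed,	/* hypothetical */
		};

		return backing_file_read_iter(examplefs_real_file(file), iter,
					      iocb, iocb->ki_flags, &ctx);
	}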
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index b29ebd53417d..7c2316c91cbd 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -57,20 +57,18 @@ struct block_device {
void * bd_holder;
const struct blk_holder_ops *bd_holder_ops;
struct mutex bd_holder_lock;
- /* The counter of freeze processes */
- int bd_fsfreeze_count;
int bd_holders;
struct kobject *bd_holder_dir;
- /* Mutex for freeze */
- struct mutex bd_fsfreeze_mutex;
- struct super_block *bd_fsfreeze_sb;
+ atomic_t bd_fsfreeze_count; /* number of freeze requests */
+ struct mutex bd_fsfreeze_mutex; /* serialize freeze/thaw */
struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool bd_make_it_fail;
#endif
bool bd_ro_warned;
+ int bd_writers;
/*
* keep this out-of-line as it's both big and not needed in the fast
* path
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 88e9dd4b71fb..c30a98e08423 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -124,6 +124,8 @@ typedef unsigned int __bitwise blk_mode_t;
#define BLK_OPEN_NDELAY ((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (specialy hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL ((__force blk_mode_t)(1 << 4))
+/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
+#define BLK_OPEN_RESTRICT_WRITES ((__force blk_mode_t)(1 << 5))
struct gendisk {
/*
@@ -1468,8 +1470,23 @@ struct blk_holder_ops {
* Sync the file system mounted on the block device.
*/
void (*sync)(struct block_device *bdev);
+
+ /*
+ * Freeze the file system mounted on the block device.
+ */
+ int (*freeze)(struct block_device *bdev);
+
+ /*
+ * Thaw the file system mounted on the block device.
+ */
+ int (*thaw)(struct block_device *bdev);
};
+/*
+ * For filesystems using @fs_holder_ops, the @holder argument passed to
+ * helpers used to open and claim block devices via
+ * bd_prepare_to_claim() must point to a superblock.
+ */
extern const struct blk_holder_ops fs_holder_ops;
/*
@@ -1477,7 +1494,8 @@ extern const struct blk_holder_ops fs_holder_ops;
* as stored in sb->s_flags.
*/
#define sb_open_mode(flags) \
- (BLK_OPEN_READ | (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
+ (BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
+ (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
struct bdev_handle {
struct block_device *bdev;
@@ -1485,10 +1503,6 @@ struct bdev_handle {
blk_mode_t mode;
};
-struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
- const struct blk_holder_ops *hops);
-struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
- void *holder, const struct blk_holder_ops *hops);
struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops);
struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
@@ -1496,7 +1510,6 @@ struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
-void blkdev_put(struct block_device *bdev, void *holder);
void bdev_release(struct bdev_handle *handle);
/* just for blk-cgroup, don't use elsewhere */
@@ -1541,8 +1554,8 @@ static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
}
#endif /* CONFIG_BLOCK */
-int freeze_bdev(struct block_device *bdev);
-int thaw_bdev(struct block_device *bdev);
+int bdev_freeze(struct block_device *bdev);
+int bdev_thaw(struct block_device *bdev);
struct io_comp_batch {
struct request *req_list;
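
As a side note on the renamed helpers above: a minimal sketch, not part of the patch, of how a kernel-internal caller would quiesce the filesystem on a block device using the new names. The example_ function is illustrative.

#include <linux/blkdev.h>

/* Quiesce the filesystem on @bdev around a device-level operation. */
static int example_quiesce_bdev(struct block_device *bdev)
{
        int error;

        error = bdev_freeze(bdev);      /* was freeze_bdev() */
        if (error)
                return error;

        /* ... operate on the frozen, consistent image ... */

        return bdev_thaw(bdev);         /* was thaw_bdev() */
}
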
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 5f23ee599889..d78454a4dd1f 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -205,7 +205,6 @@ struct buffer_head *create_empty_buffers(struct folio *folio,
unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
-void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
@@ -252,11 +251,10 @@ void __bh_read_batch(int nr, struct buffer_head *bhs[],
* address_spaces.
*/
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
-int block_write_full_page(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
+int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
+ void *get_block);
int __block_write_full_folio(struct inode *inode, struct folio *folio,
- get_block_t *get_block, struct writeback_control *wbc,
- bh_end_io_t *handler);
+ get_block_t *get_block, struct writeback_control *wbc);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
@@ -270,7 +268,6 @@ int generic_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
-void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
unsigned, struct page **, void **,
get_block_t *, loff_t *);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 4a6b6b77ccb6..ea48c861cd36 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -496,6 +496,20 @@ struct cgroup {
struct cgroup_rstat_cpu __percpu *rstat_cpu;
struct list_head rstat_css_list;
+ /*
+ * Add padding to separate the read mostly rstat_cpu and
+ * rstat_css_list into a different cacheline from the following
+ * rstat_flush_next and *bstat fields which can have frequent updates.
+ */
+ CACHELINE_PADDING(_pad_);
+
+ /*
+ * A singly-linked list of cgroup structures to be rstat flushed.
+ * This is a scratch field to be used exclusively by
+ * cgroup_rstat_flush_locked() and protected by cgroup_rstat_lock.
+ */
+ struct cgroup *rstat_flush_next;
+
/* cgroup basic resource statistics */
struct cgroup_base_stat last_bstat;
struct cgroup_base_stat bstat;
@@ -548,6 +562,10 @@ struct cgroup_root {
/* Unique id for this hierarchy. */
int hierarchy_id;
+ /* A list running through the active hierarchies */
+ struct list_head root_list;
+ struct rcu_head rcu; /* Must be near the top */
+
/*
* The root cgroup. The containing cgroup_root will be destroyed on its
* release. cgrp->ancestors[0] will be used overflowing into the
@@ -561,9 +579,6 @@ struct cgroup_root {
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
- /* A list running through the active hierarchies */
- struct list_head root_list;
-
/* Hierarchy-specific flags */
unsigned int flags;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 0ef0af66080e..34aaf0e87def 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -69,6 +69,7 @@ struct css_task_iter {
extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
+extern spinlock_t css_set_lock;
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
@@ -386,7 +387,6 @@ static inline void cgroup_unlock(void)
* as locks used during the cgroup_subsys::attach() methods.
*/
#ifdef CONFIG_PROVE_RCU
-extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
rcu_read_lock_sched_held() || \
@@ -853,4 +853,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
#endif /* CONFIG_CGROUP_BPF */
+struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
+
#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 9f1a9c455b68..c2d09bc4f976 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -125,25 +125,55 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
* trivial wrapper around DEFINE_CLASS() above specifically
* for locks.
*
+ * DEFINE_GUARD_COND(name, ext, condlock)
+ * wrapper around EXTEND_CLASS above to add conditional lock
+ * variants to a base class, eg. mutex_trylock() or
+ * mutex_lock_interruptible().
+ *
* guard(name):
- * an anonymous instance of the (guard) class
+ * an anonymous instance of the (guard) class, not recommended for
+ * conditional locks.
*
* scoped_guard (name, args...) { }:
* similar to CLASS(name, scope)(args), except the variable (with the
 * explicit name 'scope') is declared in a for-loop such that its scope is
* bound to the next (compound) statement.
*
+ * for conditional locks the loop body is skipped when the lock is not
+ * acquired.
+ *
+ * scoped_cond_guard (name, fail, args...) { }:
+ * similar to scoped_guard(), except it runs the 'fail' statement when
+ * the lock acquire fails.
+ *
*/
#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
- DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T)
+ DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
+ static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
+ { return *_T; }
+
+#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
+ EXTEND_CLASS(_name, _ext, \
+ ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
+ class_##_name##_t _T) \
+ static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); }
#define guard(_name) \
CLASS(_name, __UNIQUE_ID(guard))
+#define __guard_ptr(_name) class_##_name##_lock_ptr
+
#define scoped_guard(_name, args...) \
for (CLASS(_name, scope)(args), \
- *done = NULL; !done; done = (void *)1)
+ *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)
+
+#define scoped_cond_guard(_name, _fail, args...) \
+ for (CLASS(_name, scope)(args), \
+ *done = NULL; !done; done = (void *)1) \
+ if (!__guard_ptr(_name)(&scope)) _fail; \
+ else
/*
* Additional helper macros for generating lock guards with types, either for
@@ -152,6 +182,7 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
*
* DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
* DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
*
* will result in the following type:
*
@@ -173,6 +204,11 @@ typedef struct { \
static inline void class_##_name##_destructor(class_##_name##_t *_T) \
{ \
if (_T->lock) { _unlock; } \
+} \
+ \
+static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
+{ \
+ return _T->lock; \
}
@@ -201,4 +237,14 @@ __DEFINE_LOCK_GUARD_1(_name, _type, _lock)
__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
__DEFINE_LOCK_GUARD_0(_name, _lock)
+#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
+ EXTEND_CLASS(_name, _ext, \
+ ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
+ if (_T->lock && !(_condlock)) _T->lock = NULL; \
+ _t; }), \
+ typeof_member(class_##_name##_t, lock) l) \
+ static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); }
+
+
#endif /* __LINUX_GUARDS_H */
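
To make the new conditional-guard machinery concrete, here is a minimal usage sketch, not part of the patch. It assumes the mutex guard classes (mutex, mutex_try, mutex_intr) that linux/mutex.h defines via DEFINE_GUARD()/DEFINE_GUARD_COND(); the example_ identifiers are illustrative.

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_count;

/* Unconditional guard: the mutex is dropped when the enclosing scope ends. */
static void example_inc(void)
{
        guard(mutex)(&example_lock);
        example_count++;
}

/* Conditional class: the body is skipped if mutex_trylock() did not succeed. */
static void example_try_inc(void)
{
        scoped_guard(mutex_try, &example_lock)
                example_count++;
}

/* scoped_cond_guard(): the 'fail' statement runs when the acquire fails. */
static int example_inc_interruptible(void)
{
        scoped_cond_guard(mutex_intr, return -EINTR, &example_lock)
                example_count++;
        return 0;
}
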
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 1c5ca92a0555..afda5f24d3dd 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1203,6 +1203,7 @@ void arch_set_freq_scale(const struct cpumask *cpus,
{
}
#endif
+
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index efc0c0b07efb..172d0a743e5d 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -66,15 +66,12 @@ enum cpuhp_state {
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
CPUHP_X86_HPET_DEAD,
- CPUHP_X86_APB_DEAD,
CPUHP_X86_MCE_DEAD,
CPUHP_VIRT_NET_DEAD,
CPUHP_IBMVNIC_DEAD,
CPUHP_SLUB_DEAD,
CPUHP_DEBUG_OBJ_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
- /* Must be after CPUHP_MM_VMSTAT_DEAD */
- CPUHP_MM_DEMOTION_DEAD,
CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
CPUHP_NET_MVNETA_DEAD,
@@ -96,7 +93,6 @@ enum cpuhp_state {
CPUHP_NET_DEV_DEAD,
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_IOVA_DEAD,
- CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_PADATA_DEAD,
CPUHP_AP_DTPM_CPU_DEAD,
@@ -108,7 +104,6 @@ enum cpuhp_state {
CPUHP_X2APIC_PREPARE,
CPUHP_SMPCFD_PREPARE,
CPUHP_RELAY_PREPARE,
- CPUHP_SLAB_PREPARE,
CPUHP_MD_RAID5_PREPARE,
CPUHP_RCUTREE_PREP,
CPUHP_CPUIDLE_COUPLED_PREPARE,
@@ -118,13 +113,11 @@ enum cpuhp_state {
CPUHP_XEN_EVTCHN_PREPARE,
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
CPUHP_SH_SH3X_PREPARE,
- CPUHP_NET_FLOW_PREPARE,
CPUHP_TOPOLOGY_PREPARE,
CPUHP_NET_IUCV_PREPARE,
CPUHP_ARM_BL_PREPARE,
CPUHP_TRACE_RB_PREPARE,
CPUHP_MM_ZS_PREPARE,
- CPUHP_MM_ZSWP_MEM_PREPARE,
CPUHP_MM_ZSWP_POOL_PREPARE,
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
@@ -151,18 +144,14 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
- CPUHP_AP_IRQ_RISCV_STARTING,
CPUHP_AP_IRQ_LOONGARCH_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
- CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
- CPUHP_AP_PERF_X86_CQM_STARTING,
CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
- CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
@@ -179,7 +168,6 @@ enum cpuhp_state {
CPUHP_AP_QCOM_TIMER_STARTING,
CPUHP_AP_TEGRA_TIMER_STARTING,
CPUHP_AP_ARMADA_TIMER_STARTING,
- CPUHP_AP_MARCO_TIMER_STARTING,
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
@@ -217,9 +205,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
CPUHP_AP_PERF_X86_RAPL_ONLINE,
- CPUHP_AP_PERF_X86_CQM_ONLINE,
CPUHP_AP_PERF_X86_CSTATE_ONLINE,
- CPUHP_AP_PERF_X86_IDXD_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
@@ -251,9 +237,7 @@ enum cpuhp_state {
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
- CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
- /* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */
- CPUHP_AP_MM_DEMOTION_ONLINE,
+ CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 40,
CPUHP_AP_X86_HPET_ONLINE,
CPUHP_AP_X86_KVM_CLK_ONLINE,
CPUHP_AP_ACTIVE,
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index d629094fac6e..875d12598bd2 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -77,6 +77,7 @@ extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -207,6 +208,11 @@ static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
return false;
}
+static inline bool cpuset_cpu_is_isolated(int cpu)
+{
+ return false;
+}
+
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
return node_possible_map;
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 5126a4fecb44..9eaeaafe0cad 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -87,12 +87,6 @@ Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
void *data, size_t data_len);
void final_note(Elf_Word *buf);
-#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
-#ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE
-#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
-#endif
-#endif
-
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
unsigned long long *crash_size, unsigned long long *crash_base,
unsigned long long *low_size, bool *high);
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h
index 72c92c396bb8..cd4f420231ba 100644
--- a/include/linux/crc-ccitt.h
+++ b/include/linux/crc-ccitt.h
@@ -5,19 +5,12 @@
#include <linux/types.h>
extern u16 const crc_ccitt_table[256];
-extern u16 const crc_ccitt_false_table[256];
extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
-extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
{
return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
}
-static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
-{
- return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
-}
-
#endif /* _LINUX_CRC_CCITT_H */
diff --git a/include/linux/damon.h b/include/linux/damon.h
index e00ddf1ed39c..5881e4ac30be 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -2,7 +2,7 @@
/*
* DAMON api
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#ifndef _DAMON_H_
@@ -136,6 +136,9 @@ enum damos_action {
* @weight_nr_accesses: Weight of the region's nr_accesses for prioritization.
* @weight_age: Weight of the region's age for prioritization.
*
+ * @get_score: Feedback function for self-tuning quota.
+ * @get_score_arg: Parameter for @get_score
+ *
* To avoid consuming too much CPU time or IO resources for applying the
* &struct damos->action to large memory, DAMON allows users to set time and/or
* size quotas. The quotas can be set by writing non-zero values to &ms and
@@ -153,6 +156,17 @@ enum damos_action {
* You could customize the prioritization logic by setting &weight_sz,
* &weight_nr_accesses, and &weight_age, because monitoring operations are
* encouraged to respect those.
+ *
+ * If the @get_score function pointer is set, DAMON calls it back with
+ * @get_score_arg and gets its return value once per @reset_interval. Then,
+ * DAMON adjusts the effective quota using the return value as a feedback
+ * score to the current quota, using its internal feedback loop algorithm.
+ *
+ * The feedback loop algorithm assumes the quota input and the feedback score
+ * output are in a positive proportional relationship, and that the goal of
+ * the tuning is getting a feedback score of 10,000. If @ms and/or @sz are
+ * set together, those work as a hard limit quota. If neither @ms nor @sz is
+ * set, the mechanism starts from a quota of one byte.
*/
struct damos_quota {
unsigned long ms;
@@ -163,6 +177,9 @@ struct damos_quota {
unsigned int weight_nr_accesses;
unsigned int weight_age;
+ unsigned long (*get_score)(void *arg);
+ void *get_score_arg;
+
/* private: */
/* For throughput estimation */
unsigned long total_charged_sz;
@@ -179,6 +196,9 @@ struct damos_quota {
/* For prioritization */
unsigned long histogram[DAMOS_MAX_SCORE + 1];
unsigned int min_score;
+
+ /* For feedback loop */
+ unsigned long esz_bp;
};
/**
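
A sketch, not part of the patch, of how a caller might wire up the new @get_score feedback described above. example_read_metric() is a hypothetical measurement; the callback only has to return a value whose target is 10,000.

#include <linux/damon.h>

/*
 * Feedback callback: scale the measured metric so that 10,000 means "on
 * target". Per the assumption above, a score below 10,000 lets DAMON grow
 * the effective quota and a score above it makes DAMON shrink it.
 */
static unsigned long example_quota_score(void *arg)
{
        unsigned long *target = arg;

        return example_read_metric() * 10000 / *target;  /* hypothetical metric */
}

static void example_setup_quota(struct damos_quota *quota, unsigned long *target)
{
        quota->get_score = example_quota_score;
        quota->get_score_arg = target;
        quota->sz = 128 * 1024 * 1024;  /* optional hard limit; without one the
                                           tuning starts from a one-byte quota */
}
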
diff --git a/include/linux/edac.h b/include/linux/edac.h
index fa4bda2a70f6..1174beb94ab6 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -187,6 +187,7 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_NVDIMM: Non-volatile RAM
* @MEM_WIO2: Wide I/O 2.
* @MEM_HBM2: High bandwidth Memory Gen 2.
+ * @MEM_HBM3: High bandwidth Memory Gen 3.
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -218,6 +219,7 @@ enum mem_type {
MEM_NVDIMM,
MEM_WIO2,
MEM_HBM2,
+ MEM_HBM3,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -248,6 +250,7 @@ enum mem_type {
#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
#define MEM_FLAG_WIO2 BIT(MEM_WIO2)
#define MEM_FLAG_HBM2 BIT(MEM_HBM2)
+#define MEM_FLAG_HBM3 BIT(MEM_HBM3)
/**
* enum edac_type - Error Detection and Correction capabilities and mode
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index b9caa01dfac4..88d91e087471 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -224,7 +224,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
unsigned long max_util, unsigned long sum_util,
unsigned long allowed_cpu_cap)
{
- unsigned long freq, scale_cpu;
+ unsigned long freq, ref_freq, scale_cpu;
struct em_perf_state *ps;
int cpu;
@@ -241,11 +241,10 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
*/
cpu = cpumask_first(to_cpumask(pd->cpus));
scale_cpu = arch_scale_cpu_capacity(cpu);
- ps = &pd->table[pd->nr_perf_states - 1];
+ ref_freq = arch_scale_freq_ref(cpu);
- max_util = map_util_perf(max_util);
max_util = min(max_util, allowed_cpu_cap);
- freq = map_util_freq(max_util, ps->frequency, scale_cpu);
+ freq = map_util_freq(max_util, ref_freq, scale_cpu);
/*
* Find the lowest performance state of the Energy Model above the
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index d95ab85f96ba..b0fb775a600d 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -7,6 +7,11 @@
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
+#include <linux/context_tracking.h>
+#include <linux/livepatch.h>
+#include <linux/resume_user_mode.h>
+#include <linux/tick.h>
+#include <linux/kmsan.h>
#include <asm/entry-common.h>
@@ -98,7 +103,19 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
* done between establishing state and enabling interrupts. The caller must
* enable interrupts before invoking syscall_enter_from_user_mode_work().
*/
-void enter_from_user_mode(struct pt_regs *regs);
+static __always_inline void enter_from_user_mode(struct pt_regs *regs)
+{
+ arch_enter_from_user_mode(regs);
+ lockdep_hardirqs_off(CALLER_ADDR0);
+
+ CT_WARN_ON(__ct_state() != CONTEXT_USER);
+ user_exit_irqoff();
+
+ instrumentation_begin();
+ kmsan_unpoison_entry_regs(regs);
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
/**
* syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
@@ -117,6 +134,9 @@ void enter_from_user_mode(struct pt_regs *regs);
*/
void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
+long syscall_trace_enter(struct pt_regs *regs, long syscall,
+ unsigned long work);
+
/**
* syscall_enter_from_user_mode_work - Check and handle work before invoking
* a syscall
@@ -140,7 +160,15 @@ void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
* ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
* 2) Invocation of audit_syscall_entry()
*/
-long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
+static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
+{
+ unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
+
+ if (work & SYSCALL_WORK_ENTER)
+ syscall = syscall_trace_enter(regs, syscall, work);
+
+ return syscall;
+}
/**
* syscall_enter_from_user_mode - Establish state and check and handle work
@@ -159,7 +187,19 @@ long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
* Returns: The original or a modified syscall number. See
* syscall_enter_from_user_mode_work() for further explanation.
*/
-long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
+static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
+{
+ long ret;
+
+ enter_from_user_mode(regs);
+
+ instrumentation_begin();
+ local_irq_enable();
+ ret = syscall_enter_from_user_mode_work(regs, syscall);
+ instrumentation_end();
+
+ return ret;
+}
/**
* local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
@@ -259,6 +299,43 @@ static __always_inline void arch_exit_to_user_mode(void) { }
void arch_do_signal_or_restart(struct pt_regs *regs);
/**
+ * exit_to_user_mode_loop - do any pending work before leaving to user space
+ */
+unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+ unsigned long ti_work);
+
+/**
+ * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * 1) check that interrupts are disabled
+ * 2) call tick_nohz_user_enter_prepare()
+ * 3) call exit_to_user_mode_loop() if any flags from
+ * EXIT_TO_USER_MODE_WORK are set
+ * 4) check that interrupts are still disabled
+ */
+static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ unsigned long ti_work;
+
+ lockdep_assert_irqs_disabled();
+
+ /* Flush pending rcuog wakeup before the last need_resched() check */
+ tick_nohz_user_enter_prepare();
+
+ ti_work = read_thread_flags();
+ if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+ ti_work = exit_to_user_mode_loop(regs, ti_work);
+
+ arch_exit_to_user_mode_prepare(regs, ti_work);
+
+ /* Ensure that kernel state is sane for a return to userspace */
+ kmap_assert_nomap();
+ lockdep_assert_irqs_disabled();
+ lockdep_sys_exit();
+}
+
+/**
* exit_to_user_mode - Fixup state when exiting to user mode
*
* Syscall/interrupt exit enables interrupts, but the kernel state is
@@ -276,7 +353,17 @@ void arch_do_signal_or_restart(struct pt_regs *regs);
* non-instrumentable.
* The caller has to invoke syscall_exit_to_user_mode_work() before this.
*/
-void exit_to_user_mode(void);
+static __always_inline void exit_to_user_mode(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+
+ user_enter_irqoff();
+ arch_exit_to_user_mode();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
/**
* syscall_exit_to_user_mode_work - Handle work before returning to user mode
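
A sketch, not part of the patch, of the call pattern the now-inlined helpers are meant for: an architecture's syscall dispatcher brackets the actual syscall with the generic entry and exit helpers. example_invoke_syscall() and regs_set_return_value() stand in for arch-specific code, and syscall_exit_to_user_mode() is the companion helper from the same header.

#include <linux/entry-common.h>

static void example_arch_do_syscall(struct pt_regs *regs, long nr)
{
        /* Establishes state, enables interrupts, runs syscall-entry work. */
        nr = syscall_enter_from_user_mode(regs, nr);

        if (nr >= 0 && nr < NR_syscalls)
                example_invoke_syscall(regs, nr);               /* hypothetical */
        else
                regs_set_return_value(regs, -ENOSYS);           /* arch helper */

        /* Runs exit-to-user work and disables interrupts again. */
        syscall_exit_to_user_mode(regs);
}
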
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index b9d83652c097..e32bee4345fb 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -35,8 +35,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
-__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask);
+void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
@@ -58,15 +57,8 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
return ERR_PTR(-ENOSYS);
}
-static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+static inline void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
- return -ENOSYS;
-}
-
-static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
- unsigned mask)
-{
- return -ENOSYS;
}
static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
@@ -92,5 +84,10 @@ static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
#endif
+static inline void eventfd_signal(struct eventfd_ctx *ctx)
+{
+ eventfd_signal_mask(ctx, 0);
+}
+
#endif /* _LINUX_EVENTFD_H */
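
With the counter argument gone, signalling always adds one to the eventfd. A minimal driver-style sketch, not part of the patch; struct example_dev and the example_ functions are illustrative.

#include <linux/err.h>
#include <linux/eventfd.h>

struct example_dev {
        struct eventfd_ctx *trigger;
};

/* Userspace hands in an eventfd; keep a reference to its context. */
static int example_set_eventfd(struct example_dev *dev, int fd)
{
        struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
        dev->trigger = ctx;
        return 0;
}

/* Completion path: no count to pass any more, the counter is bumped by one. */
static void example_complete(struct example_dev *dev)
{
        if (dev->trigger)
                eventfd_signal(dev->trigger);
}
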
diff --git a/include/linux/evm.h b/include/linux/evm.h
index 01fc495a83e2..36ec884320d9 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -31,6 +31,7 @@ extern void evm_inode_post_setxattr(struct dentry *dentry,
const char *xattr_name,
const void *xattr_value,
size_t xattr_value_len);
+extern int evm_inode_copy_up_xattr(const char *name);
extern int evm_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *xattr_name);
extern void evm_inode_post_removexattr(struct dentry *dentry,
@@ -117,6 +118,11 @@ static inline void evm_inode_post_setxattr(struct dentry *dentry,
return;
}
+static inline int evm_inode_copy_up_xattr(const char *name)
+{
+ return 0;
+}
+
static inline int evm_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry,
const char *xattr_name)
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index bc4c3287a65e..78c8326d74ae 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -83,12 +83,17 @@ struct dentry;
static inline struct file *files_lookup_fd_raw(struct files_struct *files, unsigned int fd)
{
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
-
- if (fd < fdt->max_fds) {
- fd = array_index_nospec(fd, fdt->max_fds);
- return rcu_dereference_raw(fdt->fd[fd]);
- }
- return NULL;
+ unsigned long mask = array_index_mask_nospec(fd, fdt->max_fds);
+ struct file *needs_masking;
+
+ /*
+ * 'mask' is zero for an out-of-bounds fd, all ones for ok.
+ * 'fd&mask' is 'fd' for ok, or 0 for out of bounds.
+ *
+ * Accessing fdt->fd[0] is ok, but needs masking of the result.
+ */
+ needs_masking = rcu_dereference_raw(fdt->fd[fd&mask]);
+ return (struct file *)(mask & (unsigned long)needs_masking);
}
static inline struct file *files_lookup_fd_locked(struct files_struct *files, unsigned int fd)
@@ -114,7 +119,7 @@ int iterate_fd(struct files_struct *, unsigned,
extern int close_fd(unsigned int fd);
extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
-extern struct file *close_fd_get_file(unsigned int fd);
+extern struct file *file_close_fd(unsigned int fd);
extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
struct files_struct **new_fdp);
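
The fd lookup above relies on branchless bounds masking rather than a conditional. The same idiom in isolation, as a sketch outside the fd table (not part of the patch):

#include <linux/nospec.h>

/*
 * array_index_mask_nospec(idx, nr) is ~0UL when idx < nr and 0 otherwise, so
 * an out-of-range index reads slot 0 (always valid) and the loaded pointer is
 * then masked down to NULL -- no branch for the CPU to speculate past.
 */
static void *example_masked_lookup(void **table, unsigned int nr, unsigned int idx)
{
        unsigned long mask = array_index_mask_nospec(idx, nr);

        return (void *)(mask & (unsigned long)table[idx & mask]);
}
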
diff --git a/include/linux/file.h b/include/linux/file.h
index 6e9099d29343..6834a29338c4 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -96,18 +96,8 @@ DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
extern void fd_install(unsigned int fd, struct file *file);
-extern int __receive_fd(struct file *file, int __user *ufd,
- unsigned int o_flags);
+int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags);
-extern int receive_fd(struct file *file, unsigned int o_flags);
-
-static inline int receive_fd_user(struct file *file, int __user *ufd,
- unsigned int o_flags)
-{
- if (ufd == NULL)
- return -EFAULT;
- return __receive_fd(file, ufd, o_flags);
-}
int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags);
extern void flush_delayed_fput(void);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 98b7a7a8c42e..e6ba0cc6f2ee 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -434,7 +434,7 @@ struct address_space_operations {
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
- int (*error_remove_page)(struct address_space *, struct page *);
+ int (*error_remove_folio)(struct address_space *, struct folio *);
/* swapfile support */
int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
@@ -463,9 +463,9 @@ extern const struct address_space_operations empty_aops;
* @a_ops: Methods.
* @flags: Error bits and flags (AS_*).
* @wb_err: The most recent error which has occurred.
- * @private_lock: For use by the owner of the address_space.
- * @private_list: For use by the owner of the address_space.
- * @private_data: For use by the owner of the address_space.
+ * @i_private_lock: For use by the owner of the address_space.
+ * @i_private_list: For use by the owner of the address_space.
+ * @i_private_data: For use by the owner of the address_space.
*/
struct address_space {
struct inode *host;
@@ -484,9 +484,9 @@ struct address_space {
unsigned long flags;
struct rw_semaphore i_mmap_rwsem;
errseq_t wb_err;
- spinlock_t private_lock;
- struct list_head private_list;
- void *private_data;
+ spinlock_t i_private_lock;
+ struct list_head i_private_list;
+ void * i_private_data;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
@@ -991,8 +991,10 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
*/
struct file {
union {
+ /* fput() uses task work when closing and freeing file (default). */
+ struct callback_head f_task_work;
+ /* fput() must use workqueue (most kernel threads). */
struct llist_node f_llist;
- struct rcu_head f_rcuhead;
unsigned int f_iocb_flags;
};
@@ -1164,6 +1166,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+#define SB_I_EVM_UNSUPPORTED 0x00000080
#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
@@ -1185,7 +1188,8 @@ enum {
struct sb_writers {
unsigned short frozen; /* Is sb frozen? */
- unsigned short freeze_holders; /* Who froze fs? */
+ int freeze_kcount; /* How many kernel freeze requests? */
+ int freeze_ucount; /* How many userspace freeze requests? */
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
@@ -1645,9 +1649,70 @@ static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
#define __sb_writers_release(sb, lev) \
percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+/**
+ * __sb_write_started - check if sb freeze level is held
+ * @sb: the super we write to
+ * @level: the freeze level
+ *
+ * * > 0 - sb freeze level is held
+ * * 0 - sb freeze level is not held
+ * * < 0 - !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN
+ */
+static inline int __sb_write_started(const struct super_block *sb, int level)
+{
+ return lockdep_is_held_type(sb->s_writers.rw_sem + level - 1, 1);
+}
+
+/**
+ * sb_write_started - check if SB_FREEZE_WRITE is held
+ * @sb: the super we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
static inline bool sb_write_started(const struct super_block *sb)
{
- return lockdep_is_held_type(sb->s_writers.rw_sem + SB_FREEZE_WRITE - 1, 1);
+ return __sb_write_started(sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * sb_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @sb: the super we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
+static inline bool sb_write_not_started(const struct super_block *sb)
+{
+ return __sb_write_started(sb, SB_FREEZE_WRITE) <= 0;
+}
+
+/**
+ * file_write_started - check if SB_FREEZE_WRITE is held
+ * @file: the file we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ * May be false positive with !S_ISREG, because file_start_write() has
+ * no effect on !S_ISREG.
+ */
+static inline bool file_write_started(const struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return sb_write_started(file_inode(file)->i_sb);
+}
+
+/**
+ * file_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @file: the file we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ * May be false positive with !S_ISREG, because file_start_write() has
+ * no effect on !S_ISREG.
+ */
+static inline bool file_write_not_started(const struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return sb_write_not_started(file_inode(file)->i_sb);
}
/**
@@ -2029,9 +2094,6 @@ extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
-extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- size_t len, unsigned int flags);
int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t *len, unsigned int remap_flags,
@@ -2051,9 +2113,24 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
struct file *dst_file, loff_t dst_pos,
loff_t len, unsigned int remap_flags);
+/**
+ * enum freeze_holder - holder of the freeze
+ * @FREEZE_HOLDER_KERNEL: kernel wants to freeze or thaw filesystem
+ * @FREEZE_HOLDER_USERSPACE: userspace wants to freeze or thaw filesystem
+ * @FREEZE_MAY_NEST: whether nesting freeze and thaw requests is allowed
+ *
+ * Indicate who the owner of the freeze or thaw request is and whether
+ * the freeze needs to be exclusive or can nest.
+ * Without @FREEZE_MAY_NEST, multiple freeze and thaw requests from the
+ * same holder aren't allowed. It is however allowed to hold a single
+ * @FREEZE_HOLDER_USERSPACE and a single @FREEZE_HOLDER_KERNEL freeze at
+ * the same time. This is relied upon by some filesystems during online
+ * repair or similar.
+ */
enum freeze_holder {
FREEZE_HOLDER_KERNEL = (1U << 0),
FREEZE_HOLDER_USERSPACE = (1U << 1),
+ FREEZE_MAY_NEST = (1U << 2),
};
struct super_operations {
@@ -2517,26 +2594,31 @@ struct file *dentry_open(const struct path *path, int flags,
const struct cred *creds);
struct file *dentry_create(const struct path *path, int flags, umode_t mode,
const struct cred *cred);
-struct file *backing_file_open(const struct path *user_path, int flags,
- const struct path *real_path,
- const struct cred *cred);
struct path *backing_file_user_path(struct file *f);
/*
- * file_user_path - get the path to display for memory mapped file
- *
* When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
* stored in ->vm_file is a backing file whose f_inode is on the underlying
- * filesystem. When the mapped file path is displayed to user (e.g. via
- * /proc/<pid>/maps), this helper should be used to get the path to display
- * to the user, which is the path of the fd that user has requested to map.
+ * filesystem. When the mapped file path and inode number are displayed to
+ * user (e.g. via /proc/<pid>/maps), these helpers should be used to get the
+ * path and inode number to display to the user, which is the path of the fd
+ * that user has requested to map and the inode number that would be returned
+ * by fstat() on that same fd.
*/
+/* Get the path to display in /proc/<pid>/maps */
static inline const struct path *file_user_path(struct file *f)
{
if (unlikely(f->f_mode & FMODE_BACKING))
return backing_file_user_path(f);
return &f->f_path;
}
+/* Get the inode whose inode number to display in /proc/<pid>/maps */
+static inline const struct inode *file_user_inode(struct file *f)
+{
+ if (unlikely(f->f_mode & FMODE_BACKING))
+ return d_inode(backing_file_user_path(f)->dentry);
+ return file_inode(f);
+}
static inline struct file *file_clone_open(struct file *file)
{
@@ -2991,8 +3073,6 @@ ssize_t copy_splice_read(struct file *in, loff_t *ppos,
size_t len, unsigned int flags);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
-extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
- loff_t *opos, size_t len, unsigned int flags);
extern void
@@ -3121,7 +3201,6 @@ extern int vfs_readlink(struct dentry *, char __user *, int);
extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
-extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
extern void iterate_supers(void (*)(struct super_block *, void *), void *);
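
A sketch, not part of the patch, of the nesting semantics documented for enum freeze_holder above. It assumes the freeze_super()/thaw_super() prototypes in this header take the holder flags, as in the rest of this series; the example_ wrappers are illustrative.

#include <linux/fs.h>

/*
 * A kernel-internal user (e.g. online repair) freezing a filesystem while
 * allowing its own requests to nest, independent of any userspace freeze.
 */
static int example_kernel_freeze(struct super_block *sb)
{
        return freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST);
}

static int example_kernel_thaw(struct super_block *sb)
{
        return thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST);
}
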
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index bcb6609b54b3..11e6434b8e71 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -100,29 +100,49 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
}
-/* Simple call site for access decisions */
-static inline int fsnotify_perm(struct file *file, int mask)
+/*
+ * fsnotify_file_area_perm - permission hook before access to file range
+ */
+static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
+ const loff_t *ppos, size_t count)
{
- int ret;
- __u32 fsnotify_mask = 0;
+ __u32 fsnotify_mask = FS_ACCESS_PERM;
+
+ /*
+ * filesystem may be modified in the context of permission events
+ * (e.g. by HSM filling a file on access), so sb freeze protection
+ * must not be held.
+ */
+ lockdep_assert_once(file_write_not_started(file));
- if (!(mask & (MAY_READ | MAY_OPEN)))
+ if (!(perm_mask & MAY_READ))
return 0;
- if (mask & MAY_OPEN) {
- fsnotify_mask = FS_OPEN_PERM;
+ return fsnotify_file(file, fsnotify_mask);
+}
+
+/*
+ * fsnotify_file_perm - permission hook before file access
+ */
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+{
+ return fsnotify_file_area_perm(file, perm_mask, NULL, 0);
+}
- if (file->f_flags & __FMODE_EXEC) {
- ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
+/*
+ * fsnotify_open_perm - permission hook before file open
+ */
+static inline int fsnotify_open_perm(struct file *file)
+{
+ int ret;
- if (ret)
- return ret;
- }
- } else if (mask & MAY_READ) {
- fsnotify_mask = FS_ACCESS_PERM;
+ if (file->f_flags & __FMODE_EXEC) {
+ ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
+ if (ret)
+ return ret;
}
- return fsnotify_file(file, fsnotify_mask);
+ return fsnotify_file(file, FS_OPEN_PERM);
}
/*
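
The single fsnotify_perm() hook is split into narrower hooks above. A sketch of the intended call pattern, not part of the patch and not the actual VFS call sites: open-time checks go through fsnotify_open_perm(), range-based access checks through fsnotify_file_area_perm().

#include <linux/fs.h>
#include <linux/fsnotify.h>

static int example_open_checks(struct file *file)
{
        return fsnotify_open_perm(file);        /* FS_OPEN_PERM / FS_OPEN_EXEC_PERM */
}

static int example_read_checks(struct file *file, loff_t *ppos, size_t count)
{
        /* FS_ACCESS_PERM; must not be called with sb freeze protection held. */
        return fsnotify_file_area_perm(file, MAY_READ, ppos, count);
}
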
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 6583a58670c5..1b6053da8754 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -162,25 +162,25 @@ typedef unsigned int __bitwise gfp_t;
* %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
*
* The default allocator behavior depends on the request size. We have a concept
- * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
+ * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
* !costly allocations are too essential to fail so they are implicitly
* non-failing by default (with some exceptions like OOM victims might fail so
* the caller still has to check for failures) while costly requests try to be
* not disruptive and back off even without invoking the OOM killer.
* The following three modifiers might be used to override some of these
- * implicit rules
+ * implicit rules.
*
* %__GFP_NORETRY: The VM implementation will try only very lightweight
* memory direct reclaim to get some memory under memory pressure (thus
* it can sleep). It will avoid disruptive actions like OOM killer. The
* caller must handle the failure which is quite likely to happen under
* heavy memory pressure. The flag is suitable when failure can easily be
- * handled at small cost, such as reduced throughput
+ * handled at small cost, such as reduced throughput.
*
* %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
* procedures that have previously failed if there is some indication
- * that progress has been made else where. It can wait for other
- * tasks to attempt high level approaches to freeing memory such as
+ * that progress has been made elsewhere. It can wait for other
+ * tasks to attempt high-level approaches to freeing memory such as
* compaction (which removes fragmentation) and page-out.
* There is still a definite limit to the number of retries, but it is
* a larger limit than with %__GFP_NORETRY.
@@ -230,7 +230,7 @@ typedef unsigned int __bitwise gfp_t;
* is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
* __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
* memory tags at the same time as zeroing memory has minimal additional
- * performace impact.
+ * performance impact.
*
* %__GFP_SKIP_KASAN makes KASAN skip unpoisoning on page allocation.
* Used for userspace and vmalloc pages; the latter are unpoisoned by
@@ -274,7 +274,8 @@ typedef unsigned int __bitwise gfp_t;
* accounted to kmemcg.
*
* %GFP_NOWAIT is for kernel allocations that should not stall for direct
- * reclaim, start physical IO or use any filesystem callback.
+ * reclaim, start physical IO or use any filesystem callback. It is very
+ * likely to fail to allocate memory, even for very small allocations.
*
* %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
* that do not require the starting of any physical IO.
@@ -325,7 +326,7 @@ typedef unsigned int __bitwise gfp_t;
#define GFP_ATOMIC (__GFP_HIGH|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
-#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
+#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)
#define GFP_NOIO (__GFP_RECLAIM)
#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
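
A sketch, not part of the patch, of what the strengthened GFP_NOWAIT wording implies for callers: the opportunistic attempt can fail easily (and now also silently), so any fallback is the caller's job. The example_ helper is illustrative.

#include <linux/slab.h>

static void *example_alloc(size_t size, bool can_sleep)
{
        /* No direct reclaim, no warning on failure -- failure is expected. */
        void *p = kmalloc(size, GFP_NOWAIT);

        if (!p && can_sleep)
                p = kmalloc(size, GFP_KERNEL);  /* sleeping fallback path */
        return p;
}
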
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index be20cff4ba73..451c1dff0e87 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -484,6 +484,82 @@ static inline void memcpy_to_folio(struct folio *folio, size_t offset,
}
/**
+ * folio_zero_tail - Zero the tail of a folio.
+ * @folio: The folio to zero.
+ * @offset: The byte offset in the folio to start zeroing at.
+ * @kaddr: The address the folio is currently mapped to.
+ *
+ * If you have already used kmap_local_folio() to map a folio, written
+ * some data to it and now need to zero the end of the folio (and flush
+ * the dcache), you can use this function. If you do not have the
+ * folio kmapped (eg the folio has been partially populated by DMA),
+ * use folio_zero_range() or folio_zero_segment() instead.
+ *
+ * Return: An address which can be passed to kunmap_local().
+ */
+static inline __must_check void *folio_zero_tail(struct folio *folio,
+ size_t offset, void *kaddr)
+{
+ size_t len = folio_size(folio) - offset;
+
+ if (folio_test_highmem(folio)) {
+ size_t max = PAGE_SIZE - offset_in_page(offset);
+
+ while (len > max) {
+ memset(kaddr, 0, max);
+ kunmap_local(kaddr);
+ len -= max;
+ offset += max;
+ max = PAGE_SIZE;
+ kaddr = kmap_local_folio(folio, offset);
+ }
+ }
+
+ memset(kaddr, 0, len);
+ flush_dcache_folio(folio);
+
+ return kaddr;
+}
+
+/**
+ * folio_fill_tail - Copy some data to a folio and pad with zeroes.
+ * @folio: The destination folio.
+ * @offset: The offset into @folio at which to start copying.
+ * @from: The data to copy.
+ * @len: How many bytes of data to copy.
+ *
+ * This function is most useful for filesystems which support inline data.
+ * When they want to copy data from the inode into the page cache, this
+ * function does everything for them. It supports large folios even on
+ * HIGHMEM configurations.
+ */
+static inline void folio_fill_tail(struct folio *folio, size_t offset,
+ const char *from, size_t len)
+{
+ char *to = kmap_local_folio(folio, offset);
+
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ if (folio_test_highmem(folio)) {
+ size_t max = PAGE_SIZE - offset_in_page(offset);
+
+ while (len > max) {
+ memcpy(to, from, max);
+ kunmap_local(to);
+ len -= max;
+ from += max;
+ offset += max;
+ max = PAGE_SIZE;
+ to = kmap_local_folio(folio, offset);
+ }
+ }
+
+ memcpy(to, from, len);
+ to = folio_zero_tail(folio, offset + len, to + len);
+ kunmap_local(to);
+}
+
+/**
* memcpy_from_file_folio - Copy some bytes from a file folio.
* @to: The destination buffer.
* @folio: The folio to copy from.
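
A sketch, not part of the patch, of the inline-data use case folio_fill_tail() is documented for: copy the inode-resident bytes into the folio and zero the remainder in one call. The example_ function is illustrative, not a real filesystem's read_folio.

#include <linux/highmem.h>
#include <linux/pagemap.h>

static int example_read_inline_folio(struct folio *folio,
                                     const char *inline_data, size_t inline_len)
{
        /* Copies @inline_len bytes, zeroes the tail, flushes the dcache. */
        folio_fill_tail(folio, 0, inline_data, inline_len);
        folio_mark_uptodate(folio);
        folio_unlock(folio);
        return 0;
}
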
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index fa0350b0812a..5adb86af35fc 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -67,6 +67,26 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
+/*
+ * Mask of all large folio orders supported for anonymous THP; all orders up to
+ * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
+ * (which is a limitation of the THP implementation).
+ */
+#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
+
+/*
+ * Mask of all large folio orders supported for file THP.
+ */
+#define THP_ORDERS_ALL_FILE (BIT(PMD_ORDER) | BIT(PUD_ORDER))
+
+/*
+ * Mask of all large folio orders supported for THP.
+ */
+#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
+
+#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
@@ -77,45 +97,105 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
extern unsigned long transparent_hugepage_flags;
+extern unsigned long huge_anon_orders_always;
+extern unsigned long huge_anon_orders_madvise;
+extern unsigned long huge_anon_orders_inherit;
-#define hugepage_flags_enabled() \
- (transparent_hugepage_flags & \
- ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
-#define hugepage_flags_always() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_FLAG))
+static inline bool hugepage_global_enabled(void)
+{
+ return transparent_hugepage_flags &
+ ((1<<TRANSPARENT_HUGEPAGE_FLAG) |
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
+}
+
+static inline bool hugepage_global_always(void)
+{
+ return transparent_hugepage_flags &
+ (1<<TRANSPARENT_HUGEPAGE_FLAG);
+}
+
+static inline bool hugepage_flags_enabled(void)
+{
+ /*
+ * We cover both the anon and the file-backed case here; we must return
+ * true if globally enabled, even when all anon sizes are set to never.
+ * So we don't need to look at huge_anon_orders_inherit.
+ */
+ return hugepage_global_enabled() ||
+ huge_anon_orders_always ||
+ huge_anon_orders_madvise;
+}
+
+static inline int highest_order(unsigned long orders)
+{
+ return fls_long(orders) - 1;
+}
+
+static inline int next_order(unsigned long *orders, int prev)
+{
+ *orders &= ~BIT(prev);
+ return highest_order(*orders);
+}
/*
* Do the below checks:
* - For file vma, check if the linear page offset of vma is
- * HPAGE_PMD_NR aligned within the file. The hugepage is
- * guaranteed to be hugepage-aligned within the file, but we must
- * check that the PMD-aligned addresses in the VMA map to
- * PMD-aligned offsets within the file, else the hugepage will
- * not be PMD-mappable.
- * - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
+ * order-aligned within the file. The hugepage is
+ * guaranteed to be order-aligned within the file, but we must
+ * check that the order-aligned addresses in the VMA map to
+ * order-aligned offsets within the file, else the hugepage will
+ * not be mappable.
+ * - For all vmas, check if the haddr is in an aligned hugepage
* area.
*/
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long addr)
+static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
+ unsigned long addr, int order)
{
+ unsigned long hpage_size = PAGE_SIZE << order;
unsigned long haddr;
/* Don't have to check pgoff for anonymous vma */
if (!vma_is_anonymous(vma)) {
if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
- HPAGE_PMD_NR))
+ hpage_size >> PAGE_SHIFT))
return false;
}
- haddr = addr & HPAGE_PMD_MASK;
+ haddr = ALIGN_DOWN(addr, hpage_size);
- if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+ if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
return false;
return true;
}
+/*
+ * Filter the bitfield of input orders to the ones suitable for use in the vma.
+ * See thp_vma_suitable_order().
+ * All orders that pass the checks are returned as a bitfield.
+ */
+static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orders)
+{
+ int order;
+
+ /*
+ * Iterate over orders, highest to lowest, removing orders that don't
+ * meet alignment requirements from the set. Exit loop at first order
+ * that meets requirements, since all lower orders must also meet
+ * requirements.
+ */
+
+ order = highest_order(orders);
+
+ while (orders) {
+ if (thp_vma_suitable_order(vma, addr, order))
+ break;
+ order = next_order(&orders, order);
+ }
+
+ return orders;
+}
+
static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
struct inode *inode;
@@ -126,12 +206,55 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
inode = vma->vm_file->f_inode;
return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
- (vma->vm_flags & VM_EXEC) &&
!inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}
-bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
- bool smaps, bool in_pf, bool enforce_sysfs);
+unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders);
+
+/**
+ * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
+ * @vma: the vm area to check
+ * @vm_flags: use these vm_flags instead of vma->vm_flags
+ * @smaps: whether answer will be used for smaps file
+ * @in_pf: whether answer will be used by page fault handler
+ * @enforce_sysfs: whether sysfs config should be taken into account
+ * @orders: bitfield of all orders to consider
+ *
+ * Calculates the intersection of the requested hugepage orders and the allowed
+ * hugepage orders for the provided vma. Permitted orders are encoded as a set
+ * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
+ * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
+ *
+ * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
+ * orders are allowed.
+ */
+static inline
+unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders)
+{
+ /* Optimization to check if required orders are enabled early. */
+ if (enforce_sysfs && vma_is_anonymous(vma)) {
+ unsigned long mask = READ_ONCE(huge_anon_orders_always);
+
+ if (vm_flags & VM_HUGEPAGE)
+ mask |= READ_ONCE(huge_anon_orders_madvise);
+ if (hugepage_global_always() ||
+ ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
+ mask |= READ_ONCE(huge_anon_orders_inherit);
+
+ orders &= mask;
+ if (!orders)
+ return 0;
+ }
+
+ return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf,
+ enforce_sysfs, orders);
+}
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
@@ -267,17 +390,24 @@ static inline bool folio_test_pmd_mappable(struct folio *folio)
return false;
}
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long addr)
+static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
+ unsigned long addr, int order)
{
return false;
}
-static inline bool hugepage_vma_check(struct vm_area_struct *vma,
- unsigned long vm_flags, bool smaps,
- bool in_pf, bool enforce_sysfs)
+static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orders)
{
- return false;
+ return 0;
+}
+
+static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders)
+{
+ return 0;
}
static inline void folio_prep_large_rmappable(struct folio *folio) {}
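
A sketch, not part of the patch, of how the new order helpers compose: intersect the orders allowed by policy with the orders that fit the VMA, then take the highest survivor. This is a simplification of what the fault path does; the example_ function is illustrative.

#include <linux/huge_mm.h>

static int example_pick_anon_order(struct vm_area_struct *vma, unsigned long addr)
{
        unsigned long orders;

        /* Orders permitted by sysfs policy and VMA flags (page-fault context). */
        orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true,
                                          true, THP_ORDERS_ALL_ANON);
        /* Of those, the orders whose alignment fits this VMA and address. */
        orders = thp_vma_suitable_orders(vma, addr, orders);

        return orders ? highest_order(orders) : 0;      /* 0: plain page */
}
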
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 236ec7b63c54..c1ee640d87b1 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -829,7 +829,7 @@ static inline unsigned huge_page_shift(struct hstate *h)
static inline bool hstate_is_gigantic(struct hstate *h)
{
- return huge_page_order(h) > MAX_ORDER;
+ return huge_page_order(h) > MAX_PAGE_ORDER;
}
static inline unsigned int pages_per_huge_page(const struct hstate *h)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 40fc5813cf93..bccb3f1f6262 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -37,13 +37,6 @@ extern struct cred init_cred;
#define INIT_TASK_COMM "swapper"
-/* Attach to the init_task data structure for proper alignment */
-#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
-#define __init_task_data __section(".data..init_task")
-#else
-#define __init_task_data /**/
-#endif
-
/* Attach to the thread_info data structure for proper alignment */
#define __init_thread_info __section(".data..init_thread_info")
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 14f5cfabbbc8..db7fe25f3370 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -331,6 +331,9 @@ extern int
walk_system_ram_res(u64 start, u64 end, void *arg,
int (*func)(struct resource *, void *));
extern int
+walk_system_ram_res_rev(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *));
+extern int
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
void *arg, int (*func)(struct resource *, void *));
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 72cb693b075b..dbb06d789e74 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -4,6 +4,7 @@
#include <linux/bug.h>
#include <linux/kasan-enabled.h>
+#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>
@@ -129,20 +130,39 @@ static __always_inline void kasan_poison_slab(struct slab *slab)
__kasan_poison_slab(slab);
}
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
+ * @cache: Cache the object belong to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * temporarily unpoisons an object from a newly allocated slab without doing
+ * anything else. The object must later be repoisoned by
+ * kasan_poison_new_object().
+ */
+static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void *object)
{
if (kasan_enabled())
- __kasan_unpoison_object_data(cache, object);
+ __kasan_unpoison_new_object(cache, object);
}
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_poison_new_object - Repoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * repoisons an object that was previously unpoisoned by
+ * kasan_unpoison_new_object() without doing anything else.
+ */
+static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
void *object)
{
if (kasan_enabled())
- __kasan_poison_object_data(cache, object);
+ __kasan_poison_new_object(cache, object);
}
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
@@ -172,13 +192,6 @@ static __always_inline void kasan_kfree_large(void *ptr)
__kasan_kfree_large(ptr, _RET_IP_);
}
-void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
-static __always_inline void kasan_slab_free_mempool(void *ptr)
-{
- if (kasan_enabled())
- __kasan_slab_free_mempool(ptr, _RET_IP_);
-}
-
void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
@@ -219,6 +232,113 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
return (void *)object;
}
+bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
+ unsigned long ip);
+/**
+ * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function is similar to kasan_mempool_poison_object() but operates on
+ * page allocations.
+ *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_pages().
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_pages(struct page *page,
+ unsigned int order)
+{
+ if (kasan_enabled())
+ return __kasan_mempool_poison_pages(page, order, _RET_IP_);
+ return true;
+}
+
+void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
+ unsigned long ip);
+/**
+ * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function unpoisons a page allocation that was previously poisoned by
+ * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
+ * the tag-based modes, this function assigns a new tag to the allocation.
+ */
+static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
+ unsigned int order)
+{
+ if (kasan_enabled())
+ __kasan_mempool_unpoison_pages(page, order, _RET_IP_);
+}
+
+bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
+/**
+ * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
+ * @ptr: Pointer to the slab allocation.
+ *
+ * This function is intended for kernel subsystems that cache slab allocations
+ * to reuse them instead of freeing them back to the slab allocator (e.g.
+ * mempool).
+ *
+ * This function poisons a slab allocation and saves a free stack trace for it
+ * without initializing the allocation's memory and without putting it into the
+ * quarantine (for the Generic mode).
+ *
+ * This function also performs checks to detect double-free and invalid-free
+ * bugs and reports them. The caller can use the return value of this function
+ * to find out if the allocation is buggy.
+ *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_object().
+ *
+ * This function operates on all slab allocations including large kmalloc
+ * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
+ * size > KMALLOC_MAX_SIZE).
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_object(void *ptr)
+{
+ if (kasan_enabled())
+ return __kasan_mempool_poison_object(ptr, _RET_IP_);
+ return true;
+}
+
+void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
+/**
+ * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
+ * @ptr: Pointer to the slab allocation.
+ * @size: Size to be unpoisoned.
+ *
+ * This function is intended for kernel subsystems that cache slab allocations
+ * to reuse them instead of freeing them back to the slab allocator (e.g.
+ * mempool).
+ *
+ * This function unpoisons a slab allocation that was previously poisoned via
+ * kasan_mempool_poison_object() and saves an alloc stack trace for it without
+ * initializing the allocation's memory. For the tag-based modes, this function
+ * does not assign a new tag to the allocation and instead restores the
+ * original tags based on the pointer value.
+ *
+ * This function operates on all slab allocations including large kmalloc
+ * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
+ * size > KMALLOC_MAX_SIZE).
+ */
+static __always_inline void kasan_mempool_unpoison_object(void *ptr,
+ size_t size)
+{
+ if (kasan_enabled())
+ __kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
+}
+
/*
* Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
* the hardware tag-based mode that doesn't rely on compiler instrumentation.
@@ -242,9 +362,9 @@ static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
-static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void *object) {}
-static inline void kasan_poison_object_data(struct kmem_cache *cache,
+static inline void kasan_poison_new_object(struct kmem_cache *cache,
void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
const void *object)
@@ -256,7 +376,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init
return false;
}
static inline void kasan_kfree_large(void *ptr) {}
-static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags, bool init)
{
@@ -276,6 +395,17 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
{
return (void *)object;
}
+static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
+{
+ return true;
+}
+static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
+static inline bool kasan_mempool_poison_object(void *ptr)
+{
+ return true;
+}
+static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}
+
static inline bool kasan_check_byte(const void *address)
{
return true;
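A minimal sketch of how a subsystem-level object cache might drive the new kasan_mempool_* hooks above; the one-slot cache, its names, and the sizes are illustrative assumptions, not from this patch.

#include <linux/kasan.h>
#include <linux/slab.h>

/* Hypothetical one-slot cache of a kmalloc()ed buffer. */
static void *cached_elem;
static size_t cached_size;

/* Returns true if the element was consumed (cached, or dropped as buggy). */
static bool cache_put(void *ptr, size_t size)
{
	if (cached_elem)
		return false;			/* cache full, caller frees */
	if (!kasan_mempool_poison_object(ptr))
		return true;			/* double/invalid free detected, drop it */
	cached_elem = ptr;
	cached_size = size;
	return true;
}

static void *cache_get(size_t size)
{
	void *ptr = cached_elem;

	if (ptr && size <= cached_size) {
		cached_elem = NULL;
		/* Unpoison (and record an alloc stack) before reuse. */
		kasan_mempool_unpoison_object(ptr, size);
		return ptr;
	}
	return kmalloc(size, GFP_KERNEL);
}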
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 8227455192b7..400cb6c02176 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -403,7 +403,7 @@ bool kexec_load_permitted(int kexec_image_type);
/* List of defined/legal kexec file flags */
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
- KEXEC_FILE_NO_INITRAMFS)
+ KEXEC_FILE_NO_INITRAMFS | KEXEC_FILE_DEBUG)
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
@@ -500,6 +500,13 @@ static inline int crash_hotplug_memory_support(void) { return 0; }
static inline unsigned int crash_get_elfcorehdr_size(void) { return 0; }
#endif
+extern bool kexec_file_dbg_print;
+
+#define kexec_dprintk(fmt, ...) \
+ printk("%s" fmt, \
+ kexec_file_dbg_print ? KERN_INFO : KERN_DEBUG, \
+ ##__VA_ARGS__)
+
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
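A minimal caller sketch for the kexec_dprintk() helper above; the wrapper function and the values printed are made up for illustration.

#include <linux/kexec.h>

static void demo_report_load(unsigned long load_addr, size_t image_size)
{
	/*
	 * Printed at KERN_INFO when userspace passed KEXEC_FILE_DEBUG
	 * (which sets kexec_file_dbg_print), otherwise at KERN_DEBUG.
	 */
	kexec_dprintk("kernel loaded at %#lx, size %#zx\n",
		      load_addr, image_size);
}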
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index c2dd786a30e1..401348e9f92b 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -76,8 +76,8 @@ static inline void ksm_exit(struct mm_struct *mm)
* We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
* but what if the vma was unmerged while the page was swapped out?
*/
-struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address);
+struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr);
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
@@ -129,10 +129,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
return 0;
}
-static inline struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr)
{
- return page;
+ return folio;
}
static inline void rmap_walk_ksm(struct folio *folio,
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index b35968ee9fb5..7675a48a0701 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -73,8 +73,10 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren
/**
* list_lru_add: add an element to the lru list's tail
- * @list_lru: the lru pointer
+ * @lru: the lru pointer
* @item: the item to be added.
+ * @nid: the node id of the sublist to add the item to.
+ * @memcg: the cgroup of the sublist to add the item to.
*
* If the element is already part of a list, this function returns doing
* nothing. Therefore the caller does not need to keep state about whether or
@@ -83,24 +85,54 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren
* the caller organize itself in a way that elements can be in more than
* one type of list, it is up to the caller to fully remove the item from
* the previous list (with list_lru_del() for instance) before moving it
- * to @list_lru
+ * to @lru.
+ *
+ * Return: true if the list was updated, false otherwise
+ */
+bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
+
+/**
+ * list_lru_add_obj: add an element to the lru list's tail
+ * @lru: the lru pointer
+ * @item: the item to be added.
+ *
+ * This function is similar to list_lru_add(), but the NUMA node and the
+ * memcg of the sublist are determined by the @item list_head. This
+ * assumption is valid for slab object LRUs such as dentries, inodes, etc.
*
* Return value: true if the list was updated, false otherwise
*/
-bool list_lru_add(struct list_lru *lru, struct list_head *item);
+bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);
/**
- * list_lru_del: delete an element to the lru list
- * @list_lru: the lru pointer
+ * list_lru_del: delete an element from the lru list
+ * @lru: the lru pointer
* @item: the item to be deleted.
+ * @nid: the node id of the sublist to delete the item from.
+ * @memcg: the cgroup of the sublist to delete the item from.
*
- * This function works analogously as list_lru_add in terms of list
+ * This function works analogously to list_lru_add() in terms of list
* manipulation. The comments about an element already pertaining to
- * a list are also valid for list_lru_del.
+ * a list are also valid for list_lru_del().
*
- * Return value: true if the list was updated, false otherwise
+ * Return: true if the list was updated, false otherwise
*/
-bool list_lru_del(struct list_lru *lru, struct list_head *item);
+bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
+
+/**
+ * list_lru_del_obj: delete an element from the lru list
+ * @lru: the lru pointer
+ * @item: the item to be deleted.
+ *
+ * This function is similar to list_lru_del(), but the NUMA node and the
+ * memcg of the sublist are determined by the @item list_head. This
+ * assumption is valid for slab object LRUs such as dentries, inodes, etc.
+ *
+ * Return value: true if the list was updated, false otherwise.
+ */
+bool list_lru_del_obj(struct list_lru *lru, struct list_head *item);
/**
* list_lru_count_one: return the number of objects currently held by @lru
@@ -108,9 +140,11 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item);
* @nid: the node id to count from.
* @memcg: the cgroup to count from.
*
- * Always return a non-negative number, 0 for empty lists. There is no
- * guarantee that the list is not updated while the count is being computed.
- * Callers that want such a guarantee need to provide an outer lock.
+ * There is no guarantee that the list is not updated while the count is being
+ * computed. Callers that want such a guarantee need to provide an outer lock.
+ *
+ * Return: 0 for empty lists, otherwise the number of objects
+ * currently held by @lru.
*/
unsigned long list_lru_count_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg);
@@ -136,12 +170,28 @@ static inline unsigned long list_lru_count(struct list_lru *lru)
void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
struct list_head *head);
+/**
+ * list_lru_putback: undo list_lru_isolate
+ * @lru: the lru pointer.
+ * @item: the item to put back.
+ * @nid: the node id of the sublist to put the item back to.
+ * @memcg: the cgroup of the sublist to put the item back to.
+ *
+ * Put back an isolated item into its original LRU. Note that unlike
+ * list_lru_add(), this does not increment the node LRU count (as
+ * list_lru_isolate() does not originally decrement this count).
+ *
+ * Since we might have dropped the LRU lock in between, recompute list_lru_one
+ * from the node's id and memcg.
+ */
+void list_lru_putback(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
/**
- * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one: walk a @lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
@@ -150,24 +200,24 @@ typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
*
- * This function will scan all elements in a particular list_lru, calling the
+ * This function will scan all elements in a particular @lru, calling the
* @isolate callback for each of those items, along with the current list
* spinlock and a caller-provided opaque. The @isolate callback can choose to
* drop the lock internally, but *must* return with the lock held. The callback
- * will return an enum lru_status telling the list_lru infrastructure what to
+ * will return an enum lru_status telling the @lru infrastructure what to
* do with the object being scanned.
*
- * Please note that nr_to_walk does not mean how many objects will be freed,
+ * Please note that @nr_to_walk does not mean how many objects will be freed,
* just how many objects will be scanned.
*
- * Return value: the number of objects effectively removed from the LRU.
+ * Return: the number of objects effectively removed from the LRU.
*/
unsigned long list_lru_walk_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
/**
- * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one_irq: walk a @lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
@@ -176,7 +226,7 @@ unsigned long list_lru_walk_one(struct list_lru *lru,
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
*
- * Same as @list_lru_walk_one except that the spinlock is acquired with
+ * Same as list_lru_walk_one() except that the spinlock is acquired with
* spin_lock_irq().
*/
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
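A hedged sketch of the two flavours of the reworked list_lru API documented above: the explicit nid/memcg variants and the *_obj variants that derive both from the item itself. The demo_obj type and lru instance are assumptions for illustration.

#include <linux/list_lru.h>
#include <linux/memcontrol.h>

struct demo_obj {
	struct list_head lru;		/* slab-allocated object on the LRU */
	int nid;
	struct mem_cgroup *memcg;
};

static struct list_lru demo_lru;

static void demo_make_reclaimable(struct demo_obj *obj)
{
	/* The _obj variant derives nid/memcg from the list_head itself. */
	list_lru_add_obj(&demo_lru, &obj->lru);
}

static void demo_make_reclaimable_explicit(struct demo_obj *obj)
{
	/* Explicit variant: the caller supplies the node id and memcg. */
	list_lru_add(&demo_lru, &obj->lru, obj->nid, obj->memcg);
}

static void demo_reuse(struct demo_obj *obj)
{
	list_lru_del_obj(&demo_lru, &obj->lru);
}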
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index 2ebc323d345a..857d785e89e6 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -127,12 +127,12 @@ struct lock_class {
unsigned long usage_mask;
const struct lock_trace *usage_traces[LOCK_TRACE_STATES];
+ const char *name;
/*
* Generation counter, when doing certain classes of graph walking,
* to ensure that we check one node only once:
*/
int name_version;
- const char *name;
u8 wait_type_inner;
u8 wait_type_outer;
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index ff217a5ce552..185924c56378 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -171,6 +171,8 @@ LSM_HOOK(int, 0, file_alloc_security, struct file *file)
LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file)
LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd,
unsigned long arg)
+LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd,
+ unsigned long arg)
LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags)
@@ -262,6 +264,10 @@ LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops,
LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb)
LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry,
struct inode *inode)
+LSM_HOOK(int, -EOPNOTSUPP, getselfattr, unsigned int attr,
+ struct lsm_ctx __user *ctx, size_t *size, u32 flags)
+LSM_HOOK(int, -EOPNOTSUPP, setselfattr, unsigned int attr,
+ struct lsm_ctx *ctx, size_t size, u32 flags)
LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name,
char **value)
LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index dcb5e5b5eb13..a2ade0ffe9e7 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -25,6 +25,7 @@
#ifndef __LINUX_LSM_HOOKS_H
#define __LINUX_LSM_HOOKS_H
+#include <uapi/linux/lsm.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/rculist.h>
@@ -42,6 +43,18 @@ struct security_hook_heads {
#undef LSM_HOOK
} __randomize_layout;
+/**
+ * struct lsm_id - Identify a Linux Security Module.
+ * @name: name of the LSM, must be approved by the LSM maintainers
+ * @id: LSM ID number from uapi/linux/lsm.h
+ *
+ * Contains the information that identifies the LSM.
+ */
+struct lsm_id {
+ const char *name;
+ u64 id;
+};
+
/*
* Security module hook list structure.
* For use with generic list macros for common operations.
@@ -50,7 +63,7 @@ struct security_hook_list {
struct hlist_node list;
struct hlist_head *head;
union security_list_options hook;
- const char *lsm;
+ const struct lsm_id *lsmid;
} __randomize_layout;
/*
@@ -104,7 +117,7 @@ extern struct security_hook_heads security_hook_heads;
extern char *lsm_names;
extern void security_add_hooks(struct security_hook_list *hooks, int count,
- const char *lsm);
+ const struct lsm_id *lsmid);
#define LSM_FLAG_LEGACY_MAJOR BIT(0)
#define LSM_FLAG_EXCLUSIVE BIT(1)
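A hedged sketch of registering hooks with the new struct lsm_id; "demo", the single hook, and LSM_ID_UNDEF are placeholders, and a real LSM would use its assigned LSM_ID_* value from uapi/linux/lsm.h.

#include <linux/lsm_hooks.h>
#include <uapi/linux/lsm.h>

static int demo_file_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	return 0;
}

static const struct lsm_id demo_lsmid = {
	.name	= "demo",
	.id	= LSM_ID_UNDEF,		/* placeholder id */
};

static struct security_hook_list demo_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(file_ioctl, demo_file_ioctl),
};

static int __init demo_lsm_init(void)
{
	security_add_hooks(demo_hooks, ARRAY_SIZE(demo_hooks), &demo_lsmid);
	return 0;
}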
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index d01e850b570f..b3d63123b945 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -256,6 +256,8 @@ struct maple_tree {
struct maple_tree name = MTREE_INIT(name, 0)
#define mtree_lock(mt) spin_lock((&(mt)->ma_lock))
+#define mtree_lock_nested(mt, subclass) \
+ spin_lock_nested((&(mt)->ma_lock), subclass)
#define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock))
/*
@@ -327,6 +329,9 @@ int mtree_store(struct maple_tree *mt, unsigned long index,
void *entry, gfp_t gfp);
void *mtree_erase(struct maple_tree *mt, unsigned long index);
+int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
+int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
+
void mtree_destroy(struct maple_tree *mt);
void __mt_destroy(struct maple_tree *mt);
@@ -345,6 +350,36 @@ static inline bool mtree_empty(const struct maple_tree *mt)
/* Advanced API */
/*
+ * Maple State Status
+ * ma_active means the maple state is pointing to a node and offset and can
+ * continue operating on the tree.
+ * ma_start means we have not searched the tree.
+ * ma_root means we have searched the tree and the entry we found lives in
+ * the root of the tree (ie it has index 0, length 1 and is the only entry in
+ * the tree).
+ * ma_none means we have searched the tree and there is no node in the
+ * tree for this entry. For example, we searched for index 1 in an empty
+ * tree. Or we have a tree which points to a full leaf node and we
+ * searched for an entry which is larger than can be contained in that
+ * leaf node.
+ * ma_pause means the data within the maple state may be stale, restart the
+ * operation
+ * ma_overflow means the search has reached the upper limit of the search
+ * ma_underflow means the search has reached the lower limit of the search
+ * ma_error means there was an error, check the node for the error number.
+ */
+enum maple_status {
+ ma_active,
+ ma_start,
+ ma_root,
+ ma_none,
+ ma_pause,
+ ma_overflow,
+ ma_underflow,
+ ma_error,
+};
+
+/*
* The maple state is defined in the struct ma_state and is used to keep track
* of information during operations, and even between operations when using the
* advanced API.
@@ -376,6 +411,13 @@ static inline bool mtree_empty(const struct maple_tree *mt)
* When returning a value the maple state index and last respectively contain
* the start and end of the range for the entry. Ranges are inclusive in the
* Maple Tree.
+ *
+ * The status of the state is used to determine how the next action should treat
+ * the state. For instance, if the status is ma_start then the next action
+ * should start at the root of the tree and walk down. If the status is
+ * ma_pause then the node may be stale data and should be discarded. If the
+ * status is ma_overflow, then the last action hit the upper limit.
+ *
*/
struct ma_state {
struct maple_tree *tree; /* The tree we're operating in */
@@ -385,9 +427,11 @@ struct ma_state {
unsigned long min; /* The minimum index of this node - implied pivot min */
unsigned long max; /* The maximum index of this node - implied pivot max */
struct maple_alloc *alloc; /* Allocated nodes for this operation */
+ enum maple_status status; /* The status of the state (active, start, none, etc) */
unsigned char depth; /* depth of tree descent during write */
unsigned char offset;
unsigned char mas_flags;
+ unsigned char end; /* The end of the node */
};
struct ma_wr_state {
@@ -397,7 +441,6 @@ struct ma_wr_state {
unsigned long r_max; /* range max */
enum maple_type type; /* mas->node type */
unsigned char offset_end; /* The offset where the write ends */
- unsigned char node_end; /* mas->node end */
unsigned long *pivots; /* mas->node->pivots pointer */
unsigned long end_piv; /* The pivot at the offset end */
void __rcu **slots; /* mas->node->slots pointer */
@@ -406,30 +449,16 @@ struct ma_wr_state {
};
#define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock))
+#define mas_lock_nested(mas, subclass) \
+ spin_lock_nested(&((mas)->tree->ma_lock), subclass)
#define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock))
-
/*
* Special values for ma_state.node.
- * MAS_START means we have not searched the tree.
- * MAS_ROOT means we have searched the tree and the entry we found lives in
- * the root of the tree (ie it has index 0, length 1 and is the only entry in
- * the tree).
- * MAS_NONE means we have searched the tree and there is no node in the
- * tree for this entry. For example, we searched for index 1 in an empty
- * tree. Or we have a tree which points to a full leaf node and we
- * searched for an entry which is larger than can be contained in that
- * leaf node.
* MA_ERROR represents an errno. After dropping the lock and attempting
* to resolve the error, the walk would have to be restarted from the
* top of the tree as the tree may have been modified.
*/
-#define MAS_START ((struct maple_enode *)1UL)
-#define MAS_ROOT ((struct maple_enode *)5UL)
-#define MAS_NONE ((struct maple_enode *)9UL)
-#define MAS_PAUSE ((struct maple_enode *)17UL)
-#define MAS_OVERFLOW ((struct maple_enode *)33UL)
-#define MAS_UNDERFLOW ((struct maple_enode *)65UL)
#define MA_ERROR(err) \
((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
@@ -438,7 +467,8 @@ struct ma_wr_state {
.tree = mt, \
.index = first, \
.last = end, \
- .node = MAS_START, \
+ .node = NULL, \
+ .status = ma_start, \
.min = 0, \
.max = ULONG_MAX, \
.alloc = NULL, \
@@ -469,7 +499,6 @@ void *mas_find_range(struct ma_state *mas, unsigned long max);
void *mas_find_rev(struct ma_state *mas, unsigned long min);
void *mas_find_range_rev(struct ma_state *mas, unsigned long max);
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
-bool mas_is_err(struct ma_state *mas);
bool mas_nomem(struct ma_state *mas, gfp_t gfp);
void mas_pause(struct ma_state *mas);
@@ -498,28 +527,18 @@ static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
mas->tree = tree;
mas->index = mas->last = addr;
mas->max = ULONG_MAX;
- mas->node = MAS_START;
-}
-
-/* Checks if a mas has not found anything */
-static inline bool mas_is_none(const struct ma_state *mas)
-{
- return mas->node == MAS_NONE;
+ mas->status = ma_start;
+ mas->node = NULL;
}
-/* Checks if a mas has been paused */
-static inline bool mas_is_paused(const struct ma_state *mas)
+static inline bool mas_is_active(struct ma_state *mas)
{
- return mas->node == MAS_PAUSE;
+ return mas->status == ma_active;
}
-/* Check if the mas is pointing to a node or not */
-static inline bool mas_is_active(struct ma_state *mas)
+static inline bool mas_is_err(struct ma_state *mas)
{
- if ((unsigned long)mas->node >= MAPLE_RESERVED_RANGE)
- return true;
-
- return false;
+ return mas->status == ma_error;
}
/**
@@ -532,9 +551,10 @@ static inline bool mas_is_active(struct ma_state *mas)
*
* Context: Any context.
*/
-static inline void mas_reset(struct ma_state *mas)
+static __always_inline void mas_reset(struct ma_state *mas)
{
- mas->node = MAS_START;
+ mas->status = ma_start;
+ mas->node = NULL;
}
/**
@@ -550,6 +570,131 @@ static inline void mas_reset(struct ma_state *mas)
*/
#define mas_for_each(__mas, __entry, __max) \
while (((__entry) = mas_find((__mas), (__max))) != NULL)
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+enum mt_dump_format {
+ mt_dump_dec,
+ mt_dump_hex,
+};
+
+extern atomic_t maple_tree_tests_run;
+extern atomic_t maple_tree_tests_passed;
+
+void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
+void mas_dump(const struct ma_state *mas);
+void mas_wr_dump(const struct ma_wr_state *wr_mas);
+void mt_validate(struct maple_tree *mt);
+void mt_cache_shrink(void);
+#define MT_BUG_ON(__tree, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_BUG_ON(__mas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_WR_BUG_ON(__wrmas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MT_WARN_ON(__tree, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WARN_ON(__mas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WR_WARN_ON(__wrmas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+#else
+#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
+#define MAS_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MAS_WR_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MT_WARN_ON(__tree, __x) WARN_ON(__x)
+#define MAS_WARN_ON(__mas, __x) WARN_ON(__x)
+#define MAS_WR_WARN_ON(__mas, __x) WARN_ON(__x)
+#endif /* CONFIG_DEBUG_MAPLE_TREE */
+
/**
* __mas_set_range() - Set up Maple Tree operation state to a sub-range of the
* current location.
@@ -563,6 +708,9 @@ static inline void mas_reset(struct ma_state *mas)
static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
unsigned long last)
{
+ /* Ensure the range starts within the current slot */
+ MAS_WARN_ON(mas, mas_is_active(mas) &&
+ (mas->index > start || mas->last < start));
mas->index = start;
mas->last = last;
}
@@ -580,8 +728,8 @@ static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
static inline
void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
{
+ mas_reset(mas);
__mas_set_range(mas, start, last);
- mas->node = MAS_START;
}
/**
@@ -706,129 +854,4 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
for (__entry = mt_find(__tree, &(__index), __max); \
__entry; __entry = mt_find_after(__tree, &(__index), __max))
-
-#ifdef CONFIG_DEBUG_MAPLE_TREE
-enum mt_dump_format {
- mt_dump_dec,
- mt_dump_hex,
-};
-
-extern atomic_t maple_tree_tests_run;
-extern atomic_t maple_tree_tests_passed;
-
-void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
-void mas_dump(const struct ma_state *mas);
-void mas_wr_dump(const struct ma_wr_state *wr_mas);
-void mt_validate(struct maple_tree *mt);
-void mt_cache_shrink(void);
-#define MT_BUG_ON(__tree, __x) do { \
- atomic_inc(&maple_tree_tests_run); \
- if (__x) { \
- pr_info("BUG at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mt_dump(__tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
-} while (0)
-
-#define MAS_BUG_ON(__mas, __x) do { \
- atomic_inc(&maple_tree_tests_run); \
- if (__x) { \
- pr_info("BUG at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mas_dump(__mas); \
- mt_dump((__mas)->tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
-} while (0)
-
-#define MAS_WR_BUG_ON(__wrmas, __x) do { \
- atomic_inc(&maple_tree_tests_run); \
- if (__x) { \
- pr_info("BUG at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mas_wr_dump(__wrmas); \
- mas_dump((__wrmas)->mas); \
- mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
-} while (0)
-
-#define MT_WARN_ON(__tree, __x) ({ \
- int ret = !!(__x); \
- atomic_inc(&maple_tree_tests_run); \
- if (ret) { \
- pr_info("WARN at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mt_dump(__tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
- unlikely(ret); \
-})
-
-#define MAS_WARN_ON(__mas, __x) ({ \
- int ret = !!(__x); \
- atomic_inc(&maple_tree_tests_run); \
- if (ret) { \
- pr_info("WARN at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mas_dump(__mas); \
- mt_dump((__mas)->tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
- unlikely(ret); \
-})
-
-#define MAS_WR_WARN_ON(__wrmas, __x) ({ \
- int ret = !!(__x); \
- atomic_inc(&maple_tree_tests_run); \
- if (ret) { \
- pr_info("WARN at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mas_wr_dump(__wrmas); \
- mas_dump((__wrmas)->mas); \
- mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
- unlikely(ret); \
-})
-#else
-#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
-#define MAS_BUG_ON(__mas, __x) BUG_ON(__x)
-#define MAS_WR_BUG_ON(__mas, __x) BUG_ON(__x)
-#define MT_WARN_ON(__tree, __x) WARN_ON(__x)
-#define MAS_WARN_ON(__mas, __x) WARN_ON(__x)
-#define MAS_WR_WARN_ON(__mas, __x) WARN_ON(__x)
-#endif /* CONFIG_DEBUG_MAPLE_TREE */
-
#endif /*_LINUX_MAPLE_TREE_H */
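A short sketch of the advanced maple tree API under the reworked state handling above: MA_STATE() now starts with status == ma_start and node == NULL, and mas_for_each() walks entries under the per-tree lock. The tree and range are illustrative.

#include <linux/maple_tree.h>

static DEFINE_MTREE(demo_mt);

static void demo_walk_all(void)
{
	MA_STATE(mas, &demo_mt, 0, 0);	/* mas.status == ma_start, mas.node == NULL */
	void *entry;

	mas_lock(&mas);
	mas_for_each(&mas, entry, ULONG_MAX) {
		/* mas.index/mas.last hold the inclusive range of @entry */
	}
	mas_unlock(&mas);
}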
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index ae3bde302f70..b695f9e946da 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -123,6 +123,7 @@ int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
phys_addr_t base, phys_addr_t size);
+bool memblock_validate_numa_coverage(unsigned long threshold_bytes);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7bdcf3020d7a..20ff87f8e001 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -219,6 +219,12 @@ struct mem_cgroup {
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
unsigned long zswap_max;
+
+ /*
+ * When false, prevents pages from this memcg from being written back from
+ * zswap to swap, and from being swapped out on zswap store failures.
+ */
+ bool zswap_writeback;
#endif
unsigned long soft_limit;
@@ -324,7 +330,7 @@ struct mem_cgroup {
struct deferred_split deferred_split_queue;
#endif
-#ifdef CONFIG_LRU_GEN
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
/* per-memcg mm_struct list */
struct lru_gen_mm_list mm_list;
#endif
@@ -821,6 +827,11 @@ static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
return !memcg || css_tryget(&memcg->css);
}
+static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
+{
+ return !memcg || css_tryget_online(&memcg->css);
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
if (memcg)
@@ -1046,8 +1057,8 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return x;
}
-void mem_cgroup_flush_stats(void);
-void mem_cgroup_flush_stats_ratelimited(void);
+void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
+void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val);
@@ -1187,6 +1198,11 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page)
return NULL;
}
+static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
+{
+ return NULL;
+}
+
static inline bool folio_memcg_kmem(struct folio *folio)
{
return false;
@@ -1349,6 +1365,11 @@ static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
return true;
}
+static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
+{
+ return true;
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
@@ -1548,11 +1569,11 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return node_page_state(lruvec_pgdat(lruvec), idx);
}
-static inline void mem_cgroup_flush_stats(void)
+static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
}
-static inline void mem_cgroup_flush_stats_ratelimited(void)
+static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
}
@@ -1926,6 +1947,7 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
+bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
@@ -1939,6 +1961,11 @@ static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
size_t size)
{
}
+static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
+{
+ /* if zswap is disabled, do not block pages going to the swapping device */
+ return true;
+}
#endif
#endif /* _LINUX_MEMCONTROL_H */
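A hedged sketch of how a zswap writeback path might consult the new per-memcg switch declared above; folio_memcg() is existing memcontrol API, while the surrounding function is an assumption.

#include <linux/memcontrol.h>

static bool demo_zswap_may_writeback(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	/* Returns true when zswap is disabled, so nothing is blocked. */
	return mem_cgroup_zswap_writeback_enabled(memcg);
}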
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 4aae6c06c5f2..7be1e32e6d42 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -51,6 +51,7 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
extern int mempool_resize(mempool_t *pool, int new_min_nr);
extern void mempool_destroy(mempool_t *pool);
extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
+extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc;
extern void mempool_free(void *element, mempool_t *pool);
/*
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 1314d9c5f05b..744c830f4b13 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -196,8 +196,6 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
struct dev_pagemap *pgmap);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
-unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
-void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
unsigned long memremap_compat_align(void);
#else
static inline void *devm_memremap_pages(struct device *dev,
@@ -228,16 +226,6 @@ static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
return false;
}
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
-{
- return 0;
-}
-
-static inline void vmem_altmap_free(struct vmem_altmap *altmap,
- unsigned long nr_pfns)
-{
-}
-
/* when memremap_pages() is disabled all archs can remap a single page */
static inline unsigned long memremap_compat_align(void)
{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index da5219b48d52..896c0079f64f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -994,6 +994,17 @@ static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
return mas_expected_entries(&vmi->mas, count);
}
+static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
+ unsigned long start, unsigned long end, gfp_t gfp)
+{
+ __mas_set_range(&vmi->mas, start, end - 1);
+ mas_store_gfp(&vmi->mas, NULL, gfp);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
/* Free any unused preallocations */
static inline void vma_iter_free(struct vma_iterator *vmi)
{
@@ -1804,7 +1815,7 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
static inline u8 page_kasan_tag(const struct page *page)
{
- u8 tag = 0xff;
+ u8 tag = KASAN_TAG_KERNEL;
if (kasan_enabled()) {
tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
@@ -1833,7 +1844,7 @@ static inline void page_kasan_tag_set(struct page *page, u8 tag)
static inline void page_kasan_tag_reset(struct page *page)
{
if (kasan_enabled())
- page_kasan_tag_set(page, 0xff);
+ page_kasan_tag_set(page, KASAN_TAG_KERNEL);
}
#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
@@ -1953,15 +1964,15 @@ static inline bool page_maybe_dma_pinned(struct page *page)
*
* The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
*/
-static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
- struct page *page)
+static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
+ struct folio *folio)
{
VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
return false;
- return page_maybe_dma_pinned(page);
+ return folio_maybe_dma_pinned(folio);
}
/**
@@ -2373,7 +2384,8 @@ extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int generic_error_remove_page(struct address_space *mapping, struct page *page);
+int generic_error_remove_folio(struct address_space *mapping,
+ struct folio *folio);
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
unsigned long address, struct pt_regs *regs);
@@ -3859,6 +3871,32 @@ void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap);
#endif
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+{
+ /* number of pfns from base where pfn_to_page() is valid */
+ if (altmap)
+ return altmap->reserve + altmap->free;
+ return 0;
+}
+
+static inline void vmem_altmap_free(struct vmem_altmap *altmap,
+ unsigned long nr_pfns)
+{
+ altmap->alloc -= nr_pfns;
+}
+#else
+static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+{
+ return 0;
+}
+
+static inline void vmem_altmap_free(struct vmem_altmap *altmap,
+ unsigned long nr_pfns)
+{
+}
+#endif
+
#define VMEMMAP_RESERVE_NR 2
#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 957ce38768b2..4dd996ad0bd3 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -401,11 +401,11 @@ FOLIO_MATCH(compound_head, _head_2a);
* @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs.
* @__page_mapping: Aliases with page->mapping. Unused for page tables.
* @pt_mm: Used for x86 pgds.
- * @pt_frag_refcount: For fragmented page table tracking. Powerpc and s390 only.
+ * @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
* @_pt_pad_2: Padding to ensure proper alignment.
* @ptl: Lock for the page table.
* @__page_type: Same as page->page_type. Unused for page tables.
- * @_refcount: Same as page refcount. Used for s390 page tables.
+ * @__page_refcount: Same as page refcount.
* @pt_memcg_data: Memcg data. Tracked for page tables here.
*
* This struct overlays struct page for now. Do not modify without a good
@@ -438,7 +438,7 @@ struct ptdesc {
#endif
};
unsigned int __page_type;
- atomic_t _refcount;
+ atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
unsigned long pt_memcg_data;
#endif
@@ -452,7 +452,7 @@ TABLE_MATCH(compound_head, _pt_pad_1);
TABLE_MATCH(mapping, __page_mapping);
TABLE_MATCH(rcu_head, pt_rcu_head);
TABLE_MATCH(page_type, __page_type);
-TABLE_MATCH(_refcount, _refcount);
+TABLE_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
TABLE_MATCH(memcg_data, pt_memcg_data);
#endif
@@ -600,6 +600,9 @@ struct vma_numab_state {
*/
unsigned long pids_active[2];
+ /* MM scan sequence ID when scan first started after VMA creation */
+ int start_scan_seq;
+
/*
* MM scan sequence ID when the VMA was last completely scanned.
* A VMA is not eligible for scanning if prev_scan_seq == numa_scan_seq
@@ -958,7 +961,7 @@ struct mm_struct {
*/
unsigned long ksm_zero_pages;
#endif /* CONFIG_KSM */
-#ifdef CONFIG_LRU_GEN
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
struct {
/* this mm_struct is on lru_gen_mm_list */
struct list_head list;
@@ -973,7 +976,7 @@ struct mm_struct {
struct mem_cgroup *memcg;
#endif
} lru_gen;
-#endif /* CONFIG_LRU_GEN */
+#endif /* CONFIG_LRU_GEN_WALKS_MMU */
} __randomize_layout;
/*
@@ -1011,11 +1014,13 @@ struct lru_gen_mm_list {
spinlock_t lock;
};
+#endif /* CONFIG_LRU_GEN */
+
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
+
void lru_gen_add_mm(struct mm_struct *mm);
void lru_gen_del_mm(struct mm_struct *mm);
-#ifdef CONFIG_MEMCG
void lru_gen_migrate_mm(struct mm_struct *mm);
-#endif
static inline void lru_gen_init_mm(struct mm_struct *mm)
{
@@ -1036,7 +1041,7 @@ static inline void lru_gen_use_mm(struct mm_struct *mm)
WRITE_ONCE(mm->lru_gen.bitmap, -1);
}
-#else /* !CONFIG_LRU_GEN */
+#else /* !CONFIG_LRU_GEN_WALKS_MMU */
static inline void lru_gen_add_mm(struct mm_struct *mm)
{
@@ -1046,11 +1051,9 @@ static inline void lru_gen_del_mm(struct mm_struct *mm)
{
}
-#ifdef CONFIG_MEMCG
static inline void lru_gen_migrate_mm(struct mm_struct *mm)
{
}
-#endif
static inline void lru_gen_init_mm(struct mm_struct *mm)
{
@@ -1060,7 +1063,7 @@ static inline void lru_gen_use_mm(struct mm_struct *mm)
{
}
-#endif /* CONFIG_LRU_GEN */
+#endif /* CONFIG_LRU_GEN_WALKS_MMU */
struct vma_iterator {
struct ma_state mas;
@@ -1071,7 +1074,8 @@ struct vma_iterator {
.mas = { \
.tree = &(__mm)->mm_mt, \
.index = __addr, \
- .node = MAS_START, \
+ .node = NULL, \
+ .status = ma_start, \
}, \
}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9db36e197712..4ed33b127821 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -22,18 +22,21 @@
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/local_lock.h>
+#include <linux/zswap.h>
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
-#define MAX_ORDER 10
+#define MAX_PAGE_ORDER 10
#else
-#define MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
+#define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#endif
-#define MAX_ORDER_NR_PAGES (1 << MAX_ORDER)
+#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)
#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
+#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
+
/*
* PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
* costly to service. That is between allocation orders which should
@@ -95,7 +98,7 @@ static inline bool migratetype_is_mergeable(int mt)
}
#define for_each_migratetype_order(order, type) \
- for (order = 0; order <= MAX_ORDER; order++) \
+ for (order = 0; order < NR_PAGE_ORDERS; order++) \
for (type = 0; type < MIGRATE_TYPES; type++)
extern int page_group_by_mobility_disabled;
@@ -207,6 +210,10 @@ enum node_stat_item {
PGPROMOTE_SUCCESS, /* promote successfully */
PGPROMOTE_CANDIDATE, /* candidate pages to promote */
#endif
+ /* PGDEMOTE_*: pages demoted */
+ PGDEMOTE_KSWAPD,
+ PGDEMOTE_DIRECT,
+ PGDEMOTE_KHUGEPAGED,
NR_VM_NODE_STAT_ITEMS
};
@@ -435,14 +442,12 @@ struct lru_gen_folio {
atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
/* whether the multi-gen LRU is enabled */
bool enabled;
-#ifdef CONFIG_MEMCG
/* the memcg generation this lru_gen_folio belongs to */
u8 gen;
/* the list segment this lru_gen_folio belongs to */
u8 seg;
/* per-node lru_gen_folio list for global reclaim */
struct hlist_nulls_node list;
-#endif
};
enum {
@@ -488,11 +493,6 @@ struct lru_gen_mm_walk {
bool force_scan;
};
-void lru_gen_init_lruvec(struct lruvec *lruvec);
-void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
-
-#ifdef CONFIG_MEMCG
-
/*
* For each node, memcgs are divided into two generations: the old and the
* young. For each generation, memcgs are randomly sharded into multiple bins
@@ -550,6 +550,8 @@ struct lru_gen_memcg {
};
void lru_gen_init_pgdat(struct pglist_data *pgdat);
+void lru_gen_init_lruvec(struct lruvec *lruvec);
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
void lru_gen_init_memcg(struct mem_cgroup *memcg);
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
@@ -558,19 +560,6 @@ void lru_gen_offline_memcg(struct mem_cgroup *memcg);
void lru_gen_release_memcg(struct mem_cgroup *memcg);
void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
-#else /* !CONFIG_MEMCG */
-
-#define MEMCG_NR_GENS 1
-
-struct lru_gen_memcg {
-};
-
-static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
-{
-}
-
-#endif /* CONFIG_MEMCG */
-
#else /* !CONFIG_LRU_GEN */
static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
@@ -585,8 +574,6 @@ static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
{
}
-#ifdef CONFIG_MEMCG
-
static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
}
@@ -611,8 +598,6 @@ static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
{
}
-#endif /* CONFIG_MEMCG */
-
#endif /* CONFIG_LRU_GEN */
struct lruvec {
@@ -635,12 +620,15 @@ struct lruvec {
#ifdef CONFIG_LRU_GEN
/* evictable pages divided into generations */
struct lru_gen_folio lrugen;
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
/* to concurrently iterate lru_gen_mm_list */
struct lru_gen_mm_state mm_state;
#endif
+#endif /* CONFIG_LRU_GEN */
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
+ struct zswap_lruvec_state zswap_lruvec_state;
};
/* Isolate for asynchronous migration */
@@ -947,10 +935,10 @@ struct zone {
CACHELINE_PADDING(_pad1_);
/* free areas of different sizes */
- struct free_area free_area[MAX_ORDER + 1];
+ struct free_area free_area[NR_PAGE_ORDERS];
#ifdef CONFIG_UNACCEPTED_MEMORY
- /* Pages to be accepted. All pages on the list are MAX_ORDER */
+ /* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */
struct list_head unaccepted_pages;
#endif
@@ -1760,8 +1748,8 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
#define SECTION_BLOCKFLAGS_BITS \
((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
-#if (MAX_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
-#error Allocator MAX_ORDER exceeds SECTION_SIZE
+#if (MAX_PAGE_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
+#error Allocator MAX_PAGE_ORDER exceeds SECTION_SIZE
#endif
static inline unsigned long pfn_to_section_nr(unsigned long pfn)
@@ -1793,6 +1781,7 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
struct mem_section_usage {
+ struct rcu_head rcu;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
#endif
@@ -1986,7 +1975,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
int idx = subsection_map_index(pfn);
- return test_bit(idx, ms->usage->subsection_map);
+ return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
@@ -2010,6 +1999,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
static inline int pfn_valid(unsigned long pfn)
{
struct mem_section *ms;
+ int ret;
/*
* Ensure the upper PAGE_SHIFT bits are clear in the
@@ -2023,13 +2013,19 @@ static inline int pfn_valid(unsigned long pfn)
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
ms = __pfn_to_section(pfn);
- if (!valid_section(ms))
+ rcu_read_lock();
+ if (!valid_section(ms)) {
+ rcu_read_unlock();
return 0;
+ }
/*
* Traditionally early sections always returned pfn_valid() for
* the entire section-sized span.
*/
- return early_section(ms) || pfn_section_valid(ms, pfn);
+ ret = early_section(ms) || pfn_section_valid(ms, pfn);
+ rcu_read_unlock();
+
+ return ret;
}
#endif
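A small sketch of the MAX_ORDER to MAX_PAGE_ORDER/NR_PAGE_ORDERS rename in use: per-order arrays are sized with NR_PAGE_ORDERS and loops run from 0 to MAX_PAGE_ORDER inclusive. The counter array is illustrative.

#include <linux/mmzone.h>

static unsigned long demo_free_per_order[NR_PAGE_ORDERS];

static void demo_snapshot_zone(struct zone *zone)
{
	unsigned int order;

	for (order = 0; order < NR_PAGE_ORDERS; order++)
		demo_free_per_order[order] = zone->free_area[order].nr_free;
}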
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
index b8da2db4ecd2..cd4d5c8781f5 100644
--- a/include/linux/mnt_idmapping.h
+++ b/include/linux/mnt_idmapping.h
@@ -244,7 +244,4 @@ static inline kgid_t mapped_fsgid(struct mnt_idmap *idmap,
return from_vfsgid(idmap, fs_userns, VFSGIDT_INIT(current_fsgid()));
}
-bool check_fsmapping(const struct mnt_idmap *idmap,
- const struct super_block *sb);
-
#endif /* _LINUX_MNT_IDMAPPING_H */
diff --git a/include/linux/mount.h b/include/linux/mount.h
index ac3dd2876197..c34c18b4e8f3 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -50,8 +50,7 @@ struct path;
#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
- MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | \
- MNT_CURSOR)
+ MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | MNT_ONRB)
#define MNT_INTERNAL 0x4000
@@ -65,7 +64,7 @@ struct path;
#define MNT_SYNC_UMOUNT 0x2000000
#define MNT_MARKED 0x4000000
#define MNT_UMOUNT 0x8000000
-#define MNT_CURSOR 0x10000000
+#define MNT_ONRB 0x10000000
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index c29ace15a053..e84522e31301 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -1003,6 +1003,8 @@ struct nand_op_parser {
/**
* struct nand_operation - NAND operation descriptor
* @cs: the CS line to select for this NAND operation
+ * @deassert_wp: set to true when the operation requires the WP pin to be
+ * de-asserted (ERASE, PROG, ...)
* @instrs: array of instructions to execute
* @ninstrs: length of the @instrs array
*
@@ -1010,6 +1012,7 @@ struct nand_op_parser {
*/
struct nand_operation {
unsigned int cs;
+ bool deassert_wp;
const struct nand_op_instr *instrs;
unsigned int ninstrs;
};
@@ -1021,6 +1024,14 @@ struct nand_operation {
.ninstrs = ARRAY_SIZE(_instrs), \
}
+#define NAND_DESTRUCTIVE_OPERATION(_cs, _instrs) \
+ { \
+ .cs = _cs, \
+ .deassert_wp = true, \
+ .instrs = _instrs, \
+ .ninstrs = ARRAY_SIZE(_instrs), \
+ }
+
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only);
@@ -1104,6 +1115,7 @@ struct nand_controller_ops {
* the bus without restarting an entire read operation nor
* changing the column.
* @supported_op.cont_read: The controller supports sequential cache reads.
+ * @controller_wp: the controller is in charge of handling the WP pin.
*/
struct nand_controller {
struct mutex lock;
@@ -1112,6 +1124,7 @@ struct nand_controller {
unsigned int data_only_read: 1;
unsigned int cont_read: 1;
} supported_op;
+ bool controller_wp;
};
static inline void nand_controller_init(struct nand_controller *nfc)
@@ -1265,6 +1278,7 @@ struct nand_secure_region {
* @cont_read: Sequential page read internals
* @cont_read.ongoing: Whether a continuous read is ongoing or not
* @cont_read.first_page: Start of the continuous read operation
+ * @cont_read.pause_page: End of the current sequential cache read operation
* @cont_read.last_page: End of the continuous read operation
* @controller: The hardware controller structure which is shared among multiple
* independent devices
@@ -1321,6 +1335,7 @@ struct nand_chip {
struct {
bool ongoing;
unsigned int first_page;
+ unsigned int pause_page;
unsigned int last_page;
} cont_read;
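A hedged sketch of building an erase with the new NAND_DESTRUCTIVE_OPERATION() helper so the core knows the WP pin must be de-asserted; the address layout and the wait timeout are illustrative, and a real driver derives them from the chip geometry and timings.

#include <linux/mtd/rawnand.h>

static int demo_erase_block(struct nand_chip *chip, unsigned int page)
{
	u8 addrs[3] = { page, page >> 8, page >> 16 };
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_ERASE1, 0),
		NAND_OP_ADDR(3, addrs, 0),
		NAND_OP_CMD(NAND_CMD_ERASE2, 0),
		NAND_OP_WAIT_RDY(400, 0),	/* illustrative timeout (ms) */
	};
	struct nand_operation op =
		NAND_DESTRUCTIVE_OPERATION(chip->cur_cs, instrs);

	return nand_exec_op(chip, &op);
}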
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index a33aa9eb9fc3..95d11308f995 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -221,6 +221,7 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
-DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T))
+DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
+DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)
#endif /* __LINUX_MUTEX_H */
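A hedged sketch of the new conditional guard classes; scoped_cond_guard() comes from linux/cleanup.h in the same tree, and the lock, state and return values below are illustrative.

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_state;

static int demo_set_interruptible(int val)
{
	/* mutex_intr: body runs with the lock held, -EINTR on a signal. */
	scoped_cond_guard(mutex_intr, return -EINTR, &demo_lock)
		demo_state = val;
	return 0;
}

static bool demo_set_try(int val)
{
	/* mutex_try: body runs only if the trylock succeeded. */
	scoped_cond_guard(mutex_try, return false, &demo_lock)
		demo_state = val;
	return true;
}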
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index bdcd85e622d8..4d103ac8f5c7 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -89,8 +89,6 @@ struct nubus_driver {
void (*remove)(struct nubus_board *board);
};
-extern struct bus_type nubus_bus_type;
-
/* Generic NuBus interface functions, modelled after the PCI interface */
#ifdef CONFIG_PROC_FS
extern bool nubus_populate_procfs;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index a88e64acebfe..735cddc13d20 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -772,19 +772,14 @@ static __always_inline void SetPageUptodate(struct page *page)
CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
-bool __folio_start_writeback(struct folio *folio, bool keep_write);
-bool set_page_writeback(struct page *page);
+void __folio_start_writeback(struct folio *folio, bool keep_write);
+void set_page_writeback(struct page *page);
#define folio_start_writeback(folio) \
__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio) \
__folio_start_writeback(folio, true)
-static inline bool test_set_page_writeback(struct page *page)
-{
- return set_page_writeback(page);
-}
-
static __always_inline bool folio_test_head(struct folio *folio)
{
return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index e83c4c095041..3f2409b968ec 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -41,14 +41,14 @@ extern unsigned int pageblock_order;
* Huge pages are a constant size, but don't exceed the maximum allocation
* granularity.
*/
-#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER)
+#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_PAGE_ORDER)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
#else /* CONFIG_HUGETLB_PAGE */
/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
-#define pageblock_order MAX_ORDER
+#define pageblock_order MAX_PAGE_ORDER
#endif /* CONFIG_HUGETLB_PAGE */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index dea043bc1e38..58a4c976c39b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1239,6 +1239,8 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
+void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
+ u32 clear, u32 set);
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
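
A hedged sketch of the new read-modify-write config accessor; the register offset and the 10-bit field mask below are made up for illustration:

#include <linux/bits.h>
#include <linux/pci.h>

/* Hypothetical: update a 10-bit field at config offset 'pos' in one call. */
static void demo_update_cfg_field(struct pci_dev *pdev, int pos, u32 val)
{
	/* Bits in 'clear' are zeroed first, then the bits in 'set' are ORed in. */
	pci_clear_and_set_config_dword(pdev, pos, GENMASK(9, 0),
				       val & GENMASK(9, 0));
}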
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 275799b5f535..844ffdac8d7d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2605,6 +2605,8 @@
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+#define PCI_VENDOR_ID_ALIBABA 0x1ded
+
#define PCI_VENDOR_ID_TEHUTI 0x1fc9
#define PCI_DEVICE_ID_TEHUTI_3009 0x3009
#define PCI_DEVICE_ID_TEHUTI_3010 0x3010
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 143fbc10ecfe..b3b34f6670cf 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -60,12 +60,6 @@ struct pmu_hw_events {
DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
/*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
- raw_spinlock_t pmu_lock;
-
- /*
* When using percpu IRQs, we need a percpu dev_id. Place it here as we
* already have to allocate this struct per cpu.
*/
@@ -189,4 +183,26 @@ void armpmu_free_irq(int irq, int cpu);
#define ARMV8_SPE_PDEV_NAME "arm,spe-v1"
#define ARMV8_TRBE_PDEV_NAME "arm,trbe"
+/* Why does everything I do descend into this? */
+#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
+
+#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
+
+#define GEN_PMU_FORMAT_ATTR(name) \
+ PMU_FORMAT_ATTR(name, \
+ _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI))
+
+#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
+ ((((attr)->cfg) >> lo) & GENMASK_ULL(hi - lo, 0))
+
+#define ATTR_CFG_GET_FLD(attr, name) \
+ _ATTR_CFG_GET_FLD(attr, \
+ ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI)
+
#endif /* __ARM_PMU_H__ */
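
The format-attribute helpers moved here expect per-field ATTR_CFG_FLD_<name>_{CFG,LO,HI} defines in the driver. A minimal hedged sketch with a made-up single-bit field in config1 (names are illustrative, not taken from any real driver):

#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>

/* Hypothetical "ts_enable" field living in perf_event_attr::config1 bit 0. */
#define ATTR_CFG_FLD_ts_enable_CFG	config1
#define ATTR_CFG_FLD_ts_enable_LO	0
#define ATTR_CFG_FLD_ts_enable_HI	0

GEN_PMU_FORMAT_ATTR(ts_enable);		/* emits format_attr_ts_enable: "config1:0" */

static u64 demo_get_ts_enable(struct perf_event *event)
{
	/* Extract the field from the attr using the same FLD defines. */
	return ATTR_CFG_GET_FLD(&event->attr, ts_enable);
}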
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
index 9c226adf938a..46377e134d67 100644
--- a/include/linux/perf/arm_pmuv3.h
+++ b/include/linux/perf/arm_pmuv3.h
@@ -215,21 +215,27 @@
#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */
-#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
-#define ARMV8_PMU_PMCR_N_MASK 0x1f
-#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */
+#define ARMV8_PMU_PMCR_N GENMASK(15, 11) /* Number of counters supported */
+/* Mask for writable bits */
+#define ARMV8_PMU_PMCR_MASK (ARMV8_PMU_PMCR_E | ARMV8_PMU_PMCR_P | \
+ ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_D | \
+ ARMV8_PMU_PMCR_X | ARMV8_PMU_PMCR_DP | \
+ ARMV8_PMU_PMCR_LC | ARMV8_PMU_PMCR_LP)
/*
* PMOVSR: counters overflow flag status reg
*/
-#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */
-#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK
+#define ARMV8_PMU_OVSR_P GENMASK(30, 0)
+#define ARMV8_PMU_OVSR_C BIT(31)
+/* Mask for writable bits is both P and C fields */
+#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C)
/*
* PMXEVTYPER: Event selection reg
*/
-#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
-#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
+#define ARMV8_PMU_EVTYPE_EVENT GENMASK(15, 0) /* Mask for EVENT bits */
+#define ARMV8_PMU_EVTYPE_TH GENMASK_ULL(43, 32) /* arm64 only */
+#define ARMV8_PMU_EVTYPE_TC GENMASK_ULL(63, 61) /* arm64 only */
/*
* Event filters for PMUv3
@@ -244,19 +250,19 @@
/*
* PMUSERENR: user enable reg
*/
-#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */
#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
+/* Mask for writable bits */
+#define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \
+ ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER)
/* PMMIR_EL1.SLOTS mask */
-#define ARMV8_PMU_SLOTS_MASK 0xff
-
-#define ARMV8_PMU_BUS_SLOTS_SHIFT 8
-#define ARMV8_PMU_BUS_SLOTS_MASK 0xff
-#define ARMV8_PMU_BUS_WIDTH_SHIFT 16
-#define ARMV8_PMU_BUS_WIDTH_MASK 0xf
+#define ARMV8_PMU_SLOTS GENMASK(7, 0)
+#define ARMV8_PMU_BUS_SLOTS GENMASK(15, 8)
+#define ARMV8_PMU_BUS_WIDTH GENMASK(19, 16)
+#define ARMV8_PMU_THWIDTH GENMASK(23, 20)
/*
* This code is really good
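
With the PMMIR fields expressed as GENMASK()s, drivers can pull them out with FIELD_GET() instead of open-coded shift/mask pairs. A small hedged sketch (the helper name is made up):

#include <linux/bitfield.h>
#include <linux/perf/arm_pmuv3.h>

/* Hypothetical helper: extract the BUS_WIDTH field from a PMMIR_EL1 value. */
static unsigned int demo_pmmir_bus_width(u64 pmmir)
{
	return FIELD_GET(ARMV8_PMU_BUS_WIDTH, pmmir);
}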
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5547ba68e6e4..d2a15c0c6f8a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1143,6 +1143,15 @@ static inline bool branch_sample_priv(const struct perf_event *event)
return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
}
+static inline bool branch_sample_counters(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS;
+}
+
+static inline bool branch_sample_call_stack(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
+}
struct perf_sample_data {
/*
@@ -1177,6 +1186,7 @@ struct perf_sample_data {
struct perf_callchain_entry *callchain;
struct perf_raw_record *raw;
struct perf_branch_stack *br_stack;
+ u64 *br_stack_cntr;
union perf_sample_weight weight;
union perf_mem_data_src data_src;
u64 txn;
@@ -1254,7 +1264,8 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
static inline void perf_sample_save_brstack(struct perf_sample_data *data,
struct perf_event *event,
- struct perf_branch_stack *brs)
+ struct perf_branch_stack *brs,
+ u64 *brs_cntr)
{
int size = sizeof(u64); /* nr */
@@ -1262,7 +1273,16 @@ static inline void perf_sample_save_brstack(struct perf_sample_data *data,
size += sizeof(u64);
size += brs->nr * sizeof(struct perf_branch_entry);
+ /*
+ * The extension space for counters is appended after the
+ * struct perf_branch_stack. It is used to store the occurrences
+ * of events of each branch.
+ */
+ if (brs_cntr)
+ size += brs->nr * sizeof(u64);
+
data->br_stack = brs;
+ data->br_stack_cntr = brs_cntr;
data->dyn_size += size;
data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}
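
PMU drivers that can count events per branch entry now hand the counter array to perf_sample_save_brstack(); drivers without counters pass NULL. A hedged sketch of the call site (the surrounding driver context is hypothetical):

#include <linux/perf_event.h>

static void demo_save_branches(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct perf_branch_stack *br_stack,
			       u64 *br_counters)
{
	/* Only account counter space if the event asked for branch counters. */
	if (branch_sample_counters(event))
		perf_sample_save_brstack(data, event, br_stack, br_counters);
	else
		perf_sample_save_brstack(data, event, br_stack, NULL);
}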
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index af7639c3b0a3..466cf477551a 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -184,6 +184,13 @@ static inline int pmd_young(pmd_t pmd)
}
#endif
+#ifndef pmd_dirty
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
/*
* A facility to provide lazy MMU batching. This allows PTE updates and
* page invalidations to be delayed until a call to leave lazy MMU mode
@@ -375,7 +382,7 @@ static inline bool arch_has_hw_nonleaf_pmd_young(void)
*/
static inline bool arch_has_hw_pte_young(void)
{
- return false;
+ return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG);
}
#endif
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 3cc52826f18e..bd285950972c 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -568,7 +568,6 @@ struct macsec_ops;
* - Bits [31:24] are reserved for defining generic
* PHY driver behavior.
* @irq: IRQ number of the PHY's interrupt (-1 if none)
- * @phy_timer: The timer for handling the state machine
* @phylink: Pointer to phylink instance for this PHY
* @sfp_bus_attached: Flag indicating whether the SFP bus has been attached
* @sfp_bus: SFP bus attached to this PHY's fiber port
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index ccd97bcef269..76dcb7f37bcd 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -45,18 +45,6 @@ struct dev_pm_opp_supply {
unsigned long u_watt;
};
-/**
- * struct dev_pm_opp_icc_bw - Interconnect bandwidth values
- * @avg: Average bandwidth corresponding to this OPP (in icc units)
- * @peak: Peak bandwidth corresponding to this OPP (in icc units)
- *
- * This structure stores the bandwidth values for a single interconnect path.
- */
-struct dev_pm_opp_icc_bw {
- u32 avg;
- u32 peak;
-};
-
typedef int (*config_regulators_t)(struct device *dev,
struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
struct regulator **regulators, unsigned int count);
@@ -74,8 +62,10 @@ typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table,
* @supported_hw_count: Number of elements in the array.
* @regulator_names: Array of pointers to the names of the regulator, NULL terminated.
* @genpd_names: Null terminated array of pointers containing names of genpd to
- * attach.
- * @virt_devs: Pointer to return the array of virtual devices.
+ * attach. Mutually exclusive with required_devs.
+ * @virt_devs: Pointer to return the array of genpd virtual devices. Mutually
+ * exclusive with required_devs.
+ * @required_devs: Required OPP devices. Mutually exclusive with genpd_names/virt_devs.
*
* This structure contains platform specific OPP configurations for the device.
*/
@@ -90,11 +80,15 @@ struct dev_pm_opp_config {
const char * const *regulator_names;
const char * const *genpd_names;
struct device ***virt_devs;
+ struct device **required_devs;
};
+#define OPP_LEVEL_UNSET U32_MAX
+
/**
* struct dev_pm_opp_data - The data to use to initialize an OPP.
- * @level: The performance level for the OPP.
+ * @level: The performance level for the OPP. Set level to OPP_LEVEL_UNSET if
+ * the level field isn't used.
* @freq: The clock rate in Hz for the OPP.
* @u_volt: The voltage in uV for the OPP.
*/
@@ -157,7 +151,7 @@ struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
unsigned int *level);
struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
- unsigned long *level);
+ unsigned int *level);
struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
unsigned int *bw, int index);
@@ -324,7 +318,7 @@ static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
}
static inline struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
- unsigned long *level)
+ unsigned int *level)
{
return ERR_PTR(-EOPNOTSUPP);
}
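
A short hedged sketch of OPP_LEVEL_UNSET in a dynamically added OPP; the frequency and voltage numbers are placeholders and dev_pm_opp_add_dynamic() is assumed to be the registration path:

#include <linux/pm_opp.h>

static int demo_add_opp(struct device *dev)
{
	struct dev_pm_opp_data data = {
		.level = OPP_LEVEL_UNSET,	/* performance level not used */
		.freq = 800000000,		/* 800 MHz, placeholder */
		.u_volt = 900000,		/* 900 mV, placeholder */
	};

	return dev_pm_opp_add_dynamic(dev, &data);
}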
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 267fb8a4fb6e..ddbe7c3ca4ce 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -435,7 +435,7 @@ struct pnp_protocol {
#define protocol_for_each_dev(protocol, dev) \
list_for_each_entry(dev, &(protocol)->devices, protocol_list)
-extern struct bus_type pnp_bus_type;
+extern const struct bus_type pnp_bus_type;
#if defined(CONFIG_PNP)
diff --git a/include/linux/property.h b/include/linux/property.h
index 9f2585d705a8..97f901c0914e 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -489,6 +489,13 @@ struct software_node {
const struct property_entry *properties;
};
+#define SOFTWARE_NODE(_name_, _properties_, _parent_) \
+ (struct software_node) { \
+ .name = _name_, \
+ .properties = _properties_, \
+ .parent = _parent_, \
+ }
+
bool is_software_node(const struct fwnode_handle *fwnode);
const struct software_node *
to_software_node(const struct fwnode_handle *fwnode);
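
The SOFTWARE_NODE() initializer shortens static node definitions. A hedged sketch of a parent/child pair (node and property names are invented):

#include <linux/property.h>

static const struct property_entry demo_props[] = {
	PROPERTY_ENTRY_U32("reg", 0x10),
	{ }
};

/* nodes[0] is the parent; nodes[1] carries the properties and points back at it. */
static const struct software_node demo_nodes[] = {
	SOFTWARE_NODE("demo-parent", NULL, NULL),
	SOFTWARE_NODE("demo-child", demo_props, &demo_nodes[0]),
};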
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index c4cc3b89ced1..abcdde4df697 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -177,7 +177,17 @@ void ctrl_alt_del(void);
extern void orderly_poweroff(bool force);
extern void orderly_reboot(void);
-void hw_protection_shutdown(const char *reason, int ms_until_forced);
+void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown);
+
+static inline void hw_protection_reboot(const char *reason, int ms_until_forced)
+{
+ __hw_protection_shutdown(reason, ms_until_forced, false);
+}
+
+static inline void hw_protection_shutdown(const char *reason, int ms_until_forced)
+{
+ __hw_protection_shutdown(reason, ms_until_forced, true);
+}
/*
* Emergency restart, callable from an interrupt handler.
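
A hedged sketch of the new reboot flavour from a hardware-protection path; the handler and the 500 ms grace period are illustrative only:

#include <linux/reboot.h>

static void demo_overtemp_alarm(void)
{
	/*
	 * Request an orderly reboot; if it has not completed within 500 ms,
	 * a forced reboot is triggered instead of a full shutdown.
	 */
	hw_protection_reboot("demo: critical over-temperature", 500);
}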
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 39b666b40ea6..4660582a3302 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -33,6 +33,7 @@
#include <linux/err.h>
#include <linux/suspend.h>
+#include <regulator/regulator.h>
struct device;
struct notifier_block;
@@ -85,52 +86,6 @@ struct regulator_dev;
#define REGULATOR_MODE_STANDBY 0x8
/*
- * Regulator notifier events.
- *
- * UNDER_VOLTAGE Regulator output is under voltage.
- * OVER_CURRENT Regulator output current is too high.
- * REGULATION_OUT Regulator output is out of regulation.
- * FAIL Regulator output has failed.
- * OVER_TEMP Regulator over temp.
- * FORCE_DISABLE Regulator forcibly shut down by software.
- * VOLTAGE_CHANGE Regulator voltage changed.
- * Data passed is old voltage cast to (void *).
- * DISABLE Regulator was disabled.
- * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
- * Data passed is "struct pre_voltage_change_data"
- * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
- * Data passed is old voltage cast to (void *).
- * PRE_DISABLE Regulator is about to be disabled
- * ABORT_DISABLE Regulator disable failed for some reason
- *
- * NOTE: These events can be OR'ed together when passed into handler.
- */
-
-#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01
-#define REGULATOR_EVENT_OVER_CURRENT 0x02
-#define REGULATOR_EVENT_REGULATION_OUT 0x04
-#define REGULATOR_EVENT_FAIL 0x08
-#define REGULATOR_EVENT_OVER_TEMP 0x10
-#define REGULATOR_EVENT_FORCE_DISABLE 0x20
-#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
-#define REGULATOR_EVENT_DISABLE 0x80
-#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
-#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
-#define REGULATOR_EVENT_PRE_DISABLE 0x400
-#define REGULATOR_EVENT_ABORT_DISABLE 0x800
-#define REGULATOR_EVENT_ENABLE 0x1000
-/*
- * Following notifications should be emitted only if detected condition
- * is such that the HW is likely to still be working but consumers should
- * take a recovery action to prevent problems esacalating into errors.
- */
-#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000
-#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000
-#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000
-#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000
-#define REGULATOR_EVENT_WARN_MASK 0x1E000
-
-/*
* Regulator errors that can be queried using regulator_get_error_flags
*
* UNDER_VOLTAGE Regulator output is under voltage.
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4b7eceb3828b..22a07c0900a4 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -51,12 +51,7 @@ enum regulator_detection_severity {
/* Initialize struct linear_range for regulators */
#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
-{ \
- .min = _min_uV, \
- .min_sel = _min_sel, \
- .max_sel = _max_sel, \
- .step = _step_uV, \
-}
+ LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV)
/**
* struct regulator_ops - regulator operations.
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 621b7f4a3639..0cd76d264727 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -49,6 +49,13 @@ struct regulator;
#define DISABLE_IN_SUSPEND 1
#define ENABLE_IN_SUSPEND 2
+/*
+ * Default time window (in milliseconds) following a critical under-voltage
+ * event during which less critical actions can be safely carried out by the
+ * system.
+ */
+#define REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS 10
+
/* Regulator active discharge flags */
enum regulator_active_discharge {
REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
@@ -127,6 +134,8 @@ struct notification_limit {
* @ramp_disable: Disable ramp delay when initialising or when setting voltage.
* @soft_start: Enable soft start so that voltage ramps slowly.
* @pull_down: Enable pull down when regulator is disabled.
+ * @system_critical: Set if the regulator is critical to system stability or
+ * functionality.
* @over_current_protection: Auto disable on over current event.
*
* @over_current_detection: Configure over current limits.
@@ -153,6 +162,13 @@ struct notification_limit {
* regulator_active_discharge values are used for
* initialisation.
* @enable_time: Turn-on time of the rails (unit: microseconds)
+ * @uv_less_critical_window_ms: Specifies the time window (in milliseconds)
+ * following a critical under-voltage (UV) event
+ * during which less critical actions can be
+ * safely carried out by the system (for example
+ * logging). After this time window more critical
+ * actions should be taken (for example preventing
+ * HW damage).
*/
struct regulation_constraints {
@@ -204,6 +220,7 @@ struct regulation_constraints {
unsigned int settling_time_up;
unsigned int settling_time_down;
unsigned int enable_time;
+ unsigned int uv_less_critical_window_ms;
unsigned int active_discharge;
@@ -214,6 +231,7 @@ struct regulation_constraints {
unsigned ramp_disable:1; /* disable ramp delay */
unsigned soft_start:1; /* ramp voltage slowly */
unsigned pull_down:1; /* pull down resistor when regulator off */
+ unsigned system_critical:1; /* critical to system stability */
unsigned over_current_protection:1; /* auto disable on over current */
unsigned over_current_detection:1; /* notify on over current */
unsigned over_voltage_detection:1; /* notify on over voltage */
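
A hedged sketch of board constraints using the new fields; the rail name, voltages and the 8 ms window are placeholders:

#include <linux/regulator/machine.h>

static struct regulation_constraints demo_vdd_core_constraints = {
	.name = "vdd-core",
	.min_uV = 800000,
	.max_uV = 1100000,
	/* The system cannot keep running if this rail fails. */
	.system_critical = 1,
	/* Allow 8 ms of less critical work (e.g. logging) after a critical UV event. */
	.uv_less_critical_window_ms = 8,
};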
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b26fe858fd44..b7944a833668 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -121,6 +121,11 @@ static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
down_write(&anon_vma->root->rwsem);
}
+static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
+{
+ return down_write_trylock(&anon_vma->root->rwsem);
+}
+
static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
up_write(&anon_vma->root->rwsem);
@@ -172,133 +177,323 @@ struct anon_vma *folio_get_anon_vma(struct folio *folio);
typedef int __bitwise rmap_t;
/*
- * No special request: if the page is a subpage of a compound page, it is
- * mapped via a PTE. The mapped (sub)page is possibly shared between processes.
+ * No special request: A mapped anonymous (sub)page is possibly shared between
+ * processes.
*/
#define RMAP_NONE ((__force rmap_t)0)
-/* The (sub)page is exclusive to a single process. */
+/* The anonymous (sub)page is exclusive to a single process. */
#define RMAP_EXCLUSIVE ((__force rmap_t)BIT(0))
/*
- * The compound page is not mapped via PTEs, but instead via a single PMD and
- * should be accounted accordingly.
+ * Internally, we're using an enum to specify the granularity. We make the
+ * compiler emit specialized code for each granularity.
*/
-#define RMAP_COMPOUND ((__force rmap_t)BIT(1))
+enum rmap_level {
+ RMAP_LEVEL_PTE = 0,
+ RMAP_LEVEL_PMD,
+};
+
+static inline void __folio_rmap_sanity_checks(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
+{
+ /* hugetlb folios are handled separately. */
+ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+
+ /*
+ * TODO: we get driver-allocated folios that have nothing to do with
+ * the rmap using vm_insert_page(); therefore, we cannot assume that
+ * folio_test_large_rmappable() holds for large folios. We should
+ * handle any desired mapcount+stats accounting for these folios in
+ * VM_MIXEDMAP VMAs separately, and then sanity-check here that
+ * we really only get rmappable folios.
+ */
+
+ VM_WARN_ON_ONCE(nr_pages <= 0);
+ VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);
+ VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
+
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ break;
+ case RMAP_LEVEL_PMD:
+ /*
+ * We don't support folios larger than a single PMD yet. So
+ * when RMAP_LEVEL_PMD is set, we assume that we are creating
+ * a single "entire" mapping of the folio.
+ */
+ VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
+ VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
+ break;
+ default:
+ VM_WARN_ON_ONCE(true);
+ }
+}
/*
* rmap interfaces called when adding or removing pte of page
*/
void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
-void page_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long address, rmap_t flags);
-void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long address);
+void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *, unsigned long address, rmap_t flags);
+#define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \
+ folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
+void folio_add_anon_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *, unsigned long address, rmap_t flags);
void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address);
-void page_add_file_rmap(struct page *, struct vm_area_struct *,
- bool compound);
-void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr,
- struct vm_area_struct *, bool compound);
-void page_remove_rmap(struct page *, struct vm_area_struct *,
- bool compound);
-
-void hugepage_add_anon_rmap(struct folio *, struct vm_area_struct *,
+void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *);
+#define folio_add_file_rmap_pte(folio, page, vma) \
+ folio_add_file_rmap_ptes(folio, page, 1, vma)
+void folio_add_file_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *);
+void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *);
+#define folio_remove_rmap_pte(folio, page, vma) \
+ folio_remove_rmap_ptes(folio, page, 1, vma)
+void folio_remove_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *);
+
+void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address, rmap_t flags);
-void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
+void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address);
-static inline void __page_dup_rmap(struct page *page, bool compound)
+/* See folio_try_dup_anon_rmap_*() */
+static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
+ struct vm_area_struct *vma)
{
- if (compound) {
- struct folio *folio = (struct folio *)page;
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
- VM_BUG_ON_PAGE(compound && !PageHead(page), page);
- atomic_inc(&folio->_entire_mapcount);
- } else {
- atomic_inc(&page->_mapcount);
+ if (PageAnonExclusive(&folio->page)) {
+ if (unlikely(folio_needs_cow_for_dma(vma, folio)))
+ return -EBUSY;
+ ClearPageAnonExclusive(&folio->page);
}
+ atomic_inc(&folio->_entire_mapcount);
+ return 0;
+}
+
+/* See folio_try_share_anon_rmap_*() */
+static inline int hugetlb_try_share_anon_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio);
+
+ /* Paired with the memory barrier in try_grab_folio(). */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb();
+
+ if (unlikely(folio_maybe_dma_pinned(folio)))
+ return -EBUSY;
+ ClearPageAnonExclusive(&folio->page);
+
+ /*
+ * This is conceptually a smp_wmb() paired with the smp_rmb() in
+ * gup_must_unshare().
+ */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb__after_atomic();
+ return 0;
+}
+
+static inline void hugetlb_add_file_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
+
+ atomic_inc(&folio->_entire_mapcount);
+}
+
+static inline void hugetlb_remove_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+
+ atomic_dec(&folio->_entire_mapcount);
}
-static inline void page_dup_file_rmap(struct page *page, bool compound)
+static __always_inline void __folio_dup_file_rmap(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
{
- __page_dup_rmap(page, compound);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ do {
+ atomic_inc(&page->_mapcount);
+ } while (page++, --nr_pages > 0);
+ break;
+ case RMAP_LEVEL_PMD:
+ atomic_inc(&folio->_entire_mapcount);
+ break;
+ }
}
/**
- * page_try_dup_anon_rmap - try duplicating a mapping of an already mapped
- * anonymous page
- * @page: the page to duplicate the mapping for
- * @compound: the page is mapped as compound or as a small page
- * @vma: the source vma
+ * folio_dup_file_rmap_ptes - duplicate PTE mappings of a page range of a folio
+ * @folio: The folio to duplicate the mappings of
+ * @page: The first page to duplicate the mappings of
+ * @nr_pages: The number of pages of which the mapping will be duplicated
*
- * The caller needs to hold the PT lock and the vma->vma_mm->write_protect_seq.
+ * The page range of the folio is defined by [page, page + nr_pages)
*
- * Duplicating the mapping can only fail if the page may be pinned; device
- * private pages cannot get pinned and consequently this function cannot fail.
+ * The caller needs to hold the page table lock.
+ */
+static inline void folio_dup_file_rmap_ptes(struct folio *folio,
+ struct page *page, int nr_pages)
+{
+ __folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE);
+}
+#define folio_dup_file_rmap_pte(folio, page) \
+ folio_dup_file_rmap_ptes(folio, page, 1)
+
+/**
+ * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio
+ * @folio: The folio to duplicate the mapping of
+ * @page: The first page to duplicate the mapping of
*
- * If duplicating the mapping succeeds, the page has to be mapped R/O into
- * the parent and the child. It must *not* get mapped writable after this call.
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
*
- * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
+ * The caller needs to hold the page table lock.
*/
-static inline int page_try_dup_anon_rmap(struct page *page, bool compound,
- struct vm_area_struct *vma)
+static inline void folio_dup_file_rmap_pmd(struct folio *folio,
+ struct page *page)
{
- VM_BUG_ON_PAGE(!PageAnon(page), page);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	__folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+#endif
+}
- /*
- * No need to check+clear for already shared pages, including KSM
- * pages.
- */
- if (!PageAnonExclusive(page))
- goto dup;
+static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *src_vma,
+ enum rmap_level level)
+{
+ bool maybe_pinned;
+ int i;
+
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
/*
- * If this page may have been pinned by the parent process,
- * don't allow to duplicate the mapping but instead require to e.g.,
- * copy the page immediately for the child so that we'll always
- * guarantee the pinned page won't be randomly replaced in the
+ * If this folio may have been pinned by the parent process,
+ * don't allow to duplicate the mappings but instead require to e.g.,
+ * copy the subpage immediately for the child so that we'll always
+ * guarantee the pinned folio won't be randomly replaced in the
* future on write faults.
*/
- if (likely(!is_device_private_page(page) &&
- unlikely(page_needs_cow_for_dma(vma, page))))
- return -EBUSY;
+ maybe_pinned = likely(!folio_is_device_private(folio)) &&
+ unlikely(folio_needs_cow_for_dma(src_vma, folio));
- ClearPageAnonExclusive(page);
/*
- * It's okay to share the anon page between both processes, mapping
- * the page R/O into both processes.
+ * No need to check+clear for already shared PTEs/PMDs of the
+ * folio. But if any page is PageAnonExclusive, we must fall back to
+ * copying if the folio may be pinned.
*/
-dup:
- __page_dup_rmap(page, compound);
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ if (unlikely(maybe_pinned)) {
+ for (i = 0; i < nr_pages; i++)
+ if (PageAnonExclusive(page + i))
+ return -EBUSY;
+ }
+ do {
+ if (PageAnonExclusive(page))
+ ClearPageAnonExclusive(page);
+ atomic_inc(&page->_mapcount);
+ } while (page++, --nr_pages > 0);
+ break;
+ case RMAP_LEVEL_PMD:
+ if (PageAnonExclusive(page)) {
+ if (unlikely(maybe_pinned))
+ return -EBUSY;
+ ClearPageAnonExclusive(page);
+ }
+ atomic_inc(&folio->_entire_mapcount);
+ break;
+ }
return 0;
}
/**
- * page_try_share_anon_rmap - try marking an exclusive anonymous page possibly
- * shared to prepare for KSM or temporary unmapping
- * @page: the exclusive anonymous page to try marking possibly shared
+ * folio_try_dup_anon_rmap_ptes - try duplicating PTE mappings of a page range
+ * of a folio
+ * @folio: The folio to duplicate the mappings of
+ * @page: The first page to duplicate the mappings of
+ * @nr_pages: The number of pages of which the mapping will be duplicated
+ * @src_vma: The vm area from which the mappings are duplicated
+ *
+ * The page range of the folio is defined by [page, page + nr_pages)
*
- * The caller needs to hold the PT lock and has to have the page table entry
- * cleared/invalidated.
+ * The caller needs to hold the page table lock and the
+ * vma->vma_mm->write_protect_seq.
*
- * This is similar to page_try_dup_anon_rmap(), however, not used during fork()
- * to duplicate a mapping, but instead to prepare for KSM or temporarily
- * unmapping a page (swap, migration) via page_remove_rmap().
+ * Duplicating the mappings can only fail if the folio may be pinned; device
+ * private folios cannot get pinned and consequently this function cannot fail
+ * for them.
*
- * Marking the page shared can only fail if the page may be pinned; device
- * private pages cannot get pinned and consequently this function cannot fail.
+ * If duplicating the mappings succeeded, the duplicated PTEs have to be R/O in
+ * the parent and the child. They must *not* be writable after this call
+ * succeeded.
*
- * Returns 0 if marking the page possibly shared succeeded. Returns -EBUSY
- * otherwise.
+ * Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise.
*/
-static inline int page_try_share_anon_rmap(struct page *page)
+static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *src_vma)
{
- VM_BUG_ON_PAGE(!PageAnon(page) || !PageAnonExclusive(page), page);
+ return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma,
+ RMAP_LEVEL_PTE);
+}
+#define folio_try_dup_anon_rmap_pte(folio, page, vma) \
+ folio_try_dup_anon_rmap_ptes(folio, page, 1, vma)
- /* device private pages cannot get pinned via GUP. */
- if (unlikely(is_device_private_page(page))) {
+/**
+ * folio_try_dup_anon_rmap_pmd - try duplicating a PMD mapping of a page range
+ * of a folio
+ * @folio: The folio to duplicate the mapping of
+ * @page: The first page to duplicate the mapping of
+ * @src_vma: The vm area from which the mapping is duplicated
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock and the
+ * vma->vma_mm->write_protect_seq.
+ *
+ * Duplicating the mapping can only fail if the folio may be pinned; device
+ * private folios cannot get pinned and consequently this function cannot fail
+ * for them.
+ *
+ * If duplicating the mapping succeeds, the duplicated PMD has to be R/O in
+ * the parent and the child. They must *not* be writable after this call
+ * succeeded.
+ *
+ * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
+ */
+static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
+ struct page *page, struct vm_area_struct *src_vma)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma,
+ RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+ return -EBUSY;
+#endif
+}
+
+static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
+{
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ /* device private folios cannot get pinned via GUP. */
+ if (unlikely(folio_is_device_private(folio))) {
ClearPageAnonExclusive(page);
return 0;
}
@@ -349,7 +544,7 @@ static inline int page_try_share_anon_rmap(struct page *page)
if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
smp_mb();
- if (unlikely(page_maybe_dma_pinned(page)))
+ if (unlikely(folio_maybe_dma_pinned(folio)))
return -EBUSY;
ClearPageAnonExclusive(page);
@@ -362,6 +557,68 @@ static inline int page_try_share_anon_rmap(struct page *page)
return 0;
}
+/**
+ * folio_try_share_anon_rmap_pte - try marking an exclusive anonymous page
+ * mapped by a PTE possibly shared to prepare
+ * for KSM or temporary unmapping
+ * @folio: The folio to share a mapping of
+ * @page: The mapped exclusive page
+ *
+ * The caller needs to hold the page table lock and has to have the page table
+ * entries cleared/invalidated.
+ *
+ * This is similar to folio_try_dup_anon_rmap_pte(), however, not used during
+ * fork() to duplicate mappings, but instead to prepare for KSM or temporarily
+ * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pte().
+ *
+ * Marking the mapped page shared can only fail if the folio may be pinned;
+ * device private folios cannot get pinned and consequently this function cannot
+ * fail.
+ *
+ * Returns 0 if marking the mapped page possibly shared succeeded. Returns
+ * -EBUSY otherwise.
+ */
+static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
+ struct page *page)
+{
+ return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE);
+}
+
+/**
+ * folio_try_share_anon_rmap_pmd - try marking an exclusive anonymous page
+ * range mapped by a PMD possibly shared to
+ * prepare for temporary unmapping
+ * @folio: The folio to share the mapping of
+ * @page: The first page to share the mapping of
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock and has to have the page table
+ * entries cleared/invalidated.
+ *
+ * This is similar to folio_try_dup_anon_rmap_pmd(), however, not used during
+ * fork() to duplicate a mapping, but instead to prepare for temporarily
+ * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pmd().
+ *
+ * Marking the mapped pages shared can only fail if the folio may be pinned;
+ * device private folios cannot get pinned and consequently this function cannot
+ * fail.
+ *
+ * Returns 0 if marking the mapped pages possibly shared succeeded. Returns
+ * -EBUSY otherwise.
+ */
+static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
+ struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
+ RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+ return -EBUSY;
+#endif
+}
+
/*
* Called from mm/vmscan.c to handle paging out
*/
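
The per-page add/remove calls are replaced by folio interfaces that take an explicit page range. A hedged sketch of the single-PTE case for a file-backed folio; the helpers are hypothetical and the page-table manipulation itself is omitted:

#include <linux/mm.h>
#include <linux/rmap.h>

/* Caller maps 'page' of 'folio' at one PTE; the page table lock is held. */
static void demo_account_map(struct folio *folio, struct page *page,
			     struct vm_area_struct *vma)
{
	folio_add_file_rmap_pte(folio, page, vma);
}

/* Matching teardown when the PTE is cleared again. */
static void demo_account_unmap(struct folio *folio, struct page *page,
			       struct vm_area_struct *vma)
{
	folio_remove_rmap_pte(folio, page, vma);
}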
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 1dd530ce8b45..9c29689ff505 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -203,11 +203,11 @@ extern void up_read(struct rw_semaphore *sem);
extern void up_write(struct rw_semaphore *sem);
DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
-DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
-
-DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T))
-DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T))
+DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
+DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)
+DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
+DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
/*
* downgrade write lock to read lock
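
As with the mutex guards above, the conditional rwsem guards pair with scoped_cond_guard(). A minimal hedged sketch (the semaphore and function are hypothetical):

#include <linux/cleanup.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(demo_sem);

static int demo_read_state(void)
{
	/* Interruptible read lock, dropped automatically on scope exit. */
	scoped_cond_guard(rwsem_read_intr, return -EINTR, &demo_sem) {
		/* demo_sem is held for read here. */
	}
	return 0;
}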
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 292c31697248..d3097f0682d7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -63,11 +63,13 @@ struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
+struct sched_dl_entity;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
+struct task_struct;
struct user_event_mm;
/*
@@ -413,42 +415,6 @@ struct load_weight {
u32 inv_weight;
};
-/**
- * struct util_est - Estimation utilization of FAIR tasks
- * @enqueued: instantaneous estimated utilization of a task/cpu
- * @ewma: the Exponential Weighted Moving Average (EWMA)
- * utilization of a task
- *
- * Support data structure to track an Exponential Weighted Moving Average
- * (EWMA) of a FAIR task's utilization. New samples are added to the moving
- * average each time a task completes an activation. Sample's weight is chosen
- * so that the EWMA will be relatively insensitive to transient changes to the
- * task's workload.
- *
- * The enqueued attribute has a slightly different meaning for tasks and cpus:
- * - task: the task's util_avg at last task dequeue time
- * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
- * Thus, the util_est.enqueued of a task represents the contribution on the
- * estimated utilization of the CPU where that task is currently enqueued.
- *
- * Only for tasks we track a moving average of the past instantaneous
- * estimated utilization. This allows to absorb sporadic drops in utilization
- * of an otherwise almost periodic task.
- *
- * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
- * updates. When a task is dequeued, its util_est should not be updated if its
- * util_avg has not been updated in the meantime.
- * This information is mapped into the MSB bit of util_est.enqueued at dequeue
- * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
- * for a task) it is safe to use MSB.
- */
-struct util_est {
- unsigned int enqueued;
- unsigned int ewma;
-#define UTIL_EST_WEIGHT_SHIFT 2
-#define UTIL_AVG_UNCHANGED 0x80000000
-} __attribute__((__aligned__(sizeof(u64))));
-
/*
* The load/runnable/util_avg accumulates an infinite geometric series
* (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
@@ -503,9 +469,20 @@ struct sched_avg {
unsigned long load_avg;
unsigned long runnable_avg;
unsigned long util_avg;
- struct util_est util_est;
+ unsigned int util_est;
} ____cacheline_aligned;
+/*
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB bit of util_est at dequeue time.
+ * Since max value of util_est for a task is 1024 (PELT util_avg for a task)
+ * it is safe to use MSB.
+ */
+#define UTIL_EST_WEIGHT_SHIFT 2
+#define UTIL_AVG_UNCHANGED 0x80000000
+
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
@@ -523,7 +500,7 @@ struct sched_statistics {
u64 block_max;
s64 sum_block_runtime;
- u64 exec_max;
+ s64 exec_max;
u64 slice_max;
u64 nr_migrations_cold;
@@ -553,7 +530,7 @@ struct sched_entity {
struct load_weight load;
struct rb_node run_node;
u64 deadline;
- u64 min_deadline;
+ u64 min_vruntime;
struct list_head group_node;
unsigned int on_rq;
@@ -607,6 +584,9 @@ struct sched_rt_entity {
#endif
} __randomize_layout;
+typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
+typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
+
struct sched_dl_entity {
struct rb_node rb_node;
@@ -654,6 +634,7 @@ struct sched_dl_entity {
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
+ unsigned int dl_server : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -668,7 +649,20 @@ struct sched_dl_entity {
* timer is needed to decrease the active utilization at the correct
* time.
*/
- struct hrtimer inactive_timer;
+ struct hrtimer inactive_timer;
+
+ /*
+ * Bits for DL-server functionality. Also see the comment near
+ * dl_server_update().
+ *
+ * @rq the runqueue this server is for
+ *
+ * @server_has_tasks() returns true if @server_pick returns a
+ * runnable task.
+ */
+ struct rq *rq;
+ dl_server_has_tasks_f server_has_tasks;
+ dl_server_pick_f server_pick;
#ifdef CONFIG_RT_MUTEXES
/*
@@ -795,6 +789,7 @@ struct task_struct {
struct sched_entity se;
struct sched_rt_entity rt;
struct sched_dl_entity dl;
+ struct sched_dl_entity *dl_server;
const struct sched_class *sched_class;
#ifdef CONFIG_SCHED_CORE
@@ -1955,9 +1950,7 @@ extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
-#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
struct task_struct task;
-#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
#endif
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index fe1a46f30d24..2b461129d1fa 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -2,6 +2,7 @@
#define _LINUX_SCHED_ISOLATION_H
#include <linux/cpumask.h>
+#include <linux/cpuset.h>
#include <linux/init.h>
#include <linux/tick.h>
@@ -67,7 +68,8 @@ static inline bool housekeeping_cpu(int cpu, enum hk_type type)
static inline bool cpu_is_isolated(int cpu)
{
return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||
- !housekeeping_test_cpu(cpu, HK_TYPE_TICK);
+ !housekeeping_test_cpu(cpu, HK_TYPE_TICK) ||
+ cpuset_cpu_is_isolated(cpu);
}
#endif /* _LINUX_SCHED_ISOLATION_H */
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 3499c1a8b929..015c0e3a3e1d 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -432,7 +432,6 @@ static inline bool fault_signal_pending(vm_fault_t fault_flags,
* This is required every time the blocked sigset_t changes.
* callers must hold sighand->siglock.
*/
-extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);
@@ -646,6 +645,9 @@ extern bool current_is_single_threaded(void);
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
+#define for_other_threads(p, t) \
+ for (t = p; (t = next_thread(t)) != p; )
+
#define __for_each_thread(signal, t) \
list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
lockdep_is_held(&tasklist_lock))
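
for_other_threads() walks every thread of a group except the starting one. A hedged sketch (the locking requirement is noted as an assumption):

#include <linux/sched/signal.h>

/*
 * Hypothetical: count the other threads in p's thread group.
 * Assumes the caller holds tasklist_lock or the RCU read lock.
 */
static int demo_count_other_threads(struct task_struct *p)
{
	struct task_struct *t;
	int n = 0;

	for_other_threads(p, t)
		n++;

	return n;
}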
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index a23af225c898..4f3dca353556 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -226,4 +226,6 @@ static inline void task_unlock(struct task_struct *p)
spin_unlock(&p->alloc_lock);
}
+DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
+
#endif /* _LINUX_SCHED_TASK_H */
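
A hedged sketch of the new task_lock guard class; the helper is hypothetical:

#include <linux/cleanup.h>
#include <linux/sched/task.h>

static void demo_inspect(struct task_struct *p)
{
	guard(task_lock)(p);	/* task_unlock(p) runs automatically on return */

	/* p->alloc_lock is held here; fields it protects can be read safely. */
}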
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index de545ba85218..a6e04b4a21d7 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -279,6 +279,14 @@ void arch_update_thermal_pressure(const struct cpumask *cpus,
{ }
#endif
+#ifndef arch_scale_freq_ref
+static __always_inline
+unsigned int arch_scale_freq_ref(int cpu)
+{
+ return 0;
+}
+#endif
+
static inline int task_node(const struct task_struct *p)
{
return cpu_to_node(task_cpu(p));
diff --git a/include/linux/security.h b/include/linux/security.h
index 1d1df326c881..d0eb20f90b26 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -32,6 +32,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/sockptr.h>
+#include <uapi/linux/lsm.h>
struct linux_binprm;
struct cred;
@@ -60,6 +61,7 @@ struct fs_parameter;
enum fs_value_type;
struct watch;
struct watch_notification;
+struct lsm_ctx;
/* Default (no) options for the capable function */
#define CAP_OPT_NONE 0x0
@@ -138,6 +140,8 @@ enum lockdown_reason {
};
extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1];
+extern u32 lsm_active_cnt;
+extern const struct lsm_id *lsm_idlist[];
/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
@@ -261,6 +265,7 @@ int unregister_blocking_lsm_notifier(struct notifier_block *nb);
/* prototypes */
extern int security_init(void);
extern int early_security_init(void);
+extern u64 lsm_name_to_attr(const char *name);
/* Security operations */
int security_binder_set_context_mgr(const struct cred *mgr);
@@ -389,6 +394,8 @@ int security_file_permission(struct file *file, int mask);
int security_file_alloc(struct file *file);
void security_file_free(struct file *file);
int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int security_file_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg);
int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags);
int security_mmap_addr(unsigned long addr);
@@ -470,10 +477,13 @@ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd);
int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
unsigned nsops, int alter);
void security_d_instantiate(struct dentry *dentry, struct inode *inode);
-int security_getprocattr(struct task_struct *p, const char *lsm, const char *name,
+int security_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
+ size_t __user *size, u32 flags);
+int security_setselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
+ size_t size, u32 flags);
+int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
char **value);
-int security_setprocattr(const char *lsm, const char *name, void *value,
- size_t size);
+int security_setprocattr(int lsmid, const char *name, void *value, size_t size);
int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_ismaclabel(const char *name);
int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
@@ -484,6 +494,8 @@ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
int security_locked_down(enum lockdown_reason what);
+int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, size_t *uctx_len,
+ void *val, size_t val_len, u64 id, u64 flags);
#else /* CONFIG_SECURITY */
static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data)
@@ -501,6 +513,11 @@ static inline int unregister_blocking_lsm_notifier(struct notifier_block *nb)
return 0;
}
+static inline u64 lsm_name_to_attr(const char *name)
+{
+ return LSM_ATTR_UNDEF;
+}
+
static inline void security_free_mnt_opts(void **mnt_opts)
{
}
@@ -987,6 +1004,13 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd,
return 0;
}
+static inline int security_file_ioctl_compat(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ return 0;
+}
+
static inline int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags)
{
@@ -1337,14 +1361,28 @@ static inline void security_d_instantiate(struct dentry *dentry,
struct inode *inode)
{ }
-static inline int security_getprocattr(struct task_struct *p, const char *lsm,
+static inline int security_getselfattr(unsigned int attr,
+ struct lsm_ctx __user *ctx,
+ size_t __user *size, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_setselfattr(unsigned int attr,
+ struct lsm_ctx __user *ctx,
+ size_t size, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_getprocattr(struct task_struct *p, int lsmid,
const char *name, char **value)
{
return -EINVAL;
}
-static inline int security_setprocattr(const char *lsm, char *name,
- void *value, size_t size)
+static inline int security_setprocattr(int lsmid, char *name, void *value,
+ size_t size)
{
return -EINVAL;
}
@@ -1395,6 +1433,12 @@ static inline int security_locked_down(enum lockdown_reason what)
{
return 0;
}
+static inline int lsm_fill_user_ctx(struct lsm_ctx __user *uctx,
+ size_t *uctx_len, void *val, size_t val_len,
+ u64 id, u64 flags)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_SECURITY */
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index d6d6ffeeb9a2..b5f5ee8308d0 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -24,7 +24,7 @@
/*
* Flags to pass to kmem_cache_create().
- * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
+ * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise they are no-ops.
*/
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
@@ -302,25 +302,15 @@ static inline unsigned int arch_slab_minalign(void)
* Kmalloc array related definitions
*/
-#ifdef CONFIG_SLAB
/*
- * SLAB and SLUB directly allocates requests fitting in to an order-1 page
+ * SLUB directly allocates requests fitting in to an order-1 page
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
-#ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW 5
-#endif
-#endif
-
-#ifdef CONFIG_SLUB
-#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX (MAX_PAGE_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
-#endif
/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
@@ -788,12 +778,4 @@ size_t kmalloc_size_roundup(size_t size);
void __init kmem_cache_init_late(void);
-#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
-int slab_prepare_cpu(unsigned int cpu);
-int slab_dead_cpu(unsigned int cpu);
-#else
-#define slab_prepare_cpu NULL
-#define slab_dead_cpu NULL
-#endif
-
#endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
deleted file mode 100644
index a61e7d55d0d3..000000000000
--- a/include/linux/slab_def.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SLAB_DEF_H
-#define _LINUX_SLAB_DEF_H
-
-#include <linux/kfence.h>
-#include <linux/reciprocal_div.h>
-
-/*
- * Definitions unique to the original Linux SLAB allocator.
- */
-
-struct kmem_cache {
- struct array_cache __percpu *cpu_cache;
-
-/* 1) Cache tunables. Protected by slab_mutex */
- unsigned int batchcount;
- unsigned int limit;
- unsigned int shared;
-
- unsigned int size;
- struct reciprocal_value reciprocal_buffer_size;
-/* 2) touched by every alloc & free from the backend */
-
- slab_flags_t flags; /* constant flags */
- unsigned int num; /* # of objs per slab */
-
-/* 3) cache_grow/shrink */
- /* order of pgs per slab (2^n) */
- unsigned int gfporder;
-
- /* force GFP flags, e.g. GFP_DMA */
- gfp_t allocflags;
-
- size_t colour; /* cache colouring range */
- unsigned int colour_off; /* colour offset */
- unsigned int freelist_size;
-
- /* constructor func */
- void (*ctor)(void *obj);
-
-/* 4) cache creation/removal */
- const char *name;
- struct list_head list;
- int refcount;
- int object_size;
- int align;
-
-/* 5) statistics */
-#ifdef CONFIG_DEBUG_SLAB
- unsigned long num_active;
- unsigned long num_allocations;
- unsigned long high_mark;
- unsigned long grown;
- unsigned long reaped;
- unsigned long errors;
- unsigned long max_freeable;
- unsigned long node_allocs;
- unsigned long node_frees;
- unsigned long node_overflow;
- atomic_t allochit;
- atomic_t allocmiss;
- atomic_t freehit;
- atomic_t freemiss;
-
- /*
- * If debugging is enabled, then the allocator can add additional
- * fields and/or padding to every object. 'size' contains the total
- * object size including these internal fields, while 'obj_offset'
- * and 'object_size' contain the offset to the user object and its
- * size.
- */
- int obj_offset;
-#endif /* CONFIG_DEBUG_SLAB */
-
-#ifdef CONFIG_KASAN_GENERIC
- struct kasan_cache kasan_info;
-#endif
-
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
- unsigned int *random_seq;
-#endif
-
-#ifdef CONFIG_HARDENED_USERCOPY
- unsigned int useroffset; /* Usercopy region offset */
- unsigned int usersize; /* Usercopy region size */
-#endif
-
- struct kmem_cache_node *node[MAX_NUMNODES];
-};
-
-static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
- void *x)
-{
- void *object = x - (x - slab->s_mem) % cache->size;
- void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
-
- if (unlikely(object > last_object))
- return last_object;
- else
- return object;
-}
-
-/*
- * We want to avoid an expensive divide : (offset / cache->size)
- * Using the fact that size is a constant for a particular cache,
- * we can replace (offset / cache->size) by
- * reciprocal_divide(offset, cache->reciprocal_buffer_size)
- */
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
-{
- u32 offset = (obj - slab->s_mem);
- return reciprocal_divide(offset, cache->reciprocal_buffer_size);
-}
-
-static inline int objs_per_slab(const struct kmem_cache *cache,
- const struct slab *slab)
-{
- if (is_kfence_address(slab_address(slab)))
- return 1;
- return cache->num;
-}
-
-#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
deleted file mode 100644
index deb90cf4bffb..000000000000
--- a/include/linux/slub_def.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SLUB_DEF_H
-#define _LINUX_SLUB_DEF_H
-
-/*
- * SLUB : A Slab allocator without object queues.
- *
- * (C) 2007 SGI, Christoph Lameter
- */
-#include <linux/kfence.h>
-#include <linux/kobject.h>
-#include <linux/reciprocal_div.h>
-#include <linux/local_lock.h>
-
-enum stat_item {
- ALLOC_FASTPATH, /* Allocation from cpu slab */
- ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
- FREE_FASTPATH, /* Free to cpu slab */
- FREE_SLOWPATH, /* Freeing not to cpu slab */
- FREE_FROZEN, /* Freeing to frozen slab */
- FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
- FREE_REMOVE_PARTIAL, /* Freeing removes last object */
- ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */
- ALLOC_SLAB, /* Cpu slab acquired from page allocator */
- ALLOC_REFILL, /* Refill cpu slab from slab freelist */
- ALLOC_NODE_MISMATCH, /* Switching cpu slab */
- FREE_SLAB, /* Slab freed to the page allocator */
- CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
- DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
- DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */
- DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
- DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
- DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
- DEACTIVATE_BYPASS, /* Implicit deactivation */
- ORDER_FALLBACK, /* Number of times fallback was necessary */
- CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
- CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */
- CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */
- CPU_PARTIAL_FREE, /* Refill cpu partial on free */
- CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */
- CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */
- NR_SLUB_STAT_ITEMS
-};
-
-#ifndef CONFIG_SLUB_TINY
-/*
- * When changing the layout, make sure freelist and tid are still compatible
- * with this_cpu_cmpxchg_double() alignment requirements.
- */
-struct kmem_cache_cpu {
- union {
- struct {
- void **freelist; /* Pointer to next available object */
- unsigned long tid; /* Globally unique transaction id */
- };
- freelist_aba_t freelist_tid;
- };
- struct slab *slab; /* The slab from which we are allocating */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
- struct slab *partial; /* Partially allocated frozen slabs */
-#endif
- local_lock_t lock; /* Protects the fields above */
-#ifdef CONFIG_SLUB_STATS
- unsigned stat[NR_SLUB_STAT_ITEMS];
-#endif
-};
-#endif /* CONFIG_SLUB_TINY */
-
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-#define slub_percpu_partial(c) ((c)->partial)
-
-#define slub_set_percpu_partial(c, p) \
-({ \
- slub_percpu_partial(c) = (p)->next; \
-})
-
-#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
-#else
-#define slub_percpu_partial(c) NULL
-
-#define slub_set_percpu_partial(c, p)
-
-#define slub_percpu_partial_read_once(c) NULL
-#endif // CONFIG_SLUB_CPU_PARTIAL
-
-/*
- * Word size structure that can be atomically updated or read and that
- * contains both the order and the number of objects that a slab of the
- * given order would contain.
- */
-struct kmem_cache_order_objects {
- unsigned int x;
-};
-
-/*
- * Slab cache management.
- */
-struct kmem_cache {
-#ifndef CONFIG_SLUB_TINY
- struct kmem_cache_cpu __percpu *cpu_slab;
-#endif
- /* Used for retrieving partial slabs, etc. */
- slab_flags_t flags;
- unsigned long min_partial;
- unsigned int size; /* The size of an object including metadata */
- unsigned int object_size;/* The size of an object without metadata */
- struct reciprocal_value reciprocal_size;
- unsigned int offset; /* Free pointer offset */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
- /* Number of per cpu partial objects to keep around */
- unsigned int cpu_partial;
- /* Number of per cpu partial slabs to keep around */
- unsigned int cpu_partial_slabs;
-#endif
- struct kmem_cache_order_objects oo;
-
- /* Allocation and freeing of slabs */
- struct kmem_cache_order_objects min;
- gfp_t allocflags; /* gfp flags to use on each alloc */
- int refcount; /* Refcount for slab cache destroy */
- void (*ctor)(void *);
- unsigned int inuse; /* Offset to metadata */
- unsigned int align; /* Alignment */
- unsigned int red_left_pad; /* Left redzone padding size */
- const char *name; /* Name (only for display!) */
- struct list_head list; /* List of slab caches */
-#ifdef CONFIG_SYSFS
- struct kobject kobj; /* For sysfs */
-#endif
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
- unsigned long random;
-#endif
-
-#ifdef CONFIG_NUMA
- /*
- * Defragmentation by allocating from a remote node.
- */
- unsigned int remote_node_defrag_ratio;
-#endif
-
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
- unsigned int *random_seq;
-#endif
-
-#ifdef CONFIG_KASAN_GENERIC
- struct kasan_cache kasan_info;
-#endif
-
-#ifdef CONFIG_HARDENED_USERCOPY
- unsigned int useroffset; /* Usercopy region offset */
- unsigned int usersize; /* Usercopy region size */
-#endif
-
- struct kmem_cache_node *node[MAX_NUMNODES];
-};
-
-#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
-#define SLAB_SUPPORTS_SYSFS
-void sysfs_slab_unlink(struct kmem_cache *);
-void sysfs_slab_release(struct kmem_cache *);
-#else
-static inline void sysfs_slab_unlink(struct kmem_cache *s)
-{
-}
-static inline void sysfs_slab_release(struct kmem_cache *s)
-{
-}
-#endif
-
-void *fixup_red_left(struct kmem_cache *s, void *p);
-
-static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
- void *x) {
- void *object = x - (x - slab_address(slab)) % cache->size;
- void *last_object = slab_address(slab) +
- (slab->objects - 1) * cache->size;
- void *result = (unlikely(object > last_object)) ? last_object : object;
-
- result = fixup_red_left(cache, result);
- return result;
-}
-
-/* Determine object index from a given position */
-static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
- void *addr, void *obj)
-{
- return reciprocal_divide(kasan_reset_tag(obj) - addr,
- cache->reciprocal_size);
-}
-
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
-{
- if (is_kfence_address(obj))
- return 0;
- return __obj_to_index(cache, slab_address(slab), obj);
-}
-
-static inline int objs_per_slab(const struct kmem_cache *cache,
- const struct slab *slab)
-{
- return slab->objects;
-}
-#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 6b0a7dc48a4b..f866d5c8ed32 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -233,6 +233,8 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* limitations)
* @supports_op: check if an operation is supported by the controller
* @exec_op: execute a SPI memory operation
 * not all drivers provide supports_op(), so exec_op() can return -EOPNOTSUPP
 * if the op is not supported by the driver/controller
* @get_name: get a custom name for the SPI mem device from the controller.
* This might be needed if the controller driver has been ported
* to use the SPI mem layer and a custom name is used to keep
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 255a0562aea5..471fe2ff9066 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -20,6 +20,9 @@
#include <uapi/linux/spi/spi.h>
+/* Max no. of CS supported per spi device */
+#define SPI_CS_CNT_MAX 4
+
struct dma_chan;
struct software_node;
struct ptp_system_timestamp;
@@ -132,7 +135,8 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* @max_speed_hz: Maximum clock rate to be used with this chip
* (on this board); may be changed by the device's driver.
* The spi_transfer.speed_hz can override this for each transfer.
- * @chip_select: Chipselect, distinguishing chips handled by @controller.
* @chip_select: Array of physical chipselects; spi->chip_select[i] gives
* the corresponding physical CS for logical CS i.
* @mode: The spi mode defines how data is clocked out and in.
* This may be changed by the device's driver.
* The "active low" default for chipselect mode can be overridden
@@ -157,8 +161,8 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* the device will bind to the named driver and only the named driver.
* Do not set directly, because core frees it; use driver_set_override() to
* set or clear it.
- * @cs_gpiod: GPIO descriptor of the chipselect line (optional, NULL when
- * not using a GPIO line)
+ * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
+ * (optional, NULL when not using a GPIO line)
* @word_delay: delay to be inserted between consecutive
* words of a transfer
* @cs_setup: delay to be introduced by the controller after CS is asserted
@@ -167,6 +171,7 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* deasserted. If @cs_change_delay is used from @spi_transfer, then the
* two delays will be added up.
* @pcpu_statistics: statistics for the spi_device
+ * @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array
*
* A @spi_device is used to interchange data between an SPI slave
* (usually a discrete chip) and CPU memory.
@@ -182,7 +187,7 @@ struct spi_device {
struct spi_controller *controller;
struct spi_controller *master; /* Compatibility layer */
u32 max_speed_hz;
- u8 chip_select;
+ u8 chip_select[SPI_CS_CNT_MAX];
u8 bits_per_word;
bool rt;
#define SPI_NO_TX BIT(31) /* No transmit wire */
@@ -213,7 +218,7 @@ struct spi_device {
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
- struct gpio_desc *cs_gpiod; /* Chip select GPIO descriptor */
+ struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */
struct spi_delay word_delay; /* Inter-word delay */
/* CS delays */
struct spi_delay cs_setup;
@@ -223,6 +228,13 @@ struct spi_device {
/* The statistics */
struct spi_statistics __percpu *pcpu_statistics;
/* Bit mask of the chipselect(s) that the driver needs to use from
* the chipselect array. When the controller is capable of handling
* multiple chip selects and memories are connected in parallel,
* more than one bit needs to be set in cs_index_mask.
*/
+ u32 cs_index_mask : SPI_CS_CNT_MAX;
+
/*
* Likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
@@ -279,22 +291,33 @@ static inline void *spi_get_drvdata(const struct spi_device *spi)
static inline u8 spi_get_chipselect(const struct spi_device *spi, u8 idx)
{
- return spi->chip_select;
+ return spi->chip_select[idx];
}
static inline void spi_set_chipselect(struct spi_device *spi, u8 idx, u8 chipselect)
{
- spi->chip_select = chipselect;
+ spi->chip_select[idx] = chipselect;
}
static inline struct gpio_desc *spi_get_csgpiod(const struct spi_device *spi, u8 idx)
{
- return spi->cs_gpiod;
+ return spi->cs_gpiod[idx];
}
static inline void spi_set_csgpiod(struct spi_device *spi, u8 idx, struct gpio_desc *csgpiod)
{
- spi->cs_gpiod = csgpiod;
+ spi->cs_gpiod[idx] = csgpiod;
+}
+
+static inline bool spi_is_csgpiod(struct spi_device *spi)
+{
+ u8 idx;
+
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ if (spi_get_csgpiod(spi, idx))
+ return true;
+ }
+ return false;
}
/**
@@ -399,6 +422,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for exclusion of multiple callers
* @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
+ * @multi_cs_cap: indicates that the SPI Controller can assert/de-assert
+ * more than one chip select at once.
* @setup: updates the device mode and clocking records used by a
* device's SPI controller; protocol code may call this. This
* must fail if an unrecognized or unsupported mode is requested.
@@ -461,10 +486,13 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* - return 1 if the transfer is still in progress. When
* the driver is finished with this transfer it must
* call spi_finalize_current_transfer() so the subsystem
- * can issue the next transfer. Note: transfer_one and
- * transfer_one_message are mutually exclusive; when both
- * are set, the generic subsystem does not call your
- * transfer_one callback.
+ * can issue the next transfer. If the transfer fails, the
+ * driver must set the SPI_TRANS_FAIL_IO flag in
+ * spi_transfer->error before calling
+ * spi_finalize_current_transfer().
+ * Note: transfer_one and transfer_one_message are mutually
+ * exclusive; when both are set, the generic subsystem does
+ * not call your transfer_one callback.
* @handle_err: the subsystem calls the driver to handle an error that occurs
* in the generic implementation of transfer_one_message().
* @mem_ops: optimized/dedicated operations for interactions with SPI memory.
@@ -567,6 +595,11 @@ struct spi_controller {
#define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */
#define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select slave */
#define SPI_CONTROLLER_SUSPENDED BIT(6) /* Currently suspended */
+ /*
+ * The spi-controller has multi chip select capability and can
+ * assert/de-assert more than one chip select at once.
+ */
+#define SPI_CONTROLLER_MULTI_CS BIT(7)
/* Flag indicating if the allocation of this struct is devres-managed */
bool devm_allocated;
@@ -677,7 +710,8 @@ struct spi_controller {
bool rt;
bool auto_runtime_pm;
bool cur_msg_mapped;
- char last_cs;
+ char last_cs[SPI_CS_CNT_MAX];
+ char last_cs_index_mask;
bool last_cs_mode_high;
bool fallback;
struct completion xfer_completion;
@@ -1040,6 +1074,7 @@ struct spi_transfer {
unsigned len;
#define SPI_TRANS_FAIL_NO_START BIT(0)
+#define SPI_TRANS_FAIL_IO BIT(1)
u16 error;
dma_addr_t tx_dma;
@@ -1638,8 +1673,6 @@ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
/* Compatibility layer */
#define spi_master spi_controller
-#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX
-
#define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr)
#define spi_master_set_devdata(_ctlr, _data) \
spi_controller_set_devdata(_ctlr, _data)
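For context on the multi-CS conversion above: each spi_device now carries SPI_CS_CNT_MAX logical chipselect slots, and cs_index_mask selects which of them are active. A minimal sketch (not part of the patch; the helper name is made up) of walking the active slots with the new accessors:

#include <linux/bits.h>
#include <linux/spi/spi.h>

static void example_show_active_cs(struct spi_device *spi)
{
	u8 idx;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
		if (!(spi->cs_index_mask & BIT(idx)))
			continue;
		dev_info(&spi->dev, "logical CS %u -> physical CS %u%s\n",
			 idx, spi_get_chipselect(spi, idx),
			 spi_get_csgpiod(spi, idx) ? " (GPIO)" : "");
	}
}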
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 31d3d747a9db..90bc853cafb6 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -507,6 +507,8 @@ DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
raw_spin_lock(_T->lock),
raw_spin_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
+
DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
raw_spin_unlock(_T->lock))
@@ -515,23 +517,62 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
raw_spin_lock_irq(_T->lock),
raw_spin_unlock_irq(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+
DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
raw_spin_lock_irqsave(_T->lock, _T->flags),
raw_spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
+ raw_spin_trylock_irqsave(_T->lock, _T->flags))
+
DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
spin_lock(_T->lock),
spin_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
+
DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
spin_lock_irq(_T->lock),
spin_unlock_irq(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
+ spin_trylock_irq(_T->lock))
+
DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
spin_lock_irqsave(_T->lock, _T->flags),
spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
+ spin_trylock_irqsave(_T->lock, _T->flags))
+
+DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
+ read_lock(_T->lock),
+ read_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
+ read_lock_irq(_T->lock),
+ read_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
+ read_lock_irqsave(_T->lock, _T->flags),
+ read_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
+ write_lock(_T->lock),
+ write_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
+ write_lock_irq(_T->lock),
+ write_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
+ write_lock_irqsave(_T->lock, _T->flags),
+ write_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
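The hunk above adds trylock-based guard classes (the _try variants, e.g. spinlock_try) and new rwlock guard classes for the <linux/cleanup.h> machinery. A minimal sketch of the non-conditional classes, assuming the usual guard()/scoped_guard() helpers; the lock variables are illustrative only:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static DEFINE_RWLOCK(example_rwlock);

static void example_guards(void)
{
	guard(spinlock_irqsave)(&example_lock);
	/* example_lock is held with IRQs saved; both restored on return */

	scoped_guard(read_lock, &example_rwlock) {
		/* read side of example_rwlock held only within this block */
	}
}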
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 6c461573434d..9dec4861d09f 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -68,28 +68,37 @@ typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
typedef int (splice_direct_actor)(struct pipe_inode_info *,
struct splice_desc *);
-extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
- loff_t *, size_t, unsigned int,
- splice_actor *);
-extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
- struct splice_desc *, splice_actor *);
-extern ssize_t splice_to_pipe(struct pipe_inode_info *,
- struct splice_pipe_desc *);
-extern ssize_t add_to_pipe(struct pipe_inode_info *,
- struct pipe_buffer *);
-long vfs_splice_read(struct file *in, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags);
-extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
- splice_direct_actor *);
-extern long do_splice(struct file *in, loff_t *off_in,
- struct file *out, loff_t *off_out,
- size_t len, unsigned int flags);
+ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags,
+ splice_actor *actor);
+ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
+ struct splice_desc *sd, splice_actor *actor);
+ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd);
+ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf);
+ssize_t vfs_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+ssize_t splice_direct_to_actor(struct file *file, struct splice_desc *sd,
+ splice_direct_actor *actor);
+ssize_t do_splice(struct file *in, loff_t *off_in, struct file *out,
+ loff_t *off_out, size_t len, unsigned int flags);
+ssize_t do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len, unsigned int flags);
+ssize_t splice_file_range(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len);
-extern long do_tee(struct file *in, struct file *out, size_t len,
- unsigned int flags);
-extern ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out,
- loff_t *ppos, size_t len, unsigned int flags);
+static inline long splice_copy_file_range(struct file *in, loff_t pos_in,
+ struct file *out, loff_t pos_out,
+ size_t len)
+{
+ return splice_file_range(in, &pos_in, out, &pos_out, len);
+}
+
+ssize_t do_tee(struct file *in, struct file *out, size_t len,
+ unsigned int flags);
+ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags);
/*
* for dynamic pipe sizing
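splice_copy_file_range() above wraps the new splice_file_range() so filesystems get a generic data-copying fallback. A hedged sketch of a ->copy_file_range() method that simply defers to it (the examplefs_ name is hypothetical):

#include <linux/fs.h>
#include <linux/splice.h>

static ssize_t examplefs_copy_file_range(struct file *src, loff_t pos_in,
					 struct file *dst, loff_t pos_out,
					 size_t len, unsigned int flags)
{
	/* No filesystem-specific acceleration: just splice the data across. */
	return splice_copy_file_range(src, pos_in, dst, pos_out, len);
}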
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index e58306783d8e..adcbb8f23600 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -11,8 +11,6 @@
* SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
* stack traces often repeat, using stack depot allows to save about 100x space.
*
- * Stack traces are never removed from the stack depot.
- *
* Author: Alexander Potapenko <glider@google.com>
* Copyright (C) 2016 Google, Inc.
*
@@ -32,6 +30,18 @@ typedef u32 depot_stack_handle_t;
*/
#define STACK_DEPOT_EXTRA_BITS 5
+typedef u32 depot_flags_t;
+
+/*
+ * Flags that can be passed to stack_depot_save_flags(); see the comment next
+ * to its declaration for more details.
+ */
+#define STACK_DEPOT_FLAG_CAN_ALLOC ((depot_flags_t)0x0001)
+#define STACK_DEPOT_FLAG_GET ((depot_flags_t)0x0002)
+
+#define STACK_DEPOT_FLAGS_NUM 2
+#define STACK_DEPOT_FLAGS_MASK ((depot_flags_t)((1 << STACK_DEPOT_FLAGS_NUM) - 1))
+
/*
* Using stack depot requires its initialization, which can be done in 3 ways:
*
@@ -69,31 +79,39 @@ static inline int stack_depot_early_init(void) { return 0; }
#endif
/**
- * __stack_depot_save - Save a stack trace to stack depot
+ * stack_depot_save_flags - Save a stack trace to stack depot
*
* @entries: Pointer to the stack trace
* @nr_entries: Number of frames in the stack
* @alloc_flags: Allocation GFP flags
- * @can_alloc: Allocate stack pools (increased chance of failure if false)
+ * @depot_flags: Stack depot flags
+ *
+ * Saves a stack trace from @entries array of size @nr_entries.
+ *
+ * If STACK_DEPOT_FLAG_CAN_ALLOC is set in @depot_flags, stack depot can
+ * replenish the stack pools in case no space is left (allocates using GFP
+ * flags of @alloc_flags). Otherwise, stack depot avoids any allocations and
+ * fails if no space is left to store the stack trace.
*
- * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
- * %true, stack depot can replenish the stack pools in case no space is left
- * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
- * any allocations and fails if no space is left to store the stack trace.
+ * If STACK_DEPOT_FLAG_GET is set in @depot_flags, stack depot will increment
+ * the refcount on the saved stack trace if it already exists in stack depot.
+ * Users of this flag must also call stack_depot_put() when keeping the stack
+ * trace is no longer required to avoid overflowing the refcount.
*
* If the provided stack trace comes from the interrupt context, only the part
* up to the interrupt entry is saved.
*
- * Context: Any context, but setting @can_alloc to %false is required if
+ * Context: Any context, but not setting STACK_DEPOT_FLAG_CAN_ALLOC is required if
* alloc_pages() cannot be used from the current context. Currently
* this is the case for contexts where neither %GFP_ATOMIC nor
* %GFP_NOWAIT can be used (NMI, raw_spin_lock).
*
* Return: Handle of the stack struct stored in depot, 0 on failure
*/
-depot_stack_handle_t __stack_depot_save(unsigned long *entries,
- unsigned int nr_entries,
- gfp_t gfp_flags, bool can_alloc);
+depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t gfp_flags,
+ depot_flags_t depot_flags);
/**
* stack_depot_save - Save a stack trace to stack depot
@@ -102,8 +120,11 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
* @nr_entries: Number of frames in the stack
* @alloc_flags: Allocation GFP flags
*
- * Context: Contexts where allocations via alloc_pages() are allowed.
- * See __stack_depot_save() for more details.
+ * Does not increment the refcount on the saved stack trace; see
+ * stack_depot_save_flags() for more details.
+ *
+ * Context: Contexts where allocations via alloc_pages() are allowed;
+ * see stack_depot_save_flags() for more details.
*
* Return: Handle of the stack trace stored in depot, 0 on failure
*/
@@ -142,6 +163,18 @@ int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces);
/**
+ * stack_depot_put - Drop a reference to a stack trace from stack depot
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ *
+ * The stack trace is evicted from stack depot once all references to it have
+ * been dropped (once the number of stack_depot_put() calls matches the
+ * number of stack_depot_save_flags() calls with STACK_DEPOT_FLAG_GET set for
+ * this stack trace).
+ */
+void stack_depot_put(depot_stack_handle_t handle);
+
+/**
* stack_depot_set_extra_bits - Set extra bits in a stack depot handle
*
* @handle: Stack depot handle returned from stack_depot_save()
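Taken together, the stackdepot changes above pair STACK_DEPOT_FLAG_GET with the new stack_depot_put() to make saved traces refcounted and evictable. A minimal sketch of that pairing (illustrative helpers, not from the patch):

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t example_record_stack(gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, 16, 1);
	return stack_depot_save_flags(entries, nr, gfp,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}

static void example_release_stack(depot_stack_handle_t handle)
{
	if (handle)
		stack_depot_put(handle);
}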
diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h
index 5c4ae1a26183..d8dbef6b7fc2 100644
--- a/include/linux/surface_aggregator/serial_hub.h
+++ b/include/linux/surface_aggregator/serial_hub.h
@@ -12,7 +12,7 @@
#ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
#define _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
-#include <linux/crc-ccitt.h>
+#include <linux/crc-itu-t.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/list.h>
@@ -188,7 +188,7 @@ static_assert(sizeof(struct ssh_command) == 8);
*/
static inline u16 ssh_crc(const u8 *buf, size_t len)
{
- return crc_ccitt_false(0xffff, buf, len);
+ return crc_itu_t(0xffff, buf, len);
}
/*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index f6dd6575b905..4db00ddad261 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -397,9 +397,6 @@ void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);
-extern void lru_cache_add_inactive_or_unevictable(struct page *page,
- struct vm_area_struct *vma);
-
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
@@ -490,13 +487,12 @@ extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
-extern struct swap_info_struct *page_swap_info(struct page *);
-extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
+struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
-sector_t swap_page_sector(struct page *page);
+sector_t swap_folio_sector(struct folio *folio);
static inline void put_swap_device(struct swap_info_struct *si)
{
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index fd9d12de7e92..5c0dbef55792 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -71,9 +71,12 @@ struct clone_args;
struct open_how;
struct mount_attr;
struct landlock_ruleset_attr;
+struct lsm_ctx;
enum landlock_rule_type;
struct cachestat_range;
struct cachestat;
+struct statmount;
+struct mnt_id_req;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -407,6 +410,12 @@ asmlinkage long sys_statfs64(const char __user *path, size_t sz,
asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf);
asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
struct statfs64 __user *buf);
+asmlinkage long sys_statmount(const struct mnt_id_req __user *req,
+ struct statmount __user *buf, size_t bufsize,
+ unsigned int flags);
+asmlinkage long sys_listmount(const struct mnt_id_req __user *req,
+ u64 __user *buf, size_t bufsize,
+ unsigned int flags);
asmlinkage long sys_truncate(const char __user *path, long length);
asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
#if BITS_PER_LONG == 32
@@ -949,6 +958,11 @@ asmlinkage long sys_cachestat(unsigned int fd,
struct cachestat_range __user *cstat_range,
struct cachestat __user *cstat, unsigned int flags);
asmlinkage long sys_map_shadow_stack(unsigned long addr, unsigned long size, unsigned int flags);
+asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+ size_t *size, __u32 flags);
+asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+ size_t size, __u32 flags);
+asmlinkage long sys_lsm_list_modules(u64 *ids, size_t *size, u32 flags);
/*
* Architecture-specific system calls
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index cee814d5d1ac..bf84595a4e86 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -51,18 +51,23 @@ enum thermal_notify_event {
THERMAL_DEVICE_POWER_CAPABILITY_CHANGED, /* power capability changed */
THERMAL_TABLE_CHANGED, /* Thermal table(s) changed */
THERMAL_EVENT_KEEP_ALIVE, /* Request for user space handler to respond */
+ THERMAL_TZ_BIND_CDEV, /* Cooling dev is bound to the thermal zone */
+ THERMAL_TZ_UNBIND_CDEV, /* Cooling dev is unbound from the thermal zone */
+ THERMAL_INSTANCE_WEIGHT_CHANGED, /* Thermal instance weight changed */
};
/**
* struct thermal_trip - representation of a point in temperature domain
* @temperature: temperature value in milliCelsius
* @hysteresis: relative hysteresis in milliCelsius
+ * @threshold: trip crossing notification threshold in milliCelsius
* @type: trip point type
* @priv: pointer to driver data associated with this trip
*/
struct thermal_trip {
int temperature;
int hysteresis;
+ int threshold;
enum thermal_trip_type type;
void *priv;
};
@@ -115,6 +120,7 @@ struct thermal_cooling_device {
* @id: unique id number for each thermal zone
* @type: the thermal zone device type
* @device: &struct device for this thermal zone
+ * @removal: removal completion
* @trip_temp_attrs: attributes for trip points for sysfs: trip temperature
* @trip_type_attrs: attributes for trip points for sysfs: trip type
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
@@ -149,11 +155,13 @@ struct thermal_cooling_device {
* @node: node in thermal_tz_list (in thermal_core.c)
* @poll_queue: delayed work for polling
* @notify_event: Last notification event
+ * @suspended: thermal zone suspend indicator
*/
struct thermal_zone_device {
int id;
char type[THERMAL_NAME_LENGTH];
struct device device;
+ struct completion removal;
struct attribute_group trips_attribute_group;
struct thermal_attr *trip_temp_attrs;
struct thermal_attr *trip_type_attrs;
@@ -181,6 +189,7 @@ struct thermal_zone_device {
struct list_head node;
struct delayed_work poll_queue;
enum thermal_notify_event notify_event;
+ bool suspended;
};
/**
@@ -193,6 +202,8 @@ struct thermal_zone_device {
* thermal zone.
* @throttle: callback called for every trip point even if temperature is
* below the trip point temperature
+ * @update_tz: callback called when thermal zone internals have changed, e.g.
+ * thermal cooling instance was added/removed
* @governor_list: node in thermal_governor_list (in thermal_core.c)
*/
struct thermal_governor {
@@ -201,6 +212,8 @@ struct thermal_governor {
void (*unbind_from_tz)(struct thermal_zone_device *tz);
int (*throttle)(struct thermal_zone_device *tz,
const struct thermal_trip *trip);
+ void (*update_tz)(struct thermal_zone_device *tz,
+ enum thermal_notify_event reason);
struct list_head governor_list;
};
@@ -280,10 +293,6 @@ int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
struct thermal_trip *trip);
int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
struct thermal_trip *trip);
-
-int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id,
- const struct thermal_trip *trip);
-
int for_each_thermal_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data);
@@ -291,16 +300,11 @@ int thermal_zone_for_each_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data);
int thermal_zone_get_num_trips(struct thermal_zone_device *tz);
+void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
+ struct thermal_trip *trip, int temp);
int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp);
-#ifdef CONFIG_THERMAL_ACPI
-int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp);
-int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp);
-int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
-int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
-#endif
-
#ifdef CONFIG_THERMAL
struct thermal_zone_device *thermal_zone_device_register_with_trips(
const char *type,
diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
index b0542cd11aeb..415a7ca2b882 100644
--- a/include/linux/uidgid.h
+++ b/include/linux/uidgid.h
@@ -17,6 +17,7 @@
struct user_namespace;
extern struct user_namespace init_user_ns;
+struct uid_gid_map;
typedef struct {
uid_t val;
@@ -138,6 +139,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
return from_kgid(ns, gid) != (gid_t) -1;
}
+u32 map_id_down(struct uid_gid_map *map, u32 id);
+u32 map_id_up(struct uid_gid_map *map, u32 id);
+
#else
static inline kuid_t make_kuid(struct user_namespace *from, uid_t uid)
@@ -186,6 +190,15 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
return gid_valid(gid);
}
+static inline u32 map_id_down(struct uid_gid_map *map, u32 id)
+{
+ return id;
+}
+
+static inline u32 map_id_up(struct uid_gid_map *map, u32 id)
+{
+ return id;
+}
#endif /* CONFIG_USER_NS */
#endif /* _LINUX_UIDGID_H */
diff --git a/include/linux/uio.h b/include/linux/uio.h
index b6214cbf2a43..bea9c89922d9 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -347,8 +347,6 @@ ssize_t import_iovec(int type, const struct iovec __user *uvec,
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
struct iov_iter *i, bool compat);
-int import_single_range(int type, void __user *buf, size_t len,
- struct iovec *iov, struct iov_iter *i);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);
static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index f2dc19f40d05..e4056547fbe6 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -93,6 +93,17 @@ extern int mwriteprotect_range(struct mm_struct *dst_mm,
extern long uffd_wp_range(struct vm_area_struct *vma,
unsigned long start, unsigned long len, bool enable_wp);
+/* move_pages */
+void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
+void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
+ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
+ unsigned long dst_start, unsigned long src_start,
+ unsigned long len, __u64 flags);
+int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma,
+ unsigned long dst_addr, unsigned long src_addr);
+
/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
struct vm_userfaultfd_ctx vm_ctx)
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 8abfa1240040..747943bc8cc2 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -41,9 +41,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSTEAL_KSWAPD,
PGSTEAL_DIRECT,
PGSTEAL_KHUGEPAGED,
- PGDEMOTE_KSWAPD,
- PGDEMOTE_DIRECT,
- PGDEMOTE_KHUGEPAGED,
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
PGSCAN_KHUGEPAGED,
@@ -145,6 +142,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_ZSWAP
ZSWPIN,
ZSWPOUT,
+ ZSWPWB,
#endif
#ifdef CONFIG_X86
DIRECT_MAP_LEVEL2_SPLIT,
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index fed855bae6d8..343906a98d6e 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -556,19 +556,25 @@ static inline void mod_lruvec_state(struct lruvec *lruvec,
local_irq_restore(flags);
}
-void __mod_lruvec_page_state(struct page *page,
+void __lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val);
-static inline void mod_lruvec_page_state(struct page *page,
+static inline void lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val)
{
unsigned long flags;
local_irq_save(flags);
- __mod_lruvec_page_state(page, idx, val);
+ __lruvec_stat_mod_folio(folio, idx, val);
local_irq_restore(flags);
}
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ lruvec_stat_mod_folio(page_folio(page), idx, val);
+}
+
#else
static inline void __mod_lruvec_state(struct lruvec *lruvec,
@@ -583,37 +589,25 @@ static inline void mod_lruvec_state(struct lruvec *lruvec,
mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}
-static inline void __mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
-{
- __mod_node_page_state(page_pgdat(page), idx, val);
-}
-
-static inline void mod_lruvec_page_state(struct page *page,
+static inline void __lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val)
{
- mod_node_page_state(page_pgdat(page), idx, val);
+ __mod_node_page_state(folio_pgdat(folio), idx, val);
}
-#endif /* CONFIG_MEMCG */
-
-static inline void __inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline void lruvec_stat_mod_folio(struct folio *folio,
+ enum node_stat_item idx, int val)
{
- __mod_lruvec_page_state(page, idx, 1);
+ mod_node_page_state(folio_pgdat(folio), idx, val);
}
-static inline void __dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
{
- __mod_lruvec_page_state(page, idx, -1);
+ mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline void __lruvec_stat_mod_folio(struct folio *folio,
- enum node_stat_item idx, int val)
-{
- __mod_lruvec_page_state(&folio->page, idx, val);
-}
+#endif /* CONFIG_MEMCG */
static inline void __lruvec_stat_add_folio(struct folio *folio,
enum node_stat_item idx)
@@ -627,24 +621,6 @@ static inline void __lruvec_stat_sub_folio(struct folio *folio,
__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
-static inline void inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- mod_lruvec_page_state(page, idx, 1);
-}
-
-static inline void dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- mod_lruvec_page_state(page, idx, -1);
-}
-
-static inline void lruvec_stat_mod_folio(struct folio *folio,
- enum node_stat_item idx, int val)
-{
- mod_lruvec_page_state(&folio->page, idx, val);
-}
-
static inline void lruvec_stat_add_folio(struct folio *folio,
enum node_stat_item idx)
{
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 24b1e5070f4d..b0b9604b76b8 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -491,7 +491,7 @@ struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
-int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
+extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 083387c00f0c..6d0a14f7019d 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -193,7 +193,6 @@ void inode_io_list_del(struct inode *inode);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
- might_sleep();
wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 2a60ce39cfde..0b709f5bc65f 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -5,19 +5,41 @@
#include <linux/types.h>
#include <linux/mm_types.h>
+struct lruvec;
+
extern u64 zswap_pool_total_size;
extern atomic_t zswap_stored_pages;
#ifdef CONFIG_ZSWAP
+struct zswap_lruvec_state {
+ /*
+ * Number of pages in zswap that should be protected from the shrinker.
+ * This number is an estimate of the following counts:
+ *
+ * a) Recent page faults.
+ * b) Recent insertion to the zswap LRU. This includes new zswap stores,
+ * as well as recent zswap LRU rotations.
+ *
+ * These pages are likely to be warm, and might incur IO if they are written
+ * to swap.
+ */
+ atomic_long_t nr_zswap_protected;
+};
+
bool zswap_store(struct folio *folio);
bool zswap_load(struct folio *folio);
void zswap_invalidate(int type, pgoff_t offset);
void zswap_swapon(int type);
void zswap_swapoff(int type);
-
+void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
+void zswap_lruvec_state_init(struct lruvec *lruvec);
+void zswap_folio_swapin(struct folio *folio);
+bool is_zswap_enabled(void);
#else
+struct zswap_lruvec_state {};
+
static inline bool zswap_store(struct folio *folio)
{
return false;
@@ -31,6 +53,14 @@ static inline bool zswap_load(struct folio *folio)
static inline void zswap_invalidate(int type, pgoff_t offset) {}
static inline void zswap_swapon(int type) {}
static inline void zswap_swapoff(int type) {}
+static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
+static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
+static inline void zswap_folio_swapin(struct folio *folio) {}
+
+static inline bool is_zswap_enabled(void)
+{
+ return false;
+}
#endif
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index 947973623dc7..60a7d0ce3080 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -30,7 +30,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
return -1;
len = iph_totlen(pkt->skb, iph);
- thoff = iph->ihl * 4;
+ thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4);
if (pkt->skb->len < len)
return -1;
else if (len < thoff)
diff --git a/include/net/scm.h b/include/net/scm.h
index e8c76b4be2fe..cf68acec4d70 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -5,6 +5,7 @@
#include <linux/limits.h>
#include <linux/net.h>
#include <linux/cred.h>
+#include <linux/file.h>
#include <linux/security.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
@@ -208,5 +209,13 @@ static inline void scm_recv_unix(struct socket *sock, struct msghdr *msg,
scm_destroy_cred(scm);
}
+static inline int scm_recv_one_fd(struct file *f, int __user *ufd,
+ unsigned int flags)
+{
+ if (!ufd)
+ return -EFAULT;
+ return receive_fd(f, ufd, flags);
+}
+
#endif /* __LINUX_NET_SCM_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 144ba48bb07b..87f0e6c2e1f2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1788,8 +1788,6 @@ struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
-#include <linux/jump_label.h>
-extern struct static_key_false_deferred tcp_md5_needed;
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
const union tcp_md5_addr *addr,
int family, bool any_l3index);
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index 647781080613..b04afced4cc9 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -127,12 +127,35 @@ struct tcp_ao_info {
struct rcu_head rcu;
};
+#ifdef CONFIG_TCP_MD5SIG
+#include <linux/jump_label.h>
+extern struct static_key_false_deferred tcp_md5_needed;
+#define static_branch_tcp_md5() static_branch_unlikely(&tcp_md5_needed.key)
+#else
+#define static_branch_tcp_md5() false
+#endif
+#ifdef CONFIG_TCP_AO
+/* TCP-AO structures and functions */
+#include <linux/jump_label.h>
+extern struct static_key_false_deferred tcp_ao_needed;
+#define static_branch_tcp_ao() static_branch_unlikely(&tcp_ao_needed.key)
+#else
+#define static_branch_tcp_ao() false
+#endif
+
+static inline bool tcp_hash_should_produce_warnings(void)
+{
+ return static_branch_tcp_md5() || static_branch_tcp_ao();
+}
+
#define tcp_hash_fail(msg, family, skb, fmt, ...) \
do { \
const struct tcphdr *th = tcp_hdr(skb); \
char hdr_flags[6]; \
char *f = hdr_flags; \
\
+ if (!tcp_hash_should_produce_warnings()) \
+ break; \
if (th->fin) \
*f++ = 'F'; \
if (th->syn) \
@@ -159,9 +182,6 @@ do { \
#ifdef CONFIG_TCP_AO
/* TCP-AO structures and functions */
-#include <linux/jump_label.h>
-extern struct static_key_false_deferred tcp_ao_needed;
-
struct tcp4_ao_context {
__be32 saddr;
__be32 daddr;
diff --git a/include/trace/events/ksm.h b/include/trace/events/ksm.h
index b5ac35c1d0e8..e728647b5d26 100644
--- a/include/trace/events/ksm.h
+++ b/include/trace/events/ksm.h
@@ -245,6 +245,39 @@ TRACE_EVENT(ksm_remove_rmap_item,
__entry->pfn, __entry->rmap_item, __entry->mm)
);
+/**
+ * ksm_advisor - called after the advisor has run
+ *
+ * @scan_time: scan time in seconds
+ * @pages_to_scan: new pages_to_scan value
+ * @cpu_percent: cpu usage in percent
+ *
+ * Allows tracing of the KSM advisor.
+ */
+TRACE_EVENT(ksm_advisor,
+
+ TP_PROTO(s64 scan_time, unsigned long pages_to_scan,
+ unsigned int cpu_percent),
+
+ TP_ARGS(scan_time, pages_to_scan, cpu_percent),
+
+ TP_STRUCT__entry(
+ __field(s64, scan_time)
+ __field(unsigned long, pages_to_scan)
+ __field(unsigned int, cpu_percent)
+ ),
+
+ TP_fast_assign(
+ __entry->scan_time = scan_time;
+ __entry->pages_to_scan = pages_to_scan;
+ __entry->cpu_percent = cpu_percent;
+ ),
+
+ TP_printk("ksm scan time %lld pages_to_scan %lu cpu percent %u",
+ __entry->scan_time, __entry->pages_to_scan,
+ __entry->cpu_percent)
+);
+
#endif /* _TRACE_KSM_H */
/* This part must be outside protection */
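For reference, TRACE_EVENT(ksm_advisor, ...) above generates a trace_ksm_advisor() hook; a hypothetical call site might look like this (illustrative only, not the actual mm/ksm.c advisor code):

#include <trace/events/ksm.h>

static void example_advisor_done(s64 scan_time, unsigned long pages_to_scan,
				 unsigned int cpu_percent)
{
	trace_ksm_advisor(scan_time, pages_to_scan, cpu_percent);
}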
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 6188ad0d9e0d..dbb01b4b7451 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -493,33 +493,30 @@ DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
*/
DECLARE_EVENT_CLASS(sched_stat_runtime,
- TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+ TP_PROTO(struct task_struct *tsk, u64 runtime),
- TP_ARGS(tsk, __perf_count(runtime), vruntime),
+ TP_ARGS(tsk, __perf_count(runtime)),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, runtime )
- __field( u64, vruntime )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->runtime = runtime;
- __entry->vruntime = vruntime;
),
- TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
+ TP_printk("comm=%s pid=%d runtime=%Lu [ns]",
__entry->comm, __entry->pid,
- (unsigned long long)__entry->runtime,
- (unsigned long long)__entry->vruntime)
+ (unsigned long long)__entry->runtime)
);
DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
- TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
- TP_ARGS(tsk, runtime, vruntime));
+ TP_PROTO(struct task_struct *tsk, u64 runtime),
+ TP_ARGS(tsk, runtime));
/*
* Tracepoint for showing priority inheritance modifying a tasks
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index b4bc2828fa09..1ef58a04fc57 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -46,22 +46,21 @@ DEFINE_EVENT(timer_class, timer_init,
/**
* timer_start - called when the timer is started
- * @timer: pointer to struct timer_list
- * @expires: the timers expiry time
- * @flags: the timers flags
+ * @timer: pointer to struct timer_list
+ * @bucket_expiry: the bucket expiry time
*/
TRACE_EVENT(timer_start,
TP_PROTO(struct timer_list *timer,
- unsigned long expires,
- unsigned int flags),
+ unsigned long bucket_expiry),
- TP_ARGS(timer, expires, flags),
+ TP_ARGS(timer, bucket_expiry),
TP_STRUCT__entry(
__field( void *, timer )
__field( void *, function )
__field( unsigned long, expires )
+ __field( unsigned long, bucket_expiry )
__field( unsigned long, now )
__field( unsigned int, flags )
),
@@ -69,15 +68,16 @@ TRACE_EVENT(timer_start,
TP_fast_assign(
__entry->timer = timer;
__entry->function = timer->function;
- __entry->expires = expires;
+ __entry->expires = timer->expires;
+ __entry->bucket_expiry = bucket_expiry;
__entry->now = jiffies;
- __entry->flags = flags;
+ __entry->flags = timer->flags;
),
- TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s",
+ TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] bucket_expiry=%lu cpu=%u idx=%u flags=%s",
__entry->timer, __entry->function, __entry->expires,
(long)__entry->expires - __entry->now,
- __entry->flags & TIMER_CPUMASK,
+ __entry->bucket_expiry, __entry->flags & TIMER_CPUMASK,
__entry->flags >> TIMER_ARRAYSHIFT,
decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK))
);
@@ -142,6 +142,26 @@ DEFINE_EVENT(timer_class, timer_cancel,
TP_ARGS(timer)
);
+TRACE_EVENT(timer_base_idle,
+
+ TP_PROTO(bool is_idle, unsigned int cpu),
+
+ TP_ARGS(is_idle, cpu),
+
+ TP_STRUCT__entry(
+ __field( bool, is_idle )
+ __field( unsigned int, cpu )
+ ),
+
+ TP_fast_assign(
+ __entry->is_idle = is_idle;
+ __entry->cpu = cpu;
+ ),
+
+ TP_printk("is_idle=%d cpu=%d",
+ __entry->is_idle, __entry->cpu)
+);
+
#define decode_clockid(type) \
__print_symbolic(type, \
{ CLOCK_REALTIME, "CLOCK_REALTIME" }, \
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 756b013fb832..75f00965ab15 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -829,8 +829,21 @@ __SYSCALL(__NR_futex_wait, sys_futex_wait)
#define __NR_futex_requeue 456
__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
+#define __NR_statmount 457
+__SYSCALL(__NR_statmount, sys_statmount)
+
+#define __NR_listmount 458
+__SYSCALL(__NR_listmount, sys_listmount)
+
+#define __NR_lsm_get_self_attr 459
+__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr)
+#define __NR_lsm_set_self_attr 460
+__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
+#define __NR_lsm_list_modules 461
+__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
+
#undef __NR_syscalls
-#define __NR_syscalls 457
+#define __NR_syscalls 462
/*
* 32 bit systems traditionally used different
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index da43810b7485..48ad69f7722e 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -316,6 +316,7 @@ typedef int __bitwise __kernel_rwf_t;
#define PAGE_IS_SWAPPED (1 << 4)
#define PAGE_IS_PFNZERO (1 << 5)
#define PAGE_IS_HUGE (1 << 6)
+#define PAGE_IS_SOFT_DIRTY (1 << 7)
/*
* struct page_region - Page region with flags
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 01766dd839b0..c17bb096ea68 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -25,6 +25,7 @@
#define KEXEC_FILE_UNLOAD 0x00000001
#define KEXEC_FILE_ON_CRASH 0x00000002
#define KEXEC_FILE_NO_INITRAMFS 0x00000004
+#define KEXEC_FILE_DEBUG 0x00000008
/* These values match the ELF architecture values.
* Unless there is a good reason that should continue to be the case.
diff --git a/include/uapi/linux/lsm.h b/include/uapi/linux/lsm.h
new file mode 100644
index 000000000000..f8aef9ade549
--- /dev/null
+++ b/include/uapi/linux/lsm.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Linux Security Modules (LSM) - User space API
+ *
+ * Copyright (C) 2022 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2022 Intel Corporation
+ */
+
+#ifndef _UAPI_LINUX_LSM_H
+#define _UAPI_LINUX_LSM_H
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+
+/**
+ * struct lsm_ctx - LSM context information
+ * @id: the LSM id number, see LSM_ID_XXX
+ * @flags: LSM specific flags
+ * @len: length of the lsm_ctx struct, @ctx and any other data or padding
+ * @ctx_len: the size of @ctx
+ * @ctx: the LSM context value
+ *
+ * The @len field MUST be equal to the size of the lsm_ctx struct
+ * plus any additional padding and/or data placed after @ctx.
+ *
+ * In all cases @ctx_len MUST be equal to the length of @ctx.
+ * If @ctx is a string value it should be nul terminated with
+ * @ctx_len equal to `strlen(@ctx) + 1`. Binary values are
+ * supported.
+ *
+ * The @flags and @ctx fields SHOULD only be interpreted by the
+ * LSM specified by @id; they MUST be set to zero/0 when not used.
+ */
+struct lsm_ctx {
+ __u64 id;
+ __u64 flags;
+ __u64 len;
+ __u64 ctx_len;
+ __u8 ctx[] __counted_by(ctx_len);
+};
+
+/*
+ * ID tokens to identify Linux Security Modules (LSMs)
+ *
+ * These token values are used to uniquely identify specific LSMs
+ * in the kernel as well as in the kernel's LSM userspace API.
+ *
+ * A value of zero/0 is considered undefined and should not be used
+ * outside the kernel. Values 1-99 are reserved for potential
+ * future use.
+ */
+#define LSM_ID_UNDEF 0
+#define LSM_ID_CAPABILITY 100
+#define LSM_ID_SELINUX 101
+#define LSM_ID_SMACK 102
+#define LSM_ID_TOMOYO 103
+#define LSM_ID_APPARMOR 104
+#define LSM_ID_YAMA 105
+#define LSM_ID_LOADPIN 106
+#define LSM_ID_SAFESETID 107
+#define LSM_ID_LOCKDOWN 108
+#define LSM_ID_BPF 109
+#define LSM_ID_LANDLOCK 110
+
+/*
+ * LSM_ATTR_XXX definitions identify different LSM attributes
+ * which are used in the kernel's LSM userspace API. Support
+ * for these attributes varies across the different LSMs. None
+ * are required.
+ *
+ * A value of zero/0 is considered undefined and should not be used
+ * outside the kernel. Values 1-99 are reserved for potential
+ * future use.
+ */
+#define LSM_ATTR_UNDEF 0
+#define LSM_ATTR_CURRENT 100
+#define LSM_ATTR_EXEC 101
+#define LSM_ATTR_FSCREATE 102
+#define LSM_ATTR_KEYCREATE 103
+#define LSM_ATTR_PREV 104
+#define LSM_ATTR_SOCKCREATE 105
+
+/*
+ * LSM_FLAG_XXX definitions identify special handling instructions
+ * for the API.
+ */
+#define LSM_FLAG_SINGLE 0x0001
+
+#endif /* _UAPI_LINUX_LSM_H */
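A userspace sketch (not kernel code) of the API above: query the "current" attribute of every active LSM with lsm_get_self_attr(). The syscall number 459 matches the asm-generic table earlier in this diff (other architectures may differ), and the sketch assumes headers installed from a kernel with this series; error handling is trimmed.

#include <linux/lsm.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_lsm_get_self_attr
#define __NR_lsm_get_self_attr 459
#endif

int main(void)
{
	char buf[4096] __attribute__((aligned(8)));
	size_t size = sizeof(buf);
	char *p = buf;
	long count;

	count = syscall(__NR_lsm_get_self_attr, LSM_ATTR_CURRENT,
			(struct lsm_ctx *)buf, &size, 0);
	if (count < 0)
		return 1;

	for (long i = 0; i < count; i++) {
		struct lsm_ctx *ctx = (struct lsm_ctx *)p;

		printf("lsm %llu: %.*s\n", (unsigned long long)ctx->id,
		       (int)ctx->ctx_len, (const char *)ctx->ctx);
		p += ctx->len;	/* @len spans the whole entry incl. padding */
	}
	return 0;
}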
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index bb242fdcfe6b..ad5478dbad00 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -138,4 +138,74 @@ struct mount_attr {
/* List of all mount_attr versions. */
#define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */
+
+/*
+ * Structure for getting mount/superblock/filesystem info with statmount(2).
+ *
+ * The interface is similar to statx(2): individual fields or groups can be
+ * selected with the @mask argument of statmount(). Kernel will set the @mask
+ * field according to the supported fields.
+ *
+ * If string fields are selected, then the caller needs to pass a buffer that
+ * has space after the fixed part of the structure. Nul terminated strings are
+ * copied there and offsets relative to @str are stored in the relevant fields.
+ * If the buffer is too small, then EOVERFLOW is returned. The actually used
+ * size is returned in @size.
+ */
+struct statmount {
+ __u32 size; /* Total size, including strings */
+ __u32 __spare1;
+ __u64 mask; /* What results were written */
+ __u32 sb_dev_major; /* Device ID */
+ __u32 sb_dev_minor;
+ __u64 sb_magic; /* ..._SUPER_MAGIC */
+ __u32 sb_flags; /* SB_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */
+ __u32 fs_type; /* [str] Filesystem type */
+ __u64 mnt_id; /* Unique ID of mount */
+ __u64 mnt_parent_id; /* Unique ID of parent (for root == mnt_id) */
+ __u32 mnt_id_old; /* Reused IDs used in proc/.../mountinfo */
+ __u32 mnt_parent_id_old;
+ __u64 mnt_attr; /* MOUNT_ATTR_... */
+ __u64 mnt_propagation; /* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */
+ __u64 mnt_peer_group; /* ID of shared peer group */
+ __u64 mnt_master; /* Mount receives propagation from this ID */
+ __u64 propagate_from; /* Propagation from in current namespace */
+ __u32 mnt_root; /* [str] Root of mount relative to root of fs */
+ __u32 mnt_point; /* [str] Mountpoint relative to current root */
+ __u64 __spare2[50];
+ char str[]; /* Variable size part containing strings */
+};
+
+/*
+ * Structure for passing mount ID and miscellaneous parameters to statmount(2)
+ * and listmount(2).
+ *
+ * For statmount(2) @param represents the request mask.
+ * For listmount(2) @param represents the last listed mount id (or zero).
+ */
+struct mnt_id_req {
+ __u32 size;
+ __u32 spare;
+ __u64 mnt_id;
+ __u64 param;
+};
+
+/* List of all mnt_id_req versions. */
+#define MNT_ID_REQ_SIZE_VER0 24 /* sizeof first published struct */
+
+/*
+ * @mask bits for statmount(2)
+ */
+#define STATMOUNT_SB_BASIC 0x00000001U /* Want/got sb_... */
+#define STATMOUNT_MNT_BASIC 0x00000002U /* Want/got mnt_... */
+#define STATMOUNT_PROPAGATE_FROM 0x00000004U /* Want/got propagate_from */
+#define STATMOUNT_MNT_ROOT 0x00000008U /* Want/got mnt_root */
+#define STATMOUNT_MNT_POINT 0x00000010U /* Want/got mnt_point */
+#define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */
+
+/*
+ * Special @mnt_id values that can be passed to listmount
+ */
+#define LSMT_ROOT 0xffffffffffffffff /* root mount */
+
#endif /* _UAPI_LINUX_MOUNT_H */
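A userspace sketch (not kernel code) of the statmount(2) interface described above: obtain the 64-bit unique mount ID of "/" with statx() and STATX_MNT_ID_UNIQUE (added in the statx hunk below), then request the mount point string. The syscall number 457 is taken from the asm-generic table earlier in this diff, the sketch assumes headers from a kernel with this series, and error handling is trimmed.

#include <fcntl.h>
#include <linux/mount.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_statmount
#define __NR_statmount 457
#endif
#ifndef STATX_MNT_ID_UNIQUE
#define STATX_MNT_ID_UNIQUE 0x00004000U
#endif

int main(void)
{
	char buf[4096] __attribute__((aligned(8)));
	struct statmount *sm = (struct statmount *)buf;
	struct mnt_id_req req;
	struct statx sx;

	if (statx(AT_FDCWD, "/", 0, STATX_MNT_ID_UNIQUE, &sx) < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.size = MNT_ID_REQ_SIZE_VER0;
	req.mnt_id = sx.stx_mnt_id;		/* unique 64-bit mount ID */
	req.param = STATMOUNT_MNT_POINT;	/* request mask */

	if (syscall(__NR_statmount, &req, sm, sizeof(buf), 0) < 0)
		return 1;

	if (sm->mask & STATMOUNT_MNT_POINT)
		printf("mount point: %s\n", sm->str + sm->mnt_point);
	return 0;
}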
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 39c6a250dd1b..3a64499b0f5d 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -204,6 +204,8 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */
+ PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */
+
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -235,6 +237,8 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
+ PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT,
+
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
@@ -982,6 +986,12 @@ enum perf_event_type {
* { u64 nr;
* { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
* { u64 from, to, flags } lbr[nr];
+ * #
+ * # The format of the counters is decided by the
+ * # "branch_counter_nr" and "branch_counter_width",
+ * # which are defined in the ABI.
+ * #
+ * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS
* } && PERF_SAMPLE_BRANCH_STACK
*
* { u64 abi; # enum perf_sample_regs_abi
@@ -1427,6 +1437,9 @@ struct perf_branch_entry {
reserved:31;
};
+/* Size of used info bits in struct perf_branch_entry */
+#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33
+
union perf_sample_weight {
__u64 full;
#if defined(__LITTLE_ENDIAN_BITFIELD)
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 7cab2c65d3d7..2f2ee82d5517 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -154,6 +154,7 @@ struct statx {
#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
+#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 0dbc81015018..2841e4ea8f2c 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -41,7 +41,8 @@
UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
UFFD_FEATURE_WP_UNPOPULATED | \
UFFD_FEATURE_POISON | \
- UFFD_FEATURE_WP_ASYNC)
+ UFFD_FEATURE_WP_ASYNC | \
+ UFFD_FEATURE_MOVE)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@@ -50,6 +51,7 @@
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY | \
(__u64)1 << _UFFDIO_ZEROPAGE | \
+ (__u64)1 << _UFFDIO_MOVE | \
(__u64)1 << _UFFDIO_WRITEPROTECT | \
(__u64)1 << _UFFDIO_CONTINUE | \
(__u64)1 << _UFFDIO_POISON)
@@ -73,6 +75,7 @@
#define _UFFDIO_WAKE (0x02)
#define _UFFDIO_COPY (0x03)
#define _UFFDIO_ZEROPAGE (0x04)
+#define _UFFDIO_MOVE (0x05)
#define _UFFDIO_WRITEPROTECT (0x06)
#define _UFFDIO_CONTINUE (0x07)
#define _UFFDIO_POISON (0x08)
@@ -92,6 +95,8 @@
struct uffdio_copy)
#define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \
struct uffdio_zeropage)
+#define UFFDIO_MOVE _IOWR(UFFDIO, _UFFDIO_MOVE, \
+ struct uffdio_move)
#define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \
struct uffdio_writeprotect)
#define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
@@ -222,6 +227,9 @@ struct uffdio_api {
* asynchronous mode is supported in which the write fault is
* automatically resolved and write-protection is un-set.
* It implies UFFD_FEATURE_WP_UNPOPULATED.
+ *
+ * UFFD_FEATURE_MOVE indicates that the kernel supports moving
+ * existing page contents from userspace.
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
@@ -239,6 +247,7 @@ struct uffdio_api {
#define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
#define UFFD_FEATURE_POISON (1<<14)
#define UFFD_FEATURE_WP_ASYNC (1<<15)
+#define UFFD_FEATURE_MOVE (1<<16)
__u64 features;
__u64 ioctls;
@@ -347,6 +356,24 @@ struct uffdio_poison {
__s64 updated;
};
+struct uffdio_move {
+ __u64 dst;
+ __u64 src;
+ __u64 len;
+ /*
+ * Especially if used to atomically remove memory from the
+ * address space the wake on the dst range is not needed.
+ */
+#define UFFDIO_MOVE_MODE_DONTWAKE ((__u64)1<<0)
+#define UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES ((__u64)1<<1)
+ __u64 mode;
+ /*
+ * "move" is written by the ioctl and must be at the end: the
+ * copy_from_user will not read the last 8 bytes.
+ */
+ __s64 move;
+};
+
/*
* Flags for the userfaultfd(2) system call itself.
*/
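Illustrative only: a sketch of driving the new UFFDIO_MOVE ioctl from a fault-handling thread, assuming the fd already negotiated UFFD_FEATURE_MOVE via UFFDIO_API and the destination range is registered; error handling is reduced to a return code.

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int uffd_move_range(int uffd, unsigned long dst, unsigned long src,
			   unsigned long len)
{
	struct uffdio_move mv = {
		.dst  = dst,
		.src  = src,
		.len  = len,
		.mode = 0,	/* or UFFDIO_MOVE_MODE_DONTWAKE */
	};

	if (ioctl(uffd, UFFDIO_MOVE, &mv) < 0)
		return -1;
	/* mv.move reports how many bytes were actually moved. */
	return mv.move == (__s64)len ? 0 : -1;
}

Compared with UFFDIO_COPY this remaps the existing pages into the destination rather than duplicating them, which is the point of the new feature.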
diff --git a/include/uapi/regulator/regulator.h b/include/uapi/regulator/regulator.h
new file mode 100644
index 000000000000..71bf71a22e7f
--- /dev/null
+++ b/include/uapi/regulator/regulator.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Regulator uapi header
+ *
+ * Author: Naresh Solanki <Naresh.Solanki@9elements.com>
+ */
+
+#ifndef _UAPI_REGULATOR_H
+#define _UAPI_REGULATOR_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+/*
+ * Regulator notifier events.
+ *
+ * UNDER_VOLTAGE Regulator output is under voltage.
+ * OVER_CURRENT Regulator output current is too high.
+ * REGULATION_OUT Regulator output is out of regulation.
+ * FAIL Regulator output has failed.
+ * OVER_TEMP Regulator over temp.
+ * FORCE_DISABLE Regulator forcibly shut down by software.
+ * VOLTAGE_CHANGE Regulator voltage changed.
+ * Data passed is old voltage cast to (void *).
+ * DISABLE Regulator was disabled.
+ * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
+ * Data passed is "struct pre_voltage_change_data"
+ * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
+ * Data passed is old voltage cast to (void *).
+ * PRE_DISABLE Regulator is about to be disabled
+ * ABORT_DISABLE Regulator disable failed for some reason
+ *
+ * NOTE: These events can be OR'ed together when passed into handler.
+ */
+
+#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01
+#define REGULATOR_EVENT_OVER_CURRENT 0x02
+#define REGULATOR_EVENT_REGULATION_OUT 0x04
+#define REGULATOR_EVENT_FAIL 0x08
+#define REGULATOR_EVENT_OVER_TEMP 0x10
+#define REGULATOR_EVENT_FORCE_DISABLE 0x20
+#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
+#define REGULATOR_EVENT_DISABLE 0x80
+#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
+#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
+#define REGULATOR_EVENT_PRE_DISABLE 0x400
+#define REGULATOR_EVENT_ABORT_DISABLE 0x800
+#define REGULATOR_EVENT_ENABLE 0x1000
+/*
+ * Following notifications should be emitted only if detected condition
+ * is such that the HW is likely to still be working but consumers should
+ * take a recovery action to prevent problems escalating into errors.
+ */
+#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000
+#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000
+#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000
+#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000
+#define REGULATOR_EVENT_WARN_MASK 0x1E000
+
+struct reg_genl_event {
+ char reg_name[32];
+ uint64_t event;
+};
+
+/* attributes of reg_genl_family */
+enum {
+ REG_GENL_ATTR_UNSPEC,
+ REG_GENL_ATTR_EVENT, /* reg event info needed by user space */
+ __REG_GENL_ATTR_MAX,
+};
+
+#define REG_GENL_ATTR_MAX (__REG_GENL_ATTR_MAX - 1)
+
+/* commands supported by the reg_genl_family */
+enum {
+ REG_GENL_CMD_UNSPEC,
+ REG_GENL_CMD_EVENT, /* kernel->user notifications for reg events */
+ __REG_GENL_CMD_MAX,
+};
+
+#define REG_GENL_CMD_MAX (__REG_GENL_CMD_MAX - 1)
+
+#define REG_GENL_FAMILY_NAME "reg_event"
+#define REG_GENL_VERSION 0x01
+#define REG_GENL_MCAST_GROUP_NAME "reg_mc_group"
+
+#endif /* _UAPI_REGULATOR_H */
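Illustrative only: once a generic netlink listener has resolved REG_GENL_FAMILY_NAME and joined REG_GENL_MCAST_GROUP_NAME (libnl plumbing elided, and the installed header path is an assumption), the REG_GENL_ATTR_EVENT payload can be decoded as below.

#include <stdio.h>
#include <regulator/regulator.h>	/* installed name of this uapi header (assumed) */

static void handle_reg_event(const struct reg_genl_event *ev)
{
	unsigned long long mask = (unsigned long long)ev->event;

	if (ev->event & REGULATOR_EVENT_WARN_MASK)
		printf("%s: warning 0x%llx, take recovery action\n",
		       ev->reg_name, mask);
	else if (ev->event & (REGULATOR_EVENT_UNDER_VOLTAGE |
			      REGULATOR_EVENT_OVER_CURRENT |
			      REGULATOR_EVENT_FAIL))
		printf("%s: hard fault 0x%llx\n", ev->reg_name, mask);
	else
		printf("%s: event 0x%llx\n", ev->reg_name, mask);
}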
diff --git a/init/Kconfig b/init/Kconfig
index 9ffb103fc927..8df18f3a9748 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1676,6 +1676,56 @@ config MEMBARRIER
If unsure, say Y.
+config KCMP
+ bool "Enable kcmp() system call" if EXPERT
+ help
+ Enable the kernel resource comparison system call. It provides
+ user-space with the ability to compare two processes to see if they
+ share a common resource, such as a file descriptor or even virtual
+ memory space.
+
+ If unsure, say N.
+
+config RSEQ
+ bool "Enable rseq() system call" if EXPERT
+ default y
+ depends on HAVE_RSEQ
+ select MEMBARRIER
+ help
+ Enable the restartable sequences system call. It provides a
+ user-space cache for the current CPU number value, which
+ speeds up getting the current CPU number from user-space,
+ as well as an ABI to speed up user-space operations on
+ per-CPU data.
+
+ If unsure, say Y.
+
+config DEBUG_RSEQ
+ default n
+ bool "Enable debugging of rseq() system call" if EXPERT
+ depends on RSEQ && DEBUG_KERNEL
+ help
+ Enable extra debugging checks for the rseq system call.
+
+ If unsure, say N.
+
+config CACHESTAT_SYSCALL
+ bool "Enable cachestat() system call" if EXPERT
+ default y
+ help
+ Enable the cachestat system call, which queries the page cache
+ statistics of a file (number of cached pages, dirty pages,
+ pages marked for writeback, (recently) evicted pages).
+
+ If unsure say Y here.
+
+config PC104
+ bool "PC/104 support" if EXPERT
+ help
+ Expose PC/104 form factor device drivers and options available for
+ selection and configuration. Enable this option if your target
+ machine has a PC/104 bus.
+
config KALLSYMS
bool "Load all symbols for debugging/ksymoops" if EXPERT
default y
@@ -1740,57 +1790,12 @@ config KALLSYMS_BASE_RELATIVE
# end of the "standard kernel features (expert users)" menu
-# syscall, maps, verifier
-
config ARCH_HAS_MEMBARRIER_CALLBACKS
bool
config ARCH_HAS_MEMBARRIER_SYNC_CORE
bool
-config KCMP
- bool "Enable kcmp() system call" if EXPERT
- help
- Enable the kernel resource comparison system call. It provides
- user-space with the ability to compare two processes to see if they
- share a common resource, such as a file descriptor or even virtual
- memory space.
-
- If unsure, say N.
-
-config RSEQ
- bool "Enable rseq() system call" if EXPERT
- default y
- depends on HAVE_RSEQ
- select MEMBARRIER
- help
- Enable the restartable sequences system call. It provides a
- user-space cache for the current CPU number value, which
- speeds up getting the current CPU number from user-space,
- as well as an ABI to speed up user-space operations on
- per-CPU data.
-
- If unsure, say Y.
-
-config CACHESTAT_SYSCALL
- bool "Enable cachestat() system call" if EXPERT
- default y
- help
- Enable the cachestat system call, which queries the page cache
- statistics of a file (number of cached pages, dirty pages,
- pages marked for writeback, (recently) evicted pages).
-
- If unsure say Y here.
-
-config DEBUG_RSEQ
- default n
- bool "Enabled debugging of rseq() system call" if EXPERT
- depends on RSEQ && DEBUG_KERNEL
- help
- Enable extra debugging checks for the rseq system call.
-
- If unsure, say N.
-
config HAVE_PERF_EVENTS
bool
help
@@ -1805,13 +1810,6 @@ config PERF_USE_VMALLOC
help
See tools/perf/design.txt for details
-config PC104
- bool "PC/104 support" if EXPERT
- help
- Expose PC/104 form factor device drivers and options available for
- selection and configuration. Enable this option if your target
- machine has a PC/104 bus.
-
menu "Kernel Performance Events And Counters"
config PERF_EVENTS
diff --git a/init/init_task.c b/init/init_task.c
index 5727d42149c3..6f6485d554df 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -51,8 +51,7 @@ static struct sighand_struct init_sighand = {
};
#ifdef CONFIG_SHADOW_CALL_STACK
-unsigned long init_shadow_call_stack[SCS_SIZE / sizeof(long)]
- __init_task_data = {
+unsigned long init_shadow_call_stack[SCS_SIZE / sizeof(long)] = {
[(SCS_SIZE / sizeof(long)) - 1] = SCS_END_MAGIC
};
#endif
@@ -61,12 +60,7 @@ unsigned long init_shadow_call_stack[SCS_SIZE / sizeof(long)]
* Set up the first task table, touch at your own risk! Base=0,
* limit=0x1fffff (=2MB)
*/
-struct task_struct init_task
-#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
- __init_task_data
-#endif
- __aligned(L1_CACHE_BYTES)
-= {
+struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
#ifdef CONFIG_THREAD_INFO_IN_TASK
.thread_info = INIT_THREAD_INFO(init_task),
.stack_refcount = REFCOUNT_INIT(1),
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index 241245cb54a6..bf2fb26a6539 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -16,8 +16,7 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
if (cache->nr_cached < cache->max_cached) {
cache->nr_cached++;
wq_stack_add_head(&entry->node, &cache->list);
- /* KASAN poisons object */
- kasan_slab_free_mempool(entry);
+ kasan_mempool_poison_object(entry);
return true;
}
return false;
@@ -34,7 +33,7 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
struct io_cache_entry *entry;
entry = container_of(cache->list.next, struct io_cache_entry, node);
- kasan_unpoison_range(entry, cache->elem_size);
+ kasan_mempool_unpoison_object(entry, cache->elem_size);
cache->list.next = cache->list.next->next;
cache->nr_cached--;
return entry;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 9626a363f121..9839016f82ea 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -561,7 +561,7 @@ static void io_eventfd_ops(struct rcu_head *rcu)
int ops = atomic_xchg(&ev_fd->ops, 0);
if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
- eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
+ eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
/* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
* ordering in a race but if references are 0 we know we have to free
@@ -597,7 +597,7 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
goto out;
if (likely(eventfd_signal_allowed())) {
- eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
+ eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
} else {
atomic_inc(&ev_fd->refs);
if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
diff --git a/io_uring/openclose.c b/io_uring/openclose.c
index fb73adb89067..74fc22461f48 100644
--- a/io_uring/openclose.c
+++ b/io_uring/openclose.c
@@ -241,7 +241,7 @@ int io_close(struct io_kiocb *req, unsigned int issue_flags)
return -EAGAIN;
}
- file = __close_fd_get_file(close->fd);
+ file = file_close_fd_locked(files, close->fd);
spin_unlock(&files->file_lock);
if (!file)
goto err;
diff --git a/io_uring/splice.c b/io_uring/splice.c
index 7c4469e9540e..3b659cd23e9d 100644
--- a/io_uring/splice.c
+++ b/io_uring/splice.c
@@ -51,7 +51,7 @@ int io_tee(struct io_kiocb *req, unsigned int issue_flags)
struct file *out = sp->file_out;
unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
struct file *in;
- long ret = 0;
+ ssize_t ret = 0;
WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
@@ -92,7 +92,7 @@ int io_splice(struct io_kiocb *req, unsigned int issue_flags)
unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
loff_t *poff_in, *poff_out;
struct file *in;
- long ret = 0;
+ ssize_t ret = 0;
WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
diff --git a/kernel/async.c b/kernel/async.c
index b2c4ba5686ee..673bba6bdf3a 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -145,6 +145,39 @@ static void async_run_entry_fn(struct work_struct *work)
wake_up(&async_done);
}
+static async_cookie_t __async_schedule_node_domain(async_func_t func,
+ void *data, int node,
+ struct async_domain *domain,
+ struct async_entry *entry)
+{
+ async_cookie_t newcookie;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&entry->domain_list);
+ INIT_LIST_HEAD(&entry->global_list);
+ INIT_WORK(&entry->work, async_run_entry_fn);
+ entry->func = func;
+ entry->data = data;
+ entry->domain = domain;
+
+ spin_lock_irqsave(&async_lock, flags);
+
+ /* allocate cookie and queue */
+ newcookie = entry->cookie = next_cookie++;
+
+ list_add_tail(&entry->domain_list, &domain->pending);
+ if (domain->registered)
+ list_add_tail(&entry->global_list, &async_global_pending);
+
+ atomic_inc(&entry_count);
+ spin_unlock_irqrestore(&async_lock, flags);
+
+ /* schedule for execution */
+ queue_work_node(node, system_unbound_wq, &entry->work);
+
+ return newcookie;
+}
+
/**
* async_schedule_node_domain - NUMA specific version of async_schedule_domain
* @func: function to execute asynchronously
@@ -186,29 +219,8 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
func(data, newcookie);
return newcookie;
}
- INIT_LIST_HEAD(&entry->domain_list);
- INIT_LIST_HEAD(&entry->global_list);
- INIT_WORK(&entry->work, async_run_entry_fn);
- entry->func = func;
- entry->data = data;
- entry->domain = domain;
-
- spin_lock_irqsave(&async_lock, flags);
-
- /* allocate cookie and queue */
- newcookie = entry->cookie = next_cookie++;
-
- list_add_tail(&entry->domain_list, &domain->pending);
- if (domain->registered)
- list_add_tail(&entry->global_list, &async_global_pending);
-
- atomic_inc(&entry_count);
- spin_unlock_irqrestore(&async_lock, flags);
-
- /* schedule for execution */
- queue_work_node(node, system_unbound_wq, &entry->work);
- return newcookie;
+ return __async_schedule_node_domain(func, data, node, domain, entry);
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
@@ -232,6 +244,35 @@ async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
EXPORT_SYMBOL_GPL(async_schedule_node);
/**
+ * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
+ * @func: function to execute asynchronously
+ * @dev: device argument to be passed to function
+ *
+ * @dev is used as both the argument for the function and to provide NUMA
+ * context for where to run the function.
+ *
+ * If the asynchronous execution of @func is scheduled successfully, return
+ * true. Otherwise, do nothing and return false, unlike async_schedule_dev()
+ * that will run the function synchronously then.
+ */
+bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
+{
+ struct async_entry *entry;
+
+ entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);
+
+ /* Give up if there is no memory or too much work. */
+ if (!entry || atomic_read(&entry_count) > MAX_WORK) {
+ kfree(entry);
+ return false;
+ }
+
+ __async_schedule_node_domain(func, dev, dev_to_node(dev),
+ &async_dfl_domain, entry);
+ return true;
+}
+
+/**
* async_synchronize_full - synchronize all asynchronous function calls
*
* This function waits until all asynchronous function calls have been done.
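Illustrative only: a sketch of the intended calling pattern for async_schedule_dev_nocall(), with a hypothetical resume_one_device() helper; unlike async_schedule_dev(), the caller must handle the synchronous fallback itself.

#include <linux/async.h>
#include <linux/device.h>

/* Hypothetical per-device work, matching the async_func_t signature */
static void resume_one_device(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	/* ... device-specific resume steps ... */
	(void)dev;
}

static void resume_device_maybe_async(struct device *dev)
{
	/* Returns false on OOM or backlog; do the work inline in that case */
	if (!async_schedule_dev_nocall(resume_one_device, dev))
		resume_one_device(dev, 0);
}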
diff --git a/kernel/audit.c b/kernel/audit.c
index 16205dd29843..9c8e5f732c4c 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -487,15 +487,19 @@ static void auditd_conn_free(struct rcu_head *rcu)
* @pid: auditd PID
* @portid: auditd netlink portid
* @net: auditd network namespace pointer
+ * @skb: the netlink command from the audit daemon
+ * @ack: netlink ack flag, cleared if ack'd here
*
* Description:
* This function will obtain and drop network namespace references as
* necessary. Returns zero on success, negative values on failure.
*/
-static int auditd_set(struct pid *pid, u32 portid, struct net *net)
+static int auditd_set(struct pid *pid, u32 portid, struct net *net,
+ struct sk_buff *skb, bool *ack)
{
unsigned long flags;
struct auditd_connection *ac_old, *ac_new;
+ struct nlmsghdr *nlh;
if (!pid || !net)
return -EINVAL;
@@ -507,6 +511,13 @@ static int auditd_set(struct pid *pid, u32 portid, struct net *net)
ac_new->portid = portid;
ac_new->net = get_net(net);
+ /* send the ack now to avoid a race with the queue backlog */
+ if (*ack) {
+ nlh = nlmsg_hdr(skb);
+ netlink_ack(skb, nlh, 0, NULL);
+ *ack = false;
+ }
+
spin_lock_irqsave(&auditd_conn_lock, flags);
ac_old = rcu_dereference_protected(auditd_conn,
lockdep_is_held(&auditd_conn_lock));
@@ -1200,7 +1211,8 @@ static int audit_replace(struct pid *pid)
return auditd_send_unicast_skb(skb);
}
-static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+ bool *ack)
{
u32 seq;
void *data;
@@ -1293,7 +1305,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
/* register a new auditd connection */
err = auditd_set(req_pid,
NETLINK_CB(skb).portid,
- sock_net(NETLINK_CB(skb).sk));
+ sock_net(NETLINK_CB(skb).sk),
+ skb, ack);
if (audit_enabled != AUDIT_OFF)
audit_log_config_change("audit_pid",
new_pid,
@@ -1538,9 +1551,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
* Parse the provided skb and deal with any messages that may be present,
* malformed skbs are discarded.
*/
-static void audit_receive(struct sk_buff *skb)
+static void audit_receive(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
+ bool ack;
/*
* len MUST be signed for nlmsg_next to be able to dec it below 0
* if the nlmsg_len was not aligned
@@ -1553,9 +1567,12 @@ static void audit_receive(struct sk_buff *skb)
audit_ctl_lock();
while (nlmsg_ok(nlh, len)) {
- err = audit_receive_msg(skb, nlh);
- /* if err or if this message says it wants a response */
- if (err || (nlh->nlmsg_flags & NLM_F_ACK))
+ ack = nlh->nlmsg_flags & NLM_F_ACK;
+ err = audit_receive_msg(skb, nlh, &ack);
+
+ /* send an ack if the user asked for one and audit_receive_msg
+ * didn't already do it, or if there was an error. */
+ if (ack || err)
netlink_ack(skb, nlh, err, NULL);
nlh = nlmsg_next(nlh, &len);
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index c56071f150f2..520b90dd97ec 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -164,13 +164,13 @@ struct cgroup_mgctx {
#define DEFINE_CGROUP_MGCTX(name) \
struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)
-extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;
/* iterate across the hierarchies */
#define for_each_root(root) \
- list_for_each_entry((root), &cgroup_roots, root_list)
+ list_for_each_entry_rcu((root), &cgroup_roots, root_list, \
+ lockdep_is_held(&cgroup_mutex))
/**
* for_each_subsys - iterate all enabled cgroup subsystems
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 76db6c67e39a..04d11a7dd95f 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -1262,6 +1262,40 @@ int cgroup1_get_tree(struct fs_context *fc)
return ret;
}
+/**
+ * task_get_cgroup1 - Acquires the associated cgroup of a task within a
+ * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
+ * hierarchy ID.
+ * @tsk: The target task
+ * @hierarchy_id: The ID of a cgroup1 hierarchy
+ *
+ * On success, the cgroup is returned. On failure, ERR_PTR is returned.
+ * We limit it to cgroup1 only.
+ */
+struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id)
+{
+ struct cgroup *cgrp = ERR_PTR(-ENOENT);
+ struct cgroup_root *root;
+ unsigned long flags;
+
+ rcu_read_lock();
+ for_each_root(root) {
+ /* cgroup1 only */
+ if (root == &cgrp_dfl_root)
+ continue;
+ if (root->hierarchy_id != hierarchy_id)
+ continue;
+ spin_lock_irqsave(&css_set_lock, flags);
+ cgrp = task_cgroup_from_root(tsk, root);
+ if (!cgrp || !cgroup_tryget(cgrp))
+ cgrp = ERR_PTR(-ENOENT);
+ spin_unlock_irqrestore(&css_set_lock, flags);
+ break;
+ }
+ rcu_read_unlock();
+ return cgrp;
+}
+
static int __init cgroup1_wq_init(void)
{
/*
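Illustrative only: a sketch of a consumer of the new helper (report_v1_cgroup() is hypothetical); the important part is dropping the reference that task_get_cgroup1() takes via cgroup_tryget().

#include <linux/cgroup.h>
#include <linux/err.h>
#include <linux/printk.h>
#include <linux/sched.h>

static int report_v1_cgroup(struct task_struct *tsk, int hierarchy_id)
{
	struct cgroup *cgrp = task_get_cgroup1(tsk, hierarchy_id);

	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);	/* -ENOENT: no such v1 hierarchy */
	pr_info("pid %d: hierarchy %d cgroup %s\n",
		task_pid_nr(tsk), hierarchy_id, cgrp->kn->name);
	cgroup_put(cgrp);
	return 0;
}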
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 4b9ff41ca603..8f3cef1a4d8a 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1315,7 +1315,7 @@ static void cgroup_exit_root_id(struct cgroup_root *root)
void cgroup_free_root(struct cgroup_root *root)
{
- kfree(root);
+ kfree_rcu(root, rcu);
}
static void cgroup_destroy_root(struct cgroup_root *root)
@@ -1347,10 +1347,9 @@ static void cgroup_destroy_root(struct cgroup_root *root)
spin_unlock_irq(&css_set_lock);
- if (!list_empty(&root->root_list)) {
- list_del(&root->root_list);
- cgroup_root_count--;
- }
+ WARN_ON_ONCE(list_empty(&root->root_list));
+ list_del_rcu(&root->root_list);
+ cgroup_root_count--;
if (!have_favordynmods)
cgroup_favor_dynmods(root, false);
@@ -1390,7 +1389,15 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
}
}
- BUG_ON(!res_cgroup);
+ /*
+ * If cgroup_mutex is not held, the cgrp_cset_link will be freed
+ * before we remove the cgroup root from the root_list. Consequently,
+ * when accessing a cgroup root, the cset_link may have already been
+ * freed, resulting in a NULL res_cgroup. However, by holding the
+ * cgroup_mutex, we ensure that res_cgroup can't be NULL.
+ * If we don't hold cgroup_mutex in the caller, we must do the NULL
+ * check.
+ */
return res_cgroup;
}
@@ -1413,6 +1420,11 @@ current_cgns_cgroup_from_root(struct cgroup_root *root)
rcu_read_unlock();
+ /*
+ * The namespace_sem is held by current, so the root cgroup can't
+ * be umounted. Therefore, we can ensure that the res is non-NULL.
+ */
+ WARN_ON_ONCE(!res);
return res;
}
@@ -1449,7 +1461,6 @@ static struct cgroup *current_cgns_cgroup_dfl(void)
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
struct cgroup_root *root)
{
- lockdep_assert_held(&cgroup_mutex);
lockdep_assert_held(&css_set_lock);
return __cset_cgroup_from_root(cset, root);
@@ -1457,7 +1468,9 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
/*
* Return the cgroup for "task" from the given hierarchy. Must be
- * called with cgroup_mutex and css_set_lock held.
+ * called with css_set_lock held to prevent task's groups from being modified.
+ * Must be called with either cgroup_mutex or rcu read lock to prevent the
+ * cgroup root from being destroyed.
*/
struct cgroup *task_cgroup_from_root(struct task_struct *task,
struct cgroup_root *root)
@@ -2032,7 +2045,7 @@ void init_cgroup_root(struct cgroup_fs_context *ctx)
struct cgroup_root *root = ctx->root;
struct cgroup *cgrp = &root->cgrp;
- INIT_LIST_HEAD(&root->root_list);
+ INIT_LIST_HEAD_RCU(&root->root_list);
atomic_set(&root->nr_cgrps, 1);
cgrp->root = root;
init_cgroup_housekeeping(cgrp);
@@ -2115,7 +2128,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
* care of subsystems' refcounts, which are explicitly dropped in
* the failure exit path.
*/
- list_add(&root->root_list, &cgroup_roots);
+ list_add_rcu(&root->root_list, &cgroup_roots);
cgroup_root_count++;
/*
@@ -6265,7 +6278,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
if (!buf)
goto out;
- cgroup_lock();
+ rcu_read_lock();
spin_lock_irq(&css_set_lock);
for_each_root(root) {
@@ -6276,6 +6289,11 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
if (root == &cgrp_dfl_root && !READ_ONCE(cgrp_dfl_visible))
continue;
+ cgrp = task_cgroup_from_root(tsk, root);
+ /* The root has already been unmounted. */
+ if (!cgrp)
+ continue;
+
seq_printf(m, "%d:", root->hierarchy_id);
if (root != &cgrp_dfl_root)
for_each_subsys(ss, ssid)
@@ -6286,9 +6304,6 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
seq_printf(m, "%sname=%s", count ? "," : "",
root->name);
seq_putc(m, ':');
-
- cgrp = task_cgroup_from_root(tsk, root);
-
/*
* On traditional hierarchies, all zombie tasks show up as
* belonging to the root cgroup. On the default hierarchy,
@@ -6320,7 +6335,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
retval = 0;
out_unlock:
spin_unlock_irq(&css_set_lock);
- cgroup_unlock();
+ rcu_read_unlock();
kfree(buf);
out:
return retval;
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 615daaf87f1f..dfbb16aca9f4 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -25,6 +25,7 @@
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -43,6 +44,7 @@
#include <linux/sched/isolation.h>
#include <linux/cgroup.h>
#include <linux/wait.h>
+#include <linux/workqueue.h>
DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
@@ -204,6 +206,11 @@ struct cpuset {
*/
static cpumask_var_t subpartitions_cpus;
+/*
+ * Exclusive CPUs in isolated partitions
+ */
+static cpumask_var_t isolated_cpus;
+
/* List of remote partition root children */
static struct list_head remote_children;
@@ -1317,6 +1324,7 @@ static void compute_effective_cpumask(struct cpumask *new_cpus,
*/
enum partition_cmd {
partcmd_enable, /* Enable partition root */
+ partcmd_enablei, /* Enable isolated partition root */
partcmd_disable, /* Disable partition root */
partcmd_update, /* Update parent's effective_cpus */
partcmd_invalidate, /* Make partition invalid */
@@ -1419,6 +1427,109 @@ static void reset_partition_data(struct cpuset *cs)
}
/*
+ * partition_xcpus_newstate - Exclusive CPUs state change
+ * @old_prs: old partition_root_state
+ * @new_prs: new partition_root_state
+ * @xcpus: exclusive CPUs with state change
+ */
+static void partition_xcpus_newstate(int old_prs, int new_prs, struct cpumask *xcpus)
+{
+ WARN_ON_ONCE(old_prs == new_prs);
+ if (new_prs == PRS_ISOLATED)
+ cpumask_or(isolated_cpus, isolated_cpus, xcpus);
+ else
+ cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
+}
+
+/*
+ * partition_xcpus_add - Add new exclusive CPUs to partition
+ * @new_prs: new partition_root_state
+ * @parent: parent cpuset
+ * @xcpus: exclusive CPUs to be added
+ * Return: true if isolated_cpus modified, false otherwise
+ *
+ * Remote partition if parent == NULL
+ */
+static bool partition_xcpus_add(int new_prs, struct cpuset *parent,
+ struct cpumask *xcpus)
+{
+ bool isolcpus_updated;
+
+ WARN_ON_ONCE(new_prs < 0);
+ lockdep_assert_held(&callback_lock);
+ if (!parent)
+ parent = &top_cpuset;
+
+
+ if (parent == &top_cpuset)
+ cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus);
+
+ isolcpus_updated = (new_prs != parent->partition_root_state);
+ if (isolcpus_updated)
+ partition_xcpus_newstate(parent->partition_root_state, new_prs,
+ xcpus);
+
+ cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
+ return isolcpus_updated;
+}
+
+/*
+ * partition_xcpus_del - Remove exclusive CPUs from partition
+ * @old_prs: old partition_root_state
+ * @parent: parent cpuset
+ * @xcpus: exclusive CPUs to be removed
+ * Return: true if isolated_cpus modified, false otherwise
+ *
+ * Remote partition if parent == NULL
+ */
+static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
+ struct cpumask *xcpus)
+{
+ bool isolcpus_updated;
+
+ WARN_ON_ONCE(old_prs < 0);
+ lockdep_assert_held(&callback_lock);
+ if (!parent)
+ parent = &top_cpuset;
+
+ if (parent == &top_cpuset)
+ cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus);
+
+ isolcpus_updated = (old_prs != parent->partition_root_state);
+ if (isolcpus_updated)
+ partition_xcpus_newstate(old_prs, parent->partition_root_state,
+ xcpus);
+
+ cpumask_and(xcpus, xcpus, cpu_active_mask);
+ cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
+ return isolcpus_updated;
+}
+
+static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
+{
+ int ret;
+
+ lockdep_assert_cpus_held();
+
+ if (!isolcpus_updated)
+ return;
+
+ ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
+ WARN_ON_ONCE(ret < 0);
+}
+
+/**
+ * cpuset_cpu_is_isolated - Check if the given CPU is isolated
+ * @cpu: the CPU number to be checked
+ * Return: true if CPU is used in an isolated partition, false otherwise
+ */
+bool cpuset_cpu_is_isolated(int cpu)
+{
+ return cpumask_test_cpu(cpu, isolated_cpus);
+}
+EXPORT_SYMBOL_GPL(cpuset_cpu_is_isolated);
+
+/*
* compute_effective_exclusive_cpumask - compute effective exclusive CPUs
* @cs: cpuset
* @xcpus: effective exclusive CPUs value to be set
@@ -1456,14 +1567,18 @@ static inline bool is_local_partition(struct cpuset *cs)
/*
* remote_partition_enable - Enable current cpuset as a remote partition root
* @cs: the cpuset to update
+ * @new_prs: new partition_root_state
* @tmp: temporary masks
* Return: 1 if successful, 0 if error
*
* Enable the current cpuset to become a remote partition root taking CPUs
* directly from the top cpuset. cpuset_mutex must be held by the caller.
*/
-static int remote_partition_enable(struct cpuset *cs, struct tmpmasks *tmp)
+static int remote_partition_enable(struct cpuset *cs, int new_prs,
+ struct tmpmasks *tmp)
{
+ bool isolcpus_updated;
+
/*
* The user must have sysadmin privilege.
*/
@@ -1485,26 +1600,22 @@ static int remote_partition_enable(struct cpuset *cs, struct tmpmasks *tmp)
return 0;
spin_lock_irq(&callback_lock);
- cpumask_andnot(top_cpuset.effective_cpus,
- top_cpuset.effective_cpus, tmp->new_cpus);
- cpumask_or(subpartitions_cpus,
- subpartitions_cpus, tmp->new_cpus);
-
+ isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
+ list_add(&cs->remote_sibling, &remote_children);
if (cs->use_parent_ecpus) {
struct cpuset *parent = parent_cs(cs);
cs->use_parent_ecpus = false;
parent->child_ecpus_count--;
}
- list_add(&cs->remote_sibling, &remote_children);
spin_unlock_irq(&callback_lock);
+ update_unbound_workqueue_cpumask(isolcpus_updated);
/*
* Propagate changes in top_cpuset's effective_cpus down the hierarchy.
*/
update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
update_sibling_cpumasks(&top_cpuset, NULL, tmp);
-
return 1;
}
@@ -1519,23 +1630,22 @@ static int remote_partition_enable(struct cpuset *cs, struct tmpmasks *tmp)
*/
static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
{
+ bool isolcpus_updated;
+
compute_effective_exclusive_cpumask(cs, tmp->new_cpus);
WARN_ON_ONCE(!is_remote_partition(cs));
WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, subpartitions_cpus));
spin_lock_irq(&callback_lock);
- cpumask_andnot(subpartitions_cpus,
- subpartitions_cpus, tmp->new_cpus);
- cpumask_and(tmp->new_cpus,
- tmp->new_cpus, cpu_active_mask);
- cpumask_or(top_cpuset.effective_cpus,
- top_cpuset.effective_cpus, tmp->new_cpus);
list_del_init(&cs->remote_sibling);
+ isolcpus_updated = partition_xcpus_del(cs->partition_root_state,
+ NULL, tmp->new_cpus);
cs->partition_root_state = -cs->partition_root_state;
if (!cs->prs_err)
cs->prs_err = PERR_INVCPUS;
reset_partition_data(cs);
spin_unlock_irq(&callback_lock);
+ update_unbound_workqueue_cpumask(isolcpus_updated);
/*
* Propagate changes in top_cpuset's effective_cpus down the hierarchy.
@@ -1557,6 +1667,8 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
struct tmpmasks *tmp)
{
bool adding, deleting;
+ int prs = cs->partition_root_state;
+ int isolcpus_updated = 0;
if (WARN_ON_ONCE(!is_remote_partition(cs)))
return;
@@ -1580,21 +1692,12 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
goto invalidate;
spin_lock_irq(&callback_lock);
- if (adding) {
- cpumask_or(subpartitions_cpus,
- subpartitions_cpus, tmp->addmask);
- cpumask_andnot(top_cpuset.effective_cpus,
- top_cpuset.effective_cpus, tmp->addmask);
- }
- if (deleting) {
- cpumask_andnot(subpartitions_cpus,
- subpartitions_cpus, tmp->delmask);
- cpumask_and(tmp->delmask,
- tmp->delmask, cpu_active_mask);
- cpumask_or(top_cpuset.effective_cpus,
- top_cpuset.effective_cpus, tmp->delmask);
- }
+ if (adding)
+ isolcpus_updated += partition_xcpus_add(prs, NULL, tmp->addmask);
+ if (deleting)
+ isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask);
spin_unlock_irq(&callback_lock);
+ update_unbound_workqueue_cpumask(isolcpus_updated);
/*
* Propagate changes in top_cpuset's effective_cpus down the hierarchy.
@@ -1676,11 +1779,11 @@ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
* @tmp: Temporary addmask and delmask
* Return: 0 or a partition root state error code
*
- * For partcmd_enable, the cpuset is being transformed from a non-partition
- * root to a partition root. The effective_xcpus (cpus_allowed if effective_xcpus
- * not set) mask of the given cpuset will be taken away from parent's
- * effective_cpus. The function will return 0 if all the CPUs listed in
- * effective_xcpus can be granted or an error code will be returned.
+ * For partcmd_enable*, the cpuset is being transformed from a non-partition
+ * root to a partition root. The effective_xcpus (cpus_allowed if
+ * effective_xcpus not set) mask of the given cpuset will be taken away from
+ * parent's effective_cpus. The function will return 0 if all the CPUs listed
+ * in effective_xcpus can be granted or an error code will be returned.
*
* For partcmd_disable, the cpuset is being transformed from a partition
* root back to a non-partition root. Any CPUs in effective_xcpus will be
@@ -1695,7 +1798,7 @@ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
*
* For partcmd_invalidate, the current partition will be made invalid.
*
- * The partcmd_enable and partcmd_disable commands are used by
+ * The partcmd_enable* and partcmd_disable commands are used by
* update_prstate(). An error code may be returned and the caller will check
* for error.
*
@@ -1716,6 +1819,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
int part_error = PERR_NONE; /* Partition error? */
int subparts_delta = 0;
struct cpumask *xcpus; /* cs effective_xcpus */
+ int isolcpus_updated = 0;
bool nocpu;
lockdep_assert_held(&cpuset_mutex);
@@ -1760,7 +1864,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
nocpu = tasks_nocpu_error(parent, cs, xcpus);
- if (cmd == partcmd_enable) {
+ if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) {
/*
* Enabling partition root is not allowed if its
* effective_xcpus is empty or doesn't overlap with
@@ -1783,6 +1887,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
cpumask_copy(tmp->delmask, xcpus);
deleting = true;
subparts_delta++;
+ new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
} else if (cmd == partcmd_disable) {
/*
* May need to add cpus to parent's effective_cpus for
@@ -1792,6 +1897,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
cpumask_and(tmp->addmask, xcpus, parent->effective_xcpus);
if (adding)
subparts_delta--;
+ new_prs = PRS_MEMBER;
} else if (newmask) {
/*
* Empty cpumask is not allowed
@@ -1940,38 +2046,28 @@ write_error:
* newly deleted ones will be added back to effective_cpus.
*/
spin_lock_irq(&callback_lock);
- if (adding) {
- if (parent == &top_cpuset)
- cpumask_andnot(subpartitions_cpus,
- subpartitions_cpus, tmp->addmask);
- /*
- * Some of the CPUs in effective_xcpus might have been offlined.
- */
- cpumask_or(parent->effective_cpus,
- parent->effective_cpus, tmp->addmask);
- cpumask_and(parent->effective_cpus,
- parent->effective_cpus, cpu_active_mask);
- }
- if (deleting) {
- if (parent == &top_cpuset)
- cpumask_or(subpartitions_cpus,
- subpartitions_cpus, tmp->delmask);
- cpumask_andnot(parent->effective_cpus,
- parent->effective_cpus, tmp->delmask);
- }
-
- if (is_partition_valid(parent)) {
- parent->nr_subparts += subparts_delta;
- WARN_ON_ONCE(parent->nr_subparts < 0);
- }
-
if (old_prs != new_prs) {
cs->partition_root_state = new_prs;
if (new_prs <= 0)
cs->nr_subparts = 0;
}
+ /*
+ * Adding to parent's effective_cpus means deleting CPUs from cs
+ * and vice versa.
+ */
+ if (adding)
+ isolcpus_updated += partition_xcpus_del(old_prs, parent,
+ tmp->addmask);
+ if (deleting)
+ isolcpus_updated += partition_xcpus_add(new_prs, parent,
+ tmp->delmask);
+ if (is_partition_valid(parent)) {
+ parent->nr_subparts += subparts_delta;
+ WARN_ON_ONCE(parent->nr_subparts < 0);
+ }
spin_unlock_irq(&callback_lock);
+ update_unbound_workqueue_cpumask(isolcpus_updated);
if ((old_prs != new_prs) && (cmd == partcmd_update))
update_partition_exclusive(cs, new_prs);
@@ -2948,6 +3044,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
int err = PERR_NONE, old_prs = cs->partition_root_state;
struct cpuset *parent = parent_cs(cs);
struct tmpmasks tmpmask;
+ bool new_xcpus_state = false;
if (old_prs == new_prs)
return 0;
@@ -2977,6 +3074,9 @@ static int update_prstate(struct cpuset *cs, int new_prs)
goto out;
if (!old_prs) {
+ enum partition_cmd cmd = (new_prs == PRS_ROOT)
+ ? partcmd_enable : partcmd_enablei;
+
/*
* cpus_allowed cannot be empty.
*/
@@ -2985,19 +3085,18 @@ static int update_prstate(struct cpuset *cs, int new_prs)
goto out;
}
- err = update_parent_effective_cpumask(cs, partcmd_enable,
- NULL, &tmpmask);
+ err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
/*
* If an attempt to become local partition root fails,
* try to become a remote partition root instead.
*/
- if (err && remote_partition_enable(cs, &tmpmask))
+ if (err && remote_partition_enable(cs, new_prs, &tmpmask))
err = 0;
} else if (old_prs && new_prs) {
/*
* A change in load balance state only, no change in cpumasks.
*/
- ;
+ new_xcpus_state = true;
} else {
/*
* Switching back to member is always allowed even if it
@@ -3029,7 +3128,10 @@ out:
WRITE_ONCE(cs->prs_err, err);
if (!is_partition_valid(cs))
reset_partition_data(cs);
+ else if (new_xcpus_state)
+ partition_xcpus_newstate(old_prs, new_prs, cs->effective_xcpus);
spin_unlock_irq(&callback_lock);
+ update_unbound_workqueue_cpumask(new_xcpus_state);
/* Force update if switching back to member */
update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
@@ -3386,6 +3488,7 @@ typedef enum {
FILE_SUBPARTS_CPULIST,
FILE_EXCLUSIVE_CPULIST,
FILE_EFFECTIVE_XCPULIST,
+ FILE_ISOLATED_CPULIST,
FILE_CPU_EXCLUSIVE,
FILE_MEM_EXCLUSIVE,
FILE_MEM_HARDWALL,
@@ -3582,6 +3685,9 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
case FILE_SUBPARTS_CPULIST:
seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus));
break;
+ case FILE_ISOLATED_CPULIST:
+ seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus));
+ break;
default:
ret = -EINVAL;
}
@@ -3875,6 +3981,13 @@ static struct cftype dfl_files[] = {
.flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG,
},
+ {
+ .name = "cpus.isolated",
+ .seq_show = cpuset_common_seq_show,
+ .private = FILE_ISOLATED_CPULIST,
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ },
+
{ } /* terminate */
};
@@ -4194,6 +4307,7 @@ int __init cpuset_init(void)
BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL));
BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL));
cpumask_setall(top_cpuset.cpus_allowed);
nodes_setall(top_cpuset.mems_allowed);
@@ -4306,6 +4420,30 @@ void cpuset_force_rebuild(void)
force_rebuild = true;
}
+/*
+ * Attempt to acquire a cpus_read_lock while a hotplug operation may be in
+ * progress.
+ * Return: true if successful, false otherwise
+ *
+ * To avoid circular lock dependency between cpuset_mutex and cpus_read_lock,
+ * cpus_read_trylock() is used here to acquire the lock.
+ */
+static bool cpuset_hotplug_cpus_read_trylock(void)
+{
+ int retries = 0;
+
+ while (!cpus_read_trylock()) {
+ /*
+ * CPU hotplug still in progress. Retry 5 times
+ * with a 10ms wait before bailing out.
+ */
+ if (++retries > 5)
+ return false;
+ msleep(10);
+ }
+ return true;
+}
+
/**
* cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
* @cs: cpuset in interest
@@ -4322,6 +4460,7 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
bool cpus_updated;
bool mems_updated;
bool remote;
+ int partcmd = -1;
struct cpuset *parent;
retry:
wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
@@ -4353,11 +4492,13 @@ retry:
compute_partition_effective_cpumask(cs, &new_cpus);
if (remote && cpumask_empty(&new_cpus) &&
- partition_is_populated(cs, NULL)) {
+ partition_is_populated(cs, NULL) &&
+ cpuset_hotplug_cpus_read_trylock()) {
remote_partition_disable(cs, tmp);
compute_effective_cpumask(&new_cpus, cs, parent);
remote = false;
cpuset_force_rebuild();
+ cpus_read_unlock();
}
/*
@@ -4368,18 +4509,28 @@ retry:
* partitions.
*/
if (is_local_partition(cs) && (!is_partition_valid(parent) ||
- tasks_nocpu_error(parent, cs, &new_cpus))) {
- update_parent_effective_cpumask(cs, partcmd_invalidate, NULL, tmp);
- compute_effective_cpumask(&new_cpus, cs, parent);
- cpuset_force_rebuild();
- }
+ tasks_nocpu_error(parent, cs, &new_cpus)))
+ partcmd = partcmd_invalidate;
/*
* On the other hand, an invalid partition root may be transitioned
* back to a regular one.
*/
- else if (is_partition_valid(parent) && is_partition_invalid(cs)) {
- update_parent_effective_cpumask(cs, partcmd_update, NULL, tmp);
- if (is_partition_valid(cs)) {
+ else if (is_partition_valid(parent) && is_partition_invalid(cs))
+ partcmd = partcmd_update;
+
+ /*
+ * cpus_read_lock needs to be held before calling
+ * update_parent_effective_cpumask(). To avoid circular lock
+ * dependency between cpuset_mutex and cpus_read_lock,
+ * cpus_read_trylock() is used here to acquire the lock.
+ */
+ if (partcmd >= 0) {
+ if (!cpuset_hotplug_cpus_read_trylock())
+ goto update_tasks;
+
+ update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
+ cpus_read_unlock();
+ if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
compute_partition_effective_cpumask(cs, &new_cpus);
cpuset_force_rebuild();
}
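Illustrative only: a sketch of a kernel-side user of the newly exported helper, e.g. a subsystem picking a CPU for housekeeping work while leaving isolated partitions alone; pick_non_isolated_cpu() is hypothetical.

#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/errno.h>

static int pick_non_isolated_cpu(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		/* New helper: true if @cpu sits in an isolated partition */
		if (cpuset_cpu_is_isolated(cpu))
			continue;
		return cpu;
	}
	return -ENODEV;
}

Userspace sees the same information aggregated in the new root-only cpuset.cpus.isolated file added further down in this patch.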
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index c0adb7254b45..a8350d2d63e6 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -74,64 +74,109 @@ __bpf_kfunc void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
}
/**
- * cgroup_rstat_cpu_pop_updated - iterate and dismantle rstat_cpu updated tree
- * @pos: current position
- * @root: root of the tree to traversal
+ * cgroup_rstat_push_children - push children cgroups into the given list
+ * @head: current head of the list (= subtree root)
+ * @child: first child of the root
* @cpu: target cpu
+ * Return: A new singly linked list of cgroups to be flushed
*
- * Walks the updated rstat_cpu tree on @cpu from @root. %NULL @pos starts
- * the traversal and %NULL return indicates the end. During traversal,
- * each returned cgroup is unlinked from the tree. Must be called with the
- * matching cgroup_rstat_cpu_lock held.
+ * Iteratively traverse down the cgroup_rstat_cpu updated tree level by
+ * level and push all the parents first before their next level children
+ * into a singly linked list built from the tail backward like "pushing"
+ * cgroups into a stack. The root is pushed by the caller.
+ */
+static struct cgroup *cgroup_rstat_push_children(struct cgroup *head,
+ struct cgroup *child, int cpu)
+{
+ struct cgroup *chead = child; /* Head of child cgroup level */
+ struct cgroup *ghead = NULL; /* Head of grandchild cgroup level */
+ struct cgroup *parent, *grandchild;
+ struct cgroup_rstat_cpu *crstatc;
+
+ child->rstat_flush_next = NULL;
+
+next_level:
+ while (chead) {
+ child = chead;
+ chead = child->rstat_flush_next;
+ parent = cgroup_parent(child);
+
+ /* updated_next is parent cgroup terminated */
+ while (child != parent) {
+ child->rstat_flush_next = head;
+ head = child;
+ crstatc = cgroup_rstat_cpu(child, cpu);
+ grandchild = crstatc->updated_children;
+ if (grandchild != child) {
+ /* Push the grand child to the next level */
+ crstatc->updated_children = child;
+ grandchild->rstat_flush_next = ghead;
+ ghead = grandchild;
+ }
+ child = crstatc->updated_next;
+ crstatc->updated_next = NULL;
+ }
+ }
+
+ if (ghead) {
+ chead = ghead;
+ ghead = NULL;
+ goto next_level;
+ }
+ return head;
+}
+
+/**
+ * cgroup_rstat_updated_list - return a list of updated cgroups to be flushed
+ * @root: root of the cgroup subtree to traverse
+ * @cpu: target cpu
+ * Return: A singly linked list of cgroups to be flushed
+ *
+ * Walks the updated rstat_cpu tree on @cpu from @root. During traversal,
+ * each returned cgroup is unlinked from the updated tree.
*
* The only ordering guarantee is that, for a parent and a child pair
- * covered by a given traversal, if a child is visited, its parent is
- * guaranteed to be visited afterwards.
+ * covered by a given traversal, the child is before its parent in
+ * the list.
+ *
+ * Note that updated_children is self terminated and points to a list of
+ * child cgroups if not empty. Whereas updated_next is like a sibling link
+ * within the children list and terminated by the parent cgroup. An exception
+ * here is the cgroup root whose updated_next can be self terminated.
*/
-static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
- struct cgroup *root, int cpu)
+static struct cgroup *cgroup_rstat_updated_list(struct cgroup *root, int cpu)
{
- struct cgroup_rstat_cpu *rstatc;
- struct cgroup *parent;
-
- if (pos == root)
- return NULL;
+ raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
+ struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(root, cpu);
+ struct cgroup *head = NULL, *parent, *child;
+ unsigned long flags;
/*
- * We're gonna walk down to the first leaf and visit/remove it. We
- * can pick whatever unvisited node as the starting point.
+ * The _irqsave() is needed because cgroup_rstat_lock is
+ * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
+ * this lock with the _irq() suffix only disables interrupts on
+ * a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
+ * interrupts on both configurations. The _irqsave() ensures
+ * that interrupts are always disabled and later restored.
*/
- if (!pos) {
- pos = root;
- /* return NULL if this subtree is not on-list */
- if (!cgroup_rstat_cpu(pos, cpu)->updated_next)
- return NULL;
- } else {
- pos = cgroup_parent(pos);
- }
+ raw_spin_lock_irqsave(cpu_lock, flags);
- /* walk down to the first leaf */
- while (true) {
- rstatc = cgroup_rstat_cpu(pos, cpu);
- if (rstatc->updated_children == pos)
- break;
- pos = rstatc->updated_children;
- }
+ /* Return NULL if this subtree is not on-list */
+ if (!rstatc->updated_next)
+ goto unlock_ret;
/*
- * Unlink @pos from the tree. As the updated_children list is
+ * Unlink @root from its parent. As the updated_children list is
* singly linked, we have to walk it to find the removal point.
- * However, due to the way we traverse, @pos will be the first
- * child in most cases. The only exception is @root.
*/
- parent = cgroup_parent(pos);
+ parent = cgroup_parent(root);
if (parent) {
struct cgroup_rstat_cpu *prstatc;
struct cgroup **nextp;
prstatc = cgroup_rstat_cpu(parent, cpu);
nextp = &prstatc->updated_children;
- while (*nextp != pos) {
+ while (*nextp != root) {
struct cgroup_rstat_cpu *nrstatc;
nrstatc = cgroup_rstat_cpu(*nextp, cpu);
@@ -142,7 +187,17 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
}
rstatc->updated_next = NULL;
- return pos;
+
+ /* Push @root to the list first before pushing the children */
+ head = root;
+ root->rstat_flush_next = NULL;
+ child = rstatc->updated_children;
+ rstatc->updated_children = root;
+ if (child != root)
+ head = cgroup_rstat_push_children(head, child, cpu);
+unlock_ret:
+ raw_spin_unlock_irqrestore(cpu_lock, flags);
+ return head;
}
/*
@@ -176,21 +231,9 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
lockdep_assert_held(&cgroup_rstat_lock);
for_each_possible_cpu(cpu) {
- raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
- cpu);
- struct cgroup *pos = NULL;
- unsigned long flags;
+ struct cgroup *pos = cgroup_rstat_updated_list(cgrp, cpu);
- /*
- * The _irqsave() is needed because cgroup_rstat_lock is
- * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
- * this lock with the _irq() suffix only disables interrupts on
- * a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
- * interrupts on both configurations. The _irqsave() ensures
- * that interrupts are always disabled and later restored.
- */
- raw_spin_lock_irqsave(cpu_lock, flags);
- while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
+ for (; pos; pos = pos->rstat_flush_next) {
struct cgroup_subsys_state *css;
cgroup_base_stat_flush(pos, cpu);
@@ -202,7 +245,6 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
css->ss->css_rstat_flush(css, cpu);
rcu_read_unlock();
}
- raw_spin_unlock_irqrestore(cpu_lock, flags);
/* play nice and yield if necessary */
if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
diff --git a/kernel/cpu.c b/kernel/cpu.c
index a86972a91991..e6ec3ba4950b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2125,11 +2125,6 @@ static struct cpuhp_step cpuhp_hp_states[] = {
.startup.single = relay_prepare_cpu,
.teardown.single = NULL,
},
- [CPUHP_SLAB_PREPARE] = {
- .name = "slab:prepare",
- .startup.single = slab_prepare_cpu,
- .teardown.single = slab_dead_cpu,
- },
[CPUHP_RCUTREE_PREP] = {
.name = "RCU/tree:prepare",
.startup.single = rcutree_prepare_cpu,
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index d4313b53837e..d48315667752 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -13,7 +13,6 @@
#include <linux/memory.h>
#include <linux/cpuhotplug.h>
#include <linux/memblock.h>
-#include <linux/kexec.h>
#include <linux/kmemleak.h>
#include <asm/page.h>
@@ -551,9 +550,11 @@ int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
phdr->p_align = 0;
ehdr->e_phnum++;
- pr_debug("Crash PT_LOAD ELF header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
- phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
- ehdr->e_phnum, phdr->p_offset);
+#ifdef CONFIG_KEXEC_FILE
+ kexec_dprintk("Crash PT_LOAD ELF header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
+ phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
+ ehdr->e_phnum, phdr->p_offset);
+#endif
phdr++;
}
@@ -565,9 +566,8 @@ int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
int crash_exclude_mem_range(struct crash_mem *mem,
unsigned long long mstart, unsigned long long mend)
{
- int i, j;
+ int i;
unsigned long long start, end, p_start, p_end;
- struct range temp_range = {0, 0};
for (i = 0; i < mem->nr_ranges; i++) {
start = mem->ranges[i].start;
@@ -575,72 +575,51 @@ int crash_exclude_mem_range(struct crash_mem *mem,
p_start = mstart;
p_end = mend;
- if (mstart > end || mend < start)
+ if (p_start > end)
continue;
+ /*
+ * Because the memory ranges in mem->ranges are stored in
+ * ascending order, when we detect `p_end < start`, we can
+ * immediately exit the for loop, as the subsequent memory
+ * ranges will definitely be outside the range we are looking
+ * for.
+ */
+ if (p_end < start)
+ break;
+
/* Truncate any area outside of range */
- if (mstart < start)
+ if (p_start < start)
p_start = start;
- if (mend > end)
+ if (p_end > end)
p_end = end;
/* Found completely overlapping range */
if (p_start == start && p_end == end) {
- mem->ranges[i].start = 0;
- mem->ranges[i].end = 0;
- if (i < mem->nr_ranges - 1) {
- /* Shift rest of the ranges to left */
- for (j = i; j < mem->nr_ranges - 1; j++) {
- mem->ranges[j].start =
- mem->ranges[j+1].start;
- mem->ranges[j].end =
- mem->ranges[j+1].end;
- }
-
- /*
- * Continue to check if there are another overlapping ranges
- * from the current position because of shifting the above
- * mem ranges.
- */
- i--;
- mem->nr_ranges--;
- continue;
- }
+ memmove(&mem->ranges[i], &mem->ranges[i + 1],
+ (mem->nr_ranges - (i + 1)) * sizeof(mem->ranges[i]));
+ i--;
mem->nr_ranges--;
- return 0;
- }
-
- if (p_start > start && p_end < end) {
+ } else if (p_start > start && p_end < end) {
/* Split original range */
+ if (mem->nr_ranges >= mem->max_nr_ranges)
+ return -ENOMEM;
+
+ memmove(&mem->ranges[i + 2], &mem->ranges[i + 1],
+ (mem->nr_ranges - (i + 1)) * sizeof(mem->ranges[i]));
+
mem->ranges[i].end = p_start - 1;
- temp_range.start = p_end + 1;
- temp_range.end = end;
+ mem->ranges[i + 1].start = p_end + 1;
+ mem->ranges[i + 1].end = end;
+
+ i++;
+ mem->nr_ranges++;
} else if (p_start != start)
mem->ranges[i].end = p_start - 1;
else
mem->ranges[i].start = p_end + 1;
- break;
- }
-
- /* If a split happened, add the split to array */
- if (!temp_range.end)
- return 0;
-
- /* Split happened */
- if (i == mem->max_nr_ranges - 1)
- return -ENOMEM;
-
- /* Location where new range should go */
- j = i + 1;
- if (j < mem->nr_ranges) {
- /* Move over all ranges one slot towards the end */
- for (i = mem->nr_ranges - 1; i >= j; i--)
- mem->ranges[i + 1] = mem->ranges[i];
}
- mem->ranges[j].start = temp_range.start;
- mem->ranges[j].end = temp_range.end;
- mem->nr_ranges++;
return 0;
}
@@ -802,7 +781,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_OFFSET(list_head, prev);
VMCOREINFO_OFFSET(vmap_area, va_start);
VMCOREINFO_OFFSET(vmap_area, list);
- VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER + 1);
+ VMCOREINFO_LENGTH(zone.free_area, NR_PAGE_ORDERS);
log_buf_vmcoreinfo_setup();
VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
VMCOREINFO_NUMBER(NR_FREE_PAGES);
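Illustrative only: a standalone restatement of the rewritten exclusion logic, assuming a sorted, non-overlapping, inclusive ranges[] array; it exists purely to make the memmove()-based shrink and split cases above easier to follow.

#include <string.h>

struct range { unsigned long long start, end; };

static int exclude_range(struct range *r, int *nr, int max_nr,
			 unsigned long long mstart, unsigned long long mend)
{
	for (int i = 0; i < *nr; i++) {
		unsigned long long s = r[i].start, e = r[i].end;
		unsigned long long ps = mstart < s ? s : mstart;	/* clamp */
		unsigned long long pe = mend > e ? e : mend;

		if (mstart > e)
			continue;
		if (mend < s)
			break;		/* sorted: nothing further can overlap */

		if (ps == s && pe == e) {		/* drop the whole entry */
			memmove(&r[i], &r[i + 1],
				(*nr - (i + 1)) * sizeof(*r));
			i--; (*nr)--;
		} else if (ps > s && pe < e) {		/* split the entry in two */
			if (*nr >= max_nr)
				return -1;
			memmove(&r[i + 2], &r[i + 1],
				(*nr - (i + 1)) * sizeof(*r));
			r[i].end = ps - 1;
			r[i + 1].start = pe + 1;
			r[i + 1].end = e;
			i++; (*nr)++;
		} else if (ps != s) {			/* clip the tail */
			r[i].end = ps - 1;
		} else {				/* clip the head */
			r[i].start = pe + 1;
		}
	}
	return 0;
}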
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index b481c48a31a6..d10613eb0f63 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -84,8 +84,8 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
void *addr;
int ret = -ENOMEM;
- /* Cannot allocate larger than MAX_ORDER */
- order = min(get_order(pool_size), MAX_ORDER);
+ /* Cannot allocate larger than MAX_PAGE_ORDER */
+ order = min(get_order(pool_size), MAX_PAGE_ORDER);
do {
pool_size = 1 << (PAGE_SHIFT + order);
@@ -190,7 +190,7 @@ static int __init dma_atomic_pool_init(void)
/*
* If coherent_pool was not used on the command line, default the pool
- * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER.
+ * sizes to 128KB per 1GB of memory, min 128KB, max MAX_PAGE_ORDER.
*/
if (!atomic_pool_size) {
unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 33d942615be5..176078bf2215 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -686,8 +686,8 @@ static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
size_t pool_size;
size_t tlb_size;
- if (nslabs > SLABS_PER_PAGE << MAX_ORDER) {
- nslabs = SLABS_PER_PAGE << MAX_ORDER;
+ if (nslabs > SLABS_PER_PAGE << MAX_PAGE_ORDER) {
+ nslabs = SLABS_PER_PAGE << MAX_PAGE_ORDER;
nareas = limit_nareas(nareas, nslabs);
}
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index d7ee4bc3f2ba..88cb3c88aaa5 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -15,26 +15,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
-/* See comment for enter_from_user_mode() in entry-common.h */
-static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
-{
- arch_enter_from_user_mode(regs);
- lockdep_hardirqs_off(CALLER_ADDR0);
-
- CT_WARN_ON(__ct_state() != CONTEXT_USER);
- user_exit_irqoff();
-
- instrumentation_begin();
- kmsan_unpoison_entry_regs(regs);
- trace_hardirqs_off_finish();
- instrumentation_end();
-}
-
-void noinstr enter_from_user_mode(struct pt_regs *regs)
-{
- __enter_from_user_mode(regs);
-}
-
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
if (unlikely(audit_context())) {
@@ -45,7 +25,7 @@ static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
}
}
-static long syscall_trace_enter(struct pt_regs *regs, long syscall,
+long syscall_trace_enter(struct pt_regs *regs, long syscall,
unsigned long work)
{
long ret = 0;
@@ -85,67 +65,24 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall,
return ret ? : syscall;
}
-static __always_inline long
-__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
-{
- unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
-
- if (work & SYSCALL_WORK_ENTER)
- syscall = syscall_trace_enter(regs, syscall, work);
-
- return syscall;
-}
-
-long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
-{
- return __syscall_enter_from_user_work(regs, syscall);
-}
-
-noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
-{
- long ret;
-
- __enter_from_user_mode(regs);
-
- instrumentation_begin();
- local_irq_enable();
- ret = __syscall_enter_from_user_work(regs, syscall);
- instrumentation_end();
-
- return ret;
-}
-
noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
- __enter_from_user_mode(regs);
+ enter_from_user_mode(regs);
instrumentation_begin();
local_irq_enable();
instrumentation_end();
}
-/* See comment for exit_to_user_mode() in entry-common.h */
-static __always_inline void __exit_to_user_mode(void)
-{
- instrumentation_begin();
- trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare();
- instrumentation_end();
-
- user_enter_irqoff();
- arch_exit_to_user_mode();
- lockdep_hardirqs_on(CALLER_ADDR0);
-}
-
-void noinstr exit_to_user_mode(void)
-{
- __exit_to_user_mode();
-}
-
/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs) { }
-static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
- unsigned long ti_work)
+/**
+ * exit_to_user_mode_loop - do any pending work before leaving to user space
+ * @regs: Pointer to pt_regs on entry stack
+ * @ti_work: TIF work flags as read by the caller
+ */
+__always_inline unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+ unsigned long ti_work)
{
/*
* Before returning to user space ensure that all pending work
@@ -190,27 +127,6 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
return ti_work;
}
-static void exit_to_user_mode_prepare(struct pt_regs *regs)
-{
- unsigned long ti_work;
-
- lockdep_assert_irqs_disabled();
-
- /* Flush pending rcuog wakeup before the last need_resched() check */
- tick_nohz_user_enter_prepare();
-
- ti_work = read_thread_flags();
- if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
- ti_work = exit_to_user_mode_loop(regs, ti_work);
-
- arch_exit_to_user_mode_prepare(regs, ti_work);
-
- /* Ensure that kernel state is sane for a return to userspace */
- kmap_assert_nomap();
- lockdep_assert_irqs_disabled();
- lockdep_sys_exit();
-}
-
/*
* If SYSCALL_EMU is set, then the only reason to report is when
* SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
@@ -295,12 +211,12 @@ __visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
instrumentation_begin();
__syscall_exit_to_user_mode_work(regs);
instrumentation_end();
- __exit_to_user_mode();
+ exit_to_user_mode();
}
noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
- __enter_from_user_mode(regs);
+ enter_from_user_mode(regs);
}
noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
@@ -308,7 +224,7 @@ noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
instrumentation_begin();
exit_to_user_mode_prepare(regs);
instrumentation_end();
- __exit_to_user_mode();
+ exit_to_user_mode();
}
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9efd0d7775e7..f0f0f71213a1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7397,6 +7397,14 @@ void perf_output_sample(struct perf_output_handle *handle,
if (branch_sample_hw_index(event))
perf_output_put(handle, data->br_stack->hw_idx);
perf_output_copy(handle, data->br_stack->entries, size);
+ /*
+ * Add the extension space which is appended
+ * right after the struct perf_branch_stack.
+ */
+ if (data->br_stack_cntr) {
+ size = data->br_stack->nr * sizeof(u64);
+ perf_output_copy(handle, data->br_stack_cntr, size);
+ }
} else {
/*
* we always store at least the value of nr
@@ -11425,9 +11433,30 @@ static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
static struct attribute *pmu_dev_attrs[] = {
&dev_attr_type.attr,
&dev_attr_perf_event_mux_interval_ms.attr,
+ &dev_attr_nr_addr_filters.attr,
+ NULL,
+};
+
+static umode_t pmu_dev_is_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pmu *pmu = dev_get_drvdata(dev);
+
+ if (n == 2 && !pmu->nr_addr_filters)
+ return 0;
+
+ return a->mode;
+}
+
+static struct attribute_group pmu_dev_attr_group = {
+ .is_visible = pmu_dev_is_visible,
+ .attrs = pmu_dev_attrs,
+};
+
+static const struct attribute_group *pmu_dev_groups[] = {
+ &pmu_dev_attr_group,
NULL,
};
-ATTRIBUTE_GROUPS(pmu_dev);
static int pmu_bus_running;
static struct bus_type pmu_bus = {
@@ -11464,18 +11493,11 @@ static int pmu_dev_alloc(struct pmu *pmu)
if (ret)
goto free_dev;
- /* For PMUs with address filters, throw in an extra attribute: */
- if (pmu->nr_addr_filters)
- ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
-
- if (ret)
- goto del_dev;
-
- if (pmu->attr_update)
+ if (pmu->attr_update) {
ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update);
-
- if (ret)
- goto del_dev;
+ if (ret)
+ goto del_dev;
+ }
out:
return ret;
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index e8d82c2f07d0..60ed43d1c29e 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -610,8 +610,8 @@ static struct page *rb_alloc_aux_page(int node, int order)
{
struct page *page;
- if (order > MAX_ORDER)
- order = MAX_ORDER;
+ if (order > MAX_PAGE_ORDER)
+ order = MAX_PAGE_ORDER;
do {
page = alloc_pages_node(node, PERF_AUX_GFP, order);
@@ -702,9 +702,9 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
/*
* kcalloc_node() is unable to allocate buffer if the size is larger
- * than: PAGE_SIZE << MAX_ORDER; directly bail out in this case.
+ * than: PAGE_SIZE << MAX_PAGE_ORDER; directly bail out in this case.
*/
- if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER)
+ if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_PAGE_ORDER)
return -ENOMEM;
rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
node);
@@ -821,7 +821,7 @@ struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
size = sizeof(struct perf_buffer);
size += nr_pages * sizeof(void *);
- if (order_base_2(size) > PAGE_SHIFT+MAX_ORDER)
+ if (order_base_2(size) > PAGE_SHIFT+MAX_PAGE_ORDER)
goto fail;
node = (cpu == -1) ? cpu : cpu_to_node(cpu);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 435aac1d8c27..485bb0389b48 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -181,7 +181,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
if (new_page) {
folio_get(new_folio);
- page_add_new_anon_rmap(new_page, vma, addr);
+ folio_add_new_anon_rmap(new_folio, vma, addr);
folio_add_lru_vma(new_folio, vma);
} else
/* no new page, just dec_mm_counter for old_page */
@@ -198,7 +198,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
set_pte_at_notify(mm, addr, pvmw.pte,
mk_pte(new_page, vma->vm_page_prot));
- page_remove_rmap(old_page, vma, false);
+ folio_remove_rmap_pte(old_folio, old_page, vma);
if (!folio_mapped(old_folio))
folio_free_swap(old_folio);
page_vma_mapped_walk_done(&pvmw);
diff --git a/kernel/fork.c b/kernel/fork.c
index 10917c3e1f03..b32e323adbbf 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -165,7 +165,6 @@ void __weak arch_release_task_struct(struct task_struct *tsk)
{
}
-#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;
static inline struct task_struct *alloc_task_struct_node(int node)
@@ -177,9 +176,6 @@ static inline void free_task_struct(struct task_struct *tsk)
{
kmem_cache_free(task_struct_cachep, tsk);
}
-#endif
-
-#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
/*
* Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
@@ -412,24 +408,6 @@ void thread_stack_cache_init(void)
}
# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
-#else /* CONFIG_ARCH_THREAD_STACK_ALLOCATOR */
-
-static int alloc_thread_stack_node(struct task_struct *tsk, int node)
-{
- unsigned long *stack;
-
- stack = arch_alloc_thread_stack_node(tsk, node);
- tsk->stack = stack;
- return stack ? 0 : -ENOMEM;
-}
-
-static void free_thread_stack(struct task_struct *tsk)
-{
- arch_free_thread_stack(tsk);
- tsk->stack = NULL;
-}
-
-#endif /* !CONFIG_ARCH_THREAD_STACK_ALLOCATOR */
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;
@@ -650,7 +628,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
int retval;
unsigned long charge = 0;
LIST_HEAD(uf);
- VMA_ITERATOR(old_vmi, oldmm, 0);
VMA_ITERATOR(vmi, mm, 0);
uprobe_start_dup_mmap();
@@ -678,16 +655,22 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
goto out;
khugepaged_fork(mm, oldmm);
- retval = vma_iter_bulk_alloc(&vmi, oldmm->map_count);
- if (retval)
+ /* Use __mt_dup() to efficiently build an identical maple tree. */
+ retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL);
+ if (unlikely(retval))
goto out;
mt_clear_in_rcu(vmi.mas.tree);
- for_each_vma(old_vmi, mpnt) {
+ for_each_vma(vmi, mpnt) {
struct file *file;
vma_start_write(mpnt);
if (mpnt->vm_flags & VM_DONTCOPY) {
+ retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start,
+ mpnt->vm_end, GFP_KERNEL);
+ if (retval)
+ goto loop_out;
+
vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
continue;
}
@@ -749,9 +732,11 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
if (is_vm_hugetlb_page(tmp))
hugetlb_dup_vma_private(tmp);
- /* Link the vma into the MT */
- if (vma_iter_bulk_store(&vmi, tmp))
- goto fail_nomem_vmi_store;
+ /*
+ * Link the vma into the MT. After using __mt_dup(), memory
+ * allocation is not necessary here, so it cannot fail.
+ */
+ vma_iter_bulk_store(&vmi, tmp);
mm->map_count++;
if (!(tmp->vm_flags & VM_WIPEONFORK))
@@ -760,15 +745,28 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
if (tmp->vm_ops && tmp->vm_ops->open)
tmp->vm_ops->open(tmp);
- if (retval)
+ if (retval) {
+ mpnt = vma_next(&vmi);
goto loop_out;
+ }
}
/* a new mm has just been created */
retval = arch_dup_mmap(oldmm, mm);
loop_out:
vma_iter_free(&vmi);
- if (!retval)
+ if (!retval) {
mt_set_in_rcu(vmi.mas.tree);
+ } else if (mpnt) {
+ /*
+ * The entire maple tree has already been duplicated. If the
+ * mmap duplication fails, mark the failure point with
+ * XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered,
+ * stop releasing VMAs that have not been duplicated after this
+ * point.
+ */
+ mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
+ mas_store(&vmi.mas, XA_ZERO_ENTRY);
+ }
out:
mmap_write_unlock(mm);
flush_tlb_mm(oldmm);
@@ -778,8 +776,6 @@ fail_uprobe_end:
uprobe_end_dup_mmap();
return retval;
-fail_nomem_vmi_store:
- unlink_anon_vmas(tmp);
fail_nomem_anon_vma_fork:
mpol_put(vma_policy(tmp));
fail_nomem_policy:
@@ -1021,7 +1017,6 @@ static void set_max_threads(unsigned int max_threads_suggested)
int arch_task_struct_size __read_mostly;
#endif
-#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
{
/* Fetch thread_struct whitelist for the architecture. */
@@ -1036,12 +1031,10 @@ static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
else
*offset += offsetof(struct task_struct, thread);
}
-#endif /* CONFIG_ARCH_TASK_STRUCT_ALLOCATOR */
void __init fork_init(void)
{
int i;
-#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN 0
#endif
@@ -1054,7 +1047,6 @@ void __init fork_init(void)
arch_task_struct_size, align,
SLAB_PANIC|SLAB_ACCOUNT,
useroffset, usersize, NULL);
-#endif
/* do the arch specific task caches init */
arch_task_cache_init();
@@ -1588,7 +1580,7 @@ static void complete_vfork_done(struct task_struct *tsk)
static int wait_for_vfork_done(struct task_struct *child,
struct completion *vfork)
{
- unsigned int state = TASK_UNINTERRUPTIBLE|TASK_KILLABLE|TASK_FREEZABLE;
+ unsigned int state = TASK_KILLABLE|TASK_FREEZABLE;
int killed;
cgroup_enter_frozen();
@@ -2928,7 +2920,7 @@ pid_t kernel_clone(struct kernel_clone_args *args)
get_task_struct(p);
}
- if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) {
+ if (IS_ENABLED(CONFIG_LRU_GEN_WALKS_MMU) && !(clone_flags & CLONE_VM)) {
/* lock the task to synchronize with memcg migration */
task_lock(p);
lru_gen_add_mm(p->mm);
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 759006a9a910..f57aaf96b829 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -187,6 +187,7 @@ static int __restore_freezer_state(struct task_struct *p, void *arg)
if (state != TASK_RUNNING) {
WRITE_ONCE(p->__state, state);
+ p->saved_state = TASK_RUNNING;
return 1;
}
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index be5642a4ec49..a08031b57a61 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -52,6 +52,8 @@ atomic_t __kexec_lock = ATOMIC_INIT(0);
/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;
+bool kexec_file_dbg_print;
+
int kexec_should_crash(struct task_struct *p)
{
/*
@@ -276,8 +278,8 @@ int kimage_is_destination_range(struct kimage *image,
unsigned long mstart, mend;
mstart = image->segment[i].mem;
- mend = mstart + image->segment[i].memsz;
- if ((end > mstart) && (start < mend))
+ mend = mstart + image->segment[i].memsz - 1;
+ if ((end >= mstart) && (start <= mend))
return 1;
}
@@ -370,7 +372,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
pfn = page_to_boot_pfn(pages);
epfn = pfn + count;
addr = pfn << PAGE_SHIFT;
- eaddr = epfn << PAGE_SHIFT;
+ eaddr = (epfn << PAGE_SHIFT) - 1;
if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
kimage_is_destination_range(image, addr, eaddr)) {
list_add(&pages->lru, &extra_pages);
@@ -430,7 +432,7 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
pages = NULL;
size = (1 << order) << PAGE_SHIFT;
- hole_start = (image->control_page + (size - 1)) & ~(size - 1);
+ hole_start = ALIGN(image->control_page, size);
hole_end = hole_start + size - 1;
while (hole_end <= crashk_res.end) {
unsigned long i;
@@ -447,7 +449,7 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
mend = mstart + image->segment[i].memsz - 1;
if ((hole_end >= mstart) && (hole_start <= mend)) {
/* Advance the hole to the end of the segment */
- hole_start = (mend + (size - 1)) & ~(size - 1);
+ hole_start = ALIGN(mend, size);
hole_end = hole_start + size - 1;
break;
}
@@ -455,7 +457,7 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
/* If I don't overlap any segments I have found my hole! */
if (i == image->nr_segments) {
pages = pfn_to_page(hole_start >> PAGE_SHIFT);
- image->control_page = hole_end;
+ image->control_page = hole_end + 1;
break;
}
}
@@ -716,7 +718,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
/* If the page is not a destination page use it */
if (!kimage_is_destination_range(image, addr,
- addr + PAGE_SIZE))
+ addr + PAGE_SIZE - 1))
break;
/*
@@ -1063,9 +1065,10 @@ __bpf_kfunc void crash_kexec(struct pt_regs *regs)
* panic(). Otherwise parallel calls of panic() and crash_kexec()
* may stop each other. To exclude them, we use panic_cpu here too.
*/
+ old_cpu = PANIC_CPU_INVALID;
this_cpu = raw_smp_processor_id();
- old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
- if (old_cpu == PANIC_CPU_INVALID) {
+
+ if (atomic_try_cmpxchg(&panic_cpu, &old_cpu, this_cpu)) {
/* This is the 1st CPU which comes here, so go ahead. */
__crash_kexec(regs);
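
/* As an aside, a userspace C11 sketch of the try-cmpxchg pattern adopted for
   panic_cpu above; the names and the two-caller scenario are illustrative
   only, not code from the patch. */
#include <stdatomic.h>
#include <stdio.h>

#define PANIC_CPU_INVALID -1

static atomic_int panic_cpu = PANIC_CPU_INVALID;

static int try_claim_panic_cpu(int this_cpu)
{
	int old_cpu = PANIC_CPU_INVALID;

	/*
	 * Succeeds only if panic_cpu still holds PANIC_CPU_INVALID; on
	 * failure the current value is written back into old_cpu, so no
	 * separate comparison against the old value is needed.
	 */
	return atomic_compare_exchange_strong(&panic_cpu, &old_cpu, this_cpu);
}

int main(void)
{
	printf("cpu0 claims: %d\n", try_claim_panic_cpu(0)); /* 1: first caller wins */
	printf("cpu1 claims: %d\n", try_claim_panic_cpu(1)); /* 0: already claimed   */
	return 0;
}
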
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index f9a419cd22d4..bef2f6f2571b 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -123,6 +123,8 @@ void kimage_file_post_load_cleanup(struct kimage *image)
*/
kfree(image->image_loader_data);
image->image_loader_data = NULL;
+
+ kexec_file_dbg_print = false;
}
#ifdef CONFIG_KEXEC_SIG
@@ -202,6 +204,8 @@ kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
if (ret < 0)
return ret;
image->kernel_buf_len = ret;
+ kexec_dprintk("kernel: %p kernel_size: %#lx\n",
+ image->kernel_buf, image->kernel_buf_len);
/* Call arch image probe handlers */
ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
@@ -278,6 +282,7 @@ kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
if (!image)
return -ENOMEM;
+ kexec_file_dbg_print = !!(flags & KEXEC_FILE_DEBUG);
image->file_mode = 1;
if (kexec_on_panic) {
@@ -384,13 +389,14 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
if (ret)
goto out;
+ kexec_dprintk("nr_segments = %lu\n", image->nr_segments);
for (i = 0; i < image->nr_segments; i++) {
struct kexec_segment *ksegment;
ksegment = &image->segment[i];
- pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
- i, ksegment->buf, ksegment->bufsz, ksegment->mem,
- ksegment->memsz);
+ kexec_dprintk("segment[%d]: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
+ i, ksegment->buf, ksegment->bufsz, ksegment->mem,
+ ksegment->memsz);
ret = kimage_load_segment(image, &image->segment[i]);
if (ret)
@@ -403,6 +409,8 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
if (ret)
goto out;
+ kexec_dprintk("kexec_file_load: type:%u, start:0x%lx head:0x%lx flags:0x%lx\n",
+ image->type, image->start, image->head, flags);
/*
* Free up any temporary buffers allocated which are not needed
* after image has been loaded
@@ -426,11 +434,11 @@ static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
unsigned long temp_start, temp_end;
temp_end = min(end, kbuf->buf_max);
- temp_start = temp_end - kbuf->memsz;
+ temp_start = temp_end - kbuf->memsz + 1;
do {
/* align down start */
- temp_start = temp_start & (~(kbuf->buf_align - 1));
+ temp_start = ALIGN_DOWN(temp_start, kbuf->buf_align);
if (temp_start < start || temp_start < kbuf->buf_min)
return 0;
@@ -592,6 +600,8 @@ static int kexec_walk_resources(struct kexec_buf *kbuf,
IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
crashk_res.start, crashk_res.end,
kbuf, func);
+ else if (kbuf->top_down)
+ return walk_system_ram_res_rev(0, ULONG_MAX, kbuf, func);
else
return walk_system_ram_res(0, ULONG_MAX, kbuf, func);
}
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 2deeeca3e71b..cbae8c0b89ab 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -532,6 +532,11 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
* This function must not be used in interrupt context. Unlocking
* of a not locked mutex is not allowed.
*
+ * The caller must ensure that the mutex stays alive until this function has
+ * returned - mutex_unlock() can NOT directly be used to release an object such
+ * that another concurrent task can free it.
+ * Mutexes are different from spinlocks & refcounts in this aspect.
+ *
* This function is similar to (but not equivalent to) up().
*/
void __sched mutex_unlock(struct mutex *lock)
diff --git a/kernel/pid.c b/kernel/pid.c
index 6500ef956f2f..b52b10865454 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -700,7 +700,7 @@ static int pidfd_getfd(struct pid *pid, int fd)
if (IS_ERR(file))
return PTR_ERR(file);
- ret = receive_fd(file, O_CLOEXEC);
+ ret = receive_fd(file, NULL, O_CLOEXEC);
fput(file);
return ret;
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index dee341ae4ace..4b0b7cf2e019 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -642,9 +642,9 @@ int hibernation_platform_enter(void)
*/
static void power_down(void)
{
-#ifdef CONFIG_SUSPEND
int error;
+#ifdef CONFIG_SUSPEND
if (hibernation_mode == HIBERNATION_SUSPEND) {
error = suspend_devices_and_enter(mem_sleep_current);
if (error) {
@@ -667,7 +667,13 @@ static void power_down(void)
kernel_restart(NULL);
break;
case HIBERNATION_PLATFORM:
- hibernation_platform_enter();
+ error = hibernation_platform_enter();
+ if (error == -EAGAIN || error == -EBUSY) {
+ swsusp_unmark();
+ events_check_enabled = false;
+ pr_info("Wakeup event detected during hibernation, rolling back.\n");
+ return;
+ }
fallthrough;
case HIBERNATION_SHUTDOWN:
if (kernel_can_power_off())
diff --git a/kernel/power/main.c b/kernel/power/main.c
index f6425ae3e8b0..b1ae9b677d03 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -60,22 +60,6 @@ EXPORT_SYMBOL_GPL(lock_system_sleep);
void unlock_system_sleep(unsigned int flags)
{
- /*
- * Don't use freezer_count() because we don't want the call to
- * try_to_freeze() here.
- *
- * Reason:
- * Fundamentally, we just don't need it, because freezing condition
- * doesn't come into effect until we release the
- * system_transition_mutex lock, since the freezer always works with
- * system_transition_mutex held.
- *
- * More importantly, in the case of hibernation,
- * unlock_system_sleep() gets called in snapshot_read() and
- * snapshot_write() when the freezing condition is still in effect.
- * Which means, if we use try_to_freeze() here, it would make them
- * enter the refrigerator, thus causing hibernation to lockup.
- */
if (!(flags & PF_NOFREEZE))
current->flags &= ~PF_NOFREEZE;
mutex_unlock(&system_transition_mutex);
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 17fd9aaaf084..8499a39c62f4 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -175,6 +175,8 @@ extern int swsusp_write(unsigned int flags);
void swsusp_close(void);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
+#else
+static inline int swsusp_unmark(void) { return 0; }
#endif
struct __kernel_old_timeval;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 50a15408c3fc..5c96ff067c64 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1119,7 +1119,7 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
int create_basic_memory_bitmaps(void)
{
struct memory_bitmap *bm1, *bm2;
- int error = 0;
+ int error;
if (forbidden_pages_map && free_pages_map)
return 0;
@@ -1487,11 +1487,11 @@ static bool copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
s_page = pfn_to_page(src_pfn);
d_page = pfn_to_page(dst_pfn);
if (PageHighMem(s_page)) {
- src = kmap_atomic(s_page);
- dst = kmap_atomic(d_page);
+ src = kmap_local_page(s_page);
+ dst = kmap_local_page(d_page);
zeros_only = do_copy_page(dst, src);
- kunmap_atomic(dst);
- kunmap_atomic(src);
+ kunmap_local(dst);
+ kunmap_local(src);
} else {
if (PageHighMem(d_page)) {
/*
@@ -1499,9 +1499,9 @@ static bool copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
* data modified by kmap_atomic()
*/
zeros_only = safe_copy_page(buffer, s_page);
- dst = kmap_atomic(d_page);
+ dst = kmap_local_page(d_page);
copy_page(dst, buffer);
- kunmap_atomic(dst);
+ kunmap_local(dst);
} else {
zeros_only = safe_copy_page(page_address(d_page), s_page);
}
@@ -2778,7 +2778,7 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
int snapshot_write_next(struct snapshot_handle *handle)
{
static struct chain_allocator ca;
- int error = 0;
+ int error;
next:
/* Check if we have already loaded the entire image */
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index a2cb0babb5ec..6053ddddaf65 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -451,7 +451,7 @@ err_close:
static int swap_write_page(struct swap_map_handle *handle, void *buf,
struct hib_bio_batch *hb)
{
- int error = 0;
+ int error;
sector_t offset;
if (!handle->cur)
@@ -606,11 +606,11 @@ static int crc32_threadfn(void *data)
unsigned i;
while (1) {
- wait_event(d->go, atomic_read(&d->ready) ||
+ wait_event(d->go, atomic_read_acquire(&d->ready) ||
kthread_should_stop());
if (kthread_should_stop()) {
d->thr = NULL;
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
break;
}
@@ -619,7 +619,7 @@ static int crc32_threadfn(void *data)
for (i = 0; i < d->run_threads; i++)
*d->crc32 = crc32_le(*d->crc32,
d->unc[i], *d->unc_len[i]);
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
}
return 0;
@@ -649,12 +649,12 @@ static int lzo_compress_threadfn(void *data)
struct cmp_data *d = data;
while (1) {
- wait_event(d->go, atomic_read(&d->ready) ||
+ wait_event(d->go, atomic_read_acquire(&d->ready) ||
kthread_should_stop());
if (kthread_should_stop()) {
d->thr = NULL;
d->ret = -1;
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
break;
}
@@ -663,7 +663,7 @@ static int lzo_compress_threadfn(void *data)
d->ret = lzo1x_1_compress(d->unc, d->unc_len,
d->cmp + LZO_HEADER, &d->cmp_len,
d->wrk);
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
}
return 0;
@@ -798,7 +798,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
data[thr].unc_len = off;
- atomic_set(&data[thr].ready, 1);
+ atomic_set_release(&data[thr].ready, 1);
wake_up(&data[thr].go);
}
@@ -806,12 +806,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
break;
crc->run_threads = thr;
- atomic_set(&crc->ready, 1);
+ atomic_set_release(&crc->ready, 1);
wake_up(&crc->go);
for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
wait_event(data[thr].done,
- atomic_read(&data[thr].stop));
+ atomic_read_acquire(&data[thr].stop));
atomic_set(&data[thr].stop, 0);
ret = data[thr].ret;
@@ -850,7 +850,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
}
}
- wait_event(crc->done, atomic_read(&crc->stop));
+ wait_event(crc->done, atomic_read_acquire(&crc->stop));
atomic_set(&crc->stop, 0);
}
@@ -1132,12 +1132,12 @@ static int lzo_decompress_threadfn(void *data)
struct dec_data *d = data;
while (1) {
- wait_event(d->go, atomic_read(&d->ready) ||
+ wait_event(d->go, atomic_read_acquire(&d->ready) ||
kthread_should_stop());
if (kthread_should_stop()) {
d->thr = NULL;
d->ret = -1;
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
break;
}
@@ -1150,7 +1150,7 @@ static int lzo_decompress_threadfn(void *data)
flush_icache_range((unsigned long)d->unc,
(unsigned long)d->unc + d->unc_len);
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
}
return 0;
@@ -1335,7 +1335,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
}
if (crc->run_threads) {
- wait_event(crc->done, atomic_read(&crc->stop));
+ wait_event(crc->done, atomic_read_acquire(&crc->stop));
atomic_set(&crc->stop, 0);
crc->run_threads = 0;
}
@@ -1371,7 +1371,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
pg = 0;
}
- atomic_set(&data[thr].ready, 1);
+ atomic_set_release(&data[thr].ready, 1);
wake_up(&data[thr].go);
}
@@ -1390,7 +1390,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
wait_event(data[thr].done,
- atomic_read(&data[thr].stop));
+ atomic_read_acquire(&data[thr].stop));
atomic_set(&data[thr].stop, 0);
ret = data[thr].ret;
@@ -1421,7 +1421,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
ret = snapshot_write_next(snapshot);
if (ret <= 0) {
crc->run_threads = thr + 1;
- atomic_set(&crc->ready, 1);
+ atomic_set_release(&crc->ready, 1);
wake_up(&crc->go);
goto out_finish;
}
@@ -1429,13 +1429,13 @@ static int load_image_lzo(struct swap_map_handle *handle,
}
crc->run_threads = thr;
- atomic_set(&crc->ready, 1);
+ atomic_set_release(&crc->ready, 1);
wake_up(&crc->go);
}
out_finish:
if (crc->run_threads) {
- wait_event(crc->done, atomic_read(&crc->stop));
+ wait_event(crc->done, atomic_read_acquire(&crc->stop));
atomic_set(&crc->stop, 0);
}
stop = ktime_get();
@@ -1566,7 +1566,6 @@ put:
/**
* swsusp_close - close resume device.
- * @exclusive: Close the resume device which is exclusively opened.
*/
void swsusp_close(void)
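
/* A self-contained C11 sketch of the release/acquire pairing these swap.c
   hunks introduce; it illustrates the ordering guarantee only and is not code
   from the patch (the kernel side sleeps in wait_event() rather than spinning). */
#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_int stop;
static int result;	/* plain data published by the worker */

static int worker(void *arg)
{
	result = 42;					/* do the work */
	atomic_store_explicit(&stop, 1, memory_order_release);
	return 0;
}

int main(void)
{
	thrd_t thr;

	thrd_create(&thr, worker, NULL);
	while (!atomic_load_explicit(&stop, memory_order_acquire))
		;					/* spin in place of wait_event() */
	printf("result=%d\n", result);			/* acquire pairs with release: sees 42 */
	thrd_join(thr, NULL);
	return 0;
}
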
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index d8b5e13a2229..2fabd497d659 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -145,20 +145,9 @@ void __ptrace_unlink(struct task_struct *child)
*/
if (!(child->flags & PF_EXITING) &&
(child->signal->flags & SIGNAL_STOP_STOPPED ||
- child->signal->group_stop_count)) {
+ child->signal->group_stop_count))
child->jobctl |= JOBCTL_STOP_PENDING;
- /*
- * This is only possible if this thread was cloned by the
- * traced task running in the stopped group, set the signal
- * for the future reports.
- * FIXME: we should change ptrace_init_task() to handle this
- * case.
- */
- if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
- child->jobctl |= SIGSTOP;
- }
-
/*
* If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
* @child in the butt. Note that @resume should be used iff @child
@@ -386,6 +375,34 @@ static int check_ptrace_options(unsigned long data)
return 0;
}
+static inline void ptrace_set_stopped(struct task_struct *task)
+{
+ guard(spinlock)(&task->sighand->siglock);
+
+ /*
+ * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
+ * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
+ * will be cleared if the child completes the transition or any
+ * event which clears the group stop states happens. We'll wait
+ * for the transition to complete before returning from this
+ * function.
+ *
+ * This hides STOPPED -> RUNNING -> TRACED transition from the
+ * attaching thread but a different thread in the same group can
+ * still observe the transient RUNNING state. IOW, if another
+ * thread's WNOHANG wait(2) on the stopped tracee races against
+ * ATTACH, the wait(2) may fail due to the transient RUNNING.
+ *
+ * The following task_is_stopped() test is safe as both transitions
+ * in and out of STOPPED are protected by siglock.
+ */
+ if (task_is_stopped(task) &&
+ task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) {
+ task->jobctl &= ~JOBCTL_STOPPED;
+ signal_wake_up_state(task, __TASK_STOPPED);
+ }
+}
+
static int ptrace_attach(struct task_struct *task, long request,
unsigned long addr,
unsigned long flags)
@@ -393,17 +410,17 @@ static int ptrace_attach(struct task_struct *task, long request,
bool seize = (request == PTRACE_SEIZE);
int retval;
- retval = -EIO;
if (seize) {
if (addr != 0)
- goto out;
+ return -EIO;
/*
* This duplicates the check in check_ptrace_options() because
* ptrace_attach() and ptrace_setoptions() have historically
* used different error codes for unknown ptrace options.
*/
if (flags & ~(unsigned long)PTRACE_O_MASK)
- goto out;
+ return -EIO;
+
retval = check_ptrace_options(flags);
if (retval)
return retval;
@@ -414,88 +431,54 @@ static int ptrace_attach(struct task_struct *task, long request,
audit_ptrace(task);
- retval = -EPERM;
if (unlikely(task->flags & PF_KTHREAD))
- goto out;
+ return -EPERM;
if (same_thread_group(task, current))
- goto out;
+ return -EPERM;
/*
* Protect exec's credential calculations against our interference;
* SUID, SGID and LSM creds get determined differently
* under ptrace.
*/
- retval = -ERESTARTNOINTR;
- if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
- goto out;
+ scoped_cond_guard (mutex_intr, return -ERESTARTNOINTR,
+ &task->signal->cred_guard_mutex) {
- task_lock(task);
- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
- task_unlock(task);
- if (retval)
- goto unlock_creds;
+ scoped_guard (task_lock, task) {
+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
+ if (retval)
+ return retval;
+ }
- write_lock_irq(&tasklist_lock);
- retval = -EPERM;
- if (unlikely(task->exit_state))
- goto unlock_tasklist;
- if (task->ptrace)
- goto unlock_tasklist;
+ scoped_guard (write_lock_irq, &tasklist_lock) {
+ if (unlikely(task->exit_state))
+ return -EPERM;
+ if (task->ptrace)
+ return -EPERM;
- task->ptrace = flags;
+ task->ptrace = flags;
- ptrace_link(task, current);
+ ptrace_link(task, current);
- /* SEIZE doesn't trap tracee on attach */
- if (!seize)
- send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);
+ /* SEIZE doesn't trap tracee on attach */
+ if (!seize)
+ send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);
- spin_lock(&task->sighand->siglock);
+ ptrace_set_stopped(task);
+ }
+ }
/*
- * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
- * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
- * will be cleared if the child completes the transition or any
- * event which clears the group stop states happens. We'll wait
- * for the transition to complete before returning from this
- * function.
- *
- * This hides STOPPED -> RUNNING -> TRACED transition from the
- * attaching thread but a different thread in the same group can
- * still observe the transient RUNNING state. IOW, if another
- * thread's WNOHANG wait(2) on the stopped tracee races against
- * ATTACH, the wait(2) may fail due to the transient RUNNING.
- *
- * The following task_is_stopped() test is safe as both transitions
- * in and out of STOPPED are protected by siglock.
+ * We do not bother to change retval or clear JOBCTL_TRAPPING
+ * if wait_on_bit() was interrupted by SIGKILL. The tracer will
+ * not return to user-mode, it will exit and clear this bit in
+ * __ptrace_unlink() if it wasn't already cleared by the tracee;
+ * and until then nobody can ptrace this task.
*/
- if (task_is_stopped(task) &&
- task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) {
- task->jobctl &= ~JOBCTL_STOPPED;
- signal_wake_up_state(task, __TASK_STOPPED);
- }
-
- spin_unlock(&task->sighand->siglock);
-
- retval = 0;
-unlock_tasklist:
- write_unlock_irq(&tasklist_lock);
-unlock_creds:
- mutex_unlock(&task->signal->cred_guard_mutex);
-out:
- if (!retval) {
- /*
- * We do not bother to change retval or clear JOBCTL_TRAPPING
- * if wait_on_bit() was interrupted by SIGKILL. The tracer will
- * not return to user-mode, it will exit and clear this bit in
- * __ptrace_unlink() if it wasn't already cleared by the tracee;
- * and until then nobody can ptrace this task.
- */
- wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
- proc_ptrace_connector(task, PTRACE_ATTACH);
- }
+ wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
+ proc_ptrace_connector(task, PTRACE_ATTACH);
- return retval;
+ return 0;
}
/**
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 395a0ea3c7a8..22c16e2564cc 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -59,6 +59,14 @@ struct sys_off_handler {
};
/*
+ * This variable is used to indicate if a halt was initiated instead of a
+ * reboot when the reboot call was invoked with LINUX_REBOOT_CMD_POWER_OFF, but
+ * the system cannot be powered off. This allows kernel_halt() to notify users
+ * of that.
+ */
+static bool poweroff_fallback_to_halt;
+
+/*
* Temporary stub that prevents linkage failure while we're in process
* of removing all uses of legacy pm_power_off() around the kernel.
*/
@@ -297,7 +305,10 @@ void kernel_halt(void)
kernel_shutdown_prepare(SYSTEM_HALT);
migrate_to_reboot_cpu();
syscore_shutdown();
- pr_emerg("System halted\n");
+ if (poweroff_fallback_to_halt)
+ pr_emerg("Power off not available: System halted instead\n");
+ else
+ pr_emerg("System halted\n");
kmsg_dump(KMSG_DUMP_SHUTDOWN);
machine_halt();
}
@@ -732,8 +743,10 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
/* Instead of trying to make the power_off code look like
* halt when pm_power_off is not set do it the easy way.
*/
- if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !kernel_can_power_off())
+ if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !kernel_can_power_off()) {
+ poweroff_fallback_to_halt = true;
cmd = LINUX_REBOOT_CMD_HALT;
+ }
mutex_lock(&system_transition_mutex);
switch (cmd) {
@@ -957,21 +970,24 @@ static void hw_failure_emergency_poweroff(int poweroff_delay_ms)
}
/**
- * hw_protection_shutdown - Trigger an emergency system poweroff
+ * __hw_protection_shutdown - Trigger an emergency system shutdown or reboot
*
- * @reason: Reason of emergency shutdown to be printed.
- * @ms_until_forced: Time to wait for orderly shutdown before tiggering a
- * forced shudown. Negative value disables the forced
- * shutdown.
+ * @reason: Reason of emergency shutdown or reboot to be printed.
+ * @ms_until_forced: Time to wait for orderly shutdown or reboot before
+ * triggering it. Negative value disables the forced
+ * shutdown or reboot.
+ * @shutdown: If true, indicates that a shutdown will happen
+ * after the critical temperature is reached.
+ * If false, indicates that a reboot will happen
+ * after the critical temperature is reached.
*
- * Initiate an emergency system shutdown in order to protect hardware from
- * further damage. Usage examples include a thermal protection or a voltage or
- * current regulator failures.
- * NOTE: The request is ignored if protection shutdown is already pending even
- * if the previous request has given a large timeout for forced shutdown.
- * Can be called from any context.
+ * Initiate an emergency system shutdown or reboot in order to protect
+ * hardware from further damage. Usage examples include a thermal protection.
+ * NOTE: The request is ignored if protection shutdown or reboot is already
+ * pending even if the previous request has given a large timeout for forced
+ * shutdown/reboot.
*/
-void hw_protection_shutdown(const char *reason, int ms_until_forced)
+void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown)
{
static atomic_t allow_proceed = ATOMIC_INIT(1);
@@ -986,9 +1002,12 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
* orderly_poweroff failure
*/
hw_failure_emergency_poweroff(ms_until_forced);
- orderly_poweroff(true);
+ if (shutdown)
+ orderly_poweroff(true);
+ else
+ orderly_reboot();
}
-EXPORT_SYMBOL_GPL(hw_protection_shutdown);
+EXPORT_SYMBOL_GPL(__hw_protection_shutdown);
static int __init reboot_setup(char *str)
{
diff --git a/kernel/relay.c b/kernel/relay.c
index 83fe0325cde1..a8e90e98bf2c 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1073,167 +1073,6 @@ static ssize_t relay_file_read(struct file *filp,
return written;
}
-static void relay_consume_bytes(struct rchan_buf *rbuf, int bytes_consumed)
-{
- rbuf->bytes_consumed += bytes_consumed;
-
- if (rbuf->bytes_consumed >= rbuf->chan->subbuf_size) {
- relay_subbufs_consumed(rbuf->chan, rbuf->cpu, 1);
- rbuf->bytes_consumed %= rbuf->chan->subbuf_size;
- }
-}
-
-static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
-{
- struct rchan_buf *rbuf;
-
- rbuf = (struct rchan_buf *)page_private(buf->page);
- relay_consume_bytes(rbuf, buf->private);
-}
-
-static const struct pipe_buf_operations relay_pipe_buf_ops = {
- .release = relay_pipe_buf_release,
- .try_steal = generic_pipe_buf_try_steal,
- .get = generic_pipe_buf_get,
-};
-
-static void relay_page_release(struct splice_pipe_desc *spd, unsigned int i)
-{
-}
-
-/*
- * subbuf_splice_actor - splice up to one subbuf's worth of data
- */
-static ssize_t subbuf_splice_actor(struct file *in,
- loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t len,
- unsigned int flags,
- int *nonpad_ret)
-{
- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
- struct rchan_buf *rbuf = in->private_data;
- unsigned int subbuf_size = rbuf->chan->subbuf_size;
- uint64_t pos = (uint64_t) *ppos;
- uint32_t alloc_size = (uint32_t) rbuf->chan->alloc_size;
- size_t read_start = (size_t) do_div(pos, alloc_size);
- size_t read_subbuf = read_start / subbuf_size;
- size_t padding = rbuf->padding[read_subbuf];
- size_t nonpad_end = read_subbuf * subbuf_size + subbuf_size - padding;
- struct page *pages[PIPE_DEF_BUFFERS];
- struct partial_page partial[PIPE_DEF_BUFFERS];
- struct splice_pipe_desc spd = {
- .pages = pages,
- .nr_pages = 0,
- .nr_pages_max = PIPE_DEF_BUFFERS,
- .partial = partial,
- .ops = &relay_pipe_buf_ops,
- .spd_release = relay_page_release,
- };
- ssize_t ret;
-
- if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
- return 0;
- if (splice_grow_spd(pipe, &spd))
- return -ENOMEM;
-
- /*
- * Adjust read len, if longer than what is available
- */
- if (len > (subbuf_size - read_start % subbuf_size))
- len = subbuf_size - read_start % subbuf_size;
-
- subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT;
- pidx = (read_start / PAGE_SIZE) % subbuf_pages;
- poff = read_start & ~PAGE_MASK;
- nr_pages = min_t(unsigned int, subbuf_pages, spd.nr_pages_max);
-
- for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) {
- unsigned int this_len, this_end, private;
- unsigned int cur_pos = read_start + total_len;
-
- if (!len)
- break;
-
- this_len = min_t(unsigned long, len, PAGE_SIZE - poff);
- private = this_len;
-
- spd.pages[spd.nr_pages] = rbuf->page_array[pidx];
- spd.partial[spd.nr_pages].offset = poff;
-
- this_end = cur_pos + this_len;
- if (this_end >= nonpad_end) {
- this_len = nonpad_end - cur_pos;
- private = this_len + padding;
- }
- spd.partial[spd.nr_pages].len = this_len;
- spd.partial[spd.nr_pages].private = private;
-
- len -= this_len;
- total_len += this_len;
- poff = 0;
- pidx = (pidx + 1) % subbuf_pages;
-
- if (this_end >= nonpad_end) {
- spd.nr_pages++;
- break;
- }
- }
-
- ret = 0;
- if (!spd.nr_pages)
- goto out;
-
- ret = *nonpad_ret = splice_to_pipe(pipe, &spd);
- if (ret < 0 || ret < total_len)
- goto out;
-
- if (read_start + ret == nonpad_end)
- ret += padding;
-
-out:
- splice_shrink_spd(&spd);
- return ret;
-}
-
-static ssize_t relay_file_splice_read(struct file *in,
- loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t len,
- unsigned int flags)
-{
- ssize_t spliced;
- int ret;
- int nonpad_ret = 0;
-
- ret = 0;
- spliced = 0;
-
- while (len && !spliced) {
- ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret);
- if (ret < 0)
- break;
- else if (!ret) {
- if (flags & SPLICE_F_NONBLOCK)
- ret = -EAGAIN;
- break;
- }
-
- *ppos += ret;
- if (ret > len)
- len = 0;
- else
- len -= ret;
- spliced += nonpad_ret;
- nonpad_ret = 0;
- }
-
- if (spliced)
- return spliced;
-
- return ret;
-}
const struct file_operations relay_file_operations = {
.open = relay_file_open,
@@ -1242,6 +1081,5 @@ const struct file_operations relay_file_operations = {
.read = relay_file_read,
.llseek = no_llseek,
.release = relay_file_release,
- .splice_read = relay_file_splice_read,
};
EXPORT_SYMBOL_GPL(relay_file_operations);
diff --git a/kernel/resource.c b/kernel/resource.c
index 91be1bc50b60..fcbca39dbc45 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -27,6 +27,8 @@
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
#include <asm/io.h>
@@ -430,6 +432,61 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
}
/*
+ * This function, being a variant of walk_system_ram_res(), calls the @func
+ * callback against all memory ranges of type System RAM which are marked as
+ * IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY in reversed order, i.e., from
+ * higher to lower.
+ */
+int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *))
+{
+ struct resource res, *rams;
+ int rams_size = 16, i;
+ unsigned long flags;
+ int ret = -1;
+
+ /* create a list */
+ rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
+ if (!rams)
+ return ret;
+
+ flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ i = 0;
+ while ((start < end) &&
+ (!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
+ if (i >= rams_size) {
+ /* re-alloc */
+ struct resource *rams_new;
+
+ rams_new = kvrealloc(rams, rams_size * sizeof(struct resource),
+ (rams_size + 16) * sizeof(struct resource),
+ GFP_KERNEL);
+ if (!rams_new)
+ goto out;
+
+ rams = rams_new;
+ rams_size += 16;
+ }
+
+ rams[i].start = res.start;
+ rams[i++].end = res.end;
+
+ start = res.end + 1;
+ }
+
+ /* go reverse */
+ for (i--; i >= 0; i--) {
+ ret = (*func)(&rams[i], arg);
+ if (ret)
+ break;
+ }
+
+out:
+ kvfree(rams);
+ return ret;
+}
+
+/*
* This function calls the @func callback against all memory ranges, which
* are ranges marked as IORESOURCE_MEM and IORESOUCE_BUSY.
*/
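
/* An illustrative userspace sketch of the collect-then-reverse approach
   walk_system_ram_res_rev() takes above; the range data and helper names are
   made up for the example (the kernel discovers ranges via
   find_next_iomem_res() during the forward pass). */
#include <stdio.h>
#include <stdlib.h>

struct range { unsigned long start, end; };

static int walk_ranges_rev(const struct range *all, int nr,
			   int (*func)(const struct range *, void *), void *arg)
{
	struct range *rams;
	int i, ret = -1;

	rams = calloc(nr, sizeof(*rams));	/* like kvcalloc() */
	if (!rams)
		return ret;

	for (i = 0; i < nr; i++)		/* forward collection pass */
		rams[i] = all[i];

	for (i = nr - 1; i >= 0; i--) {		/* callbacks from highest to lowest */
		ret = func(&rams[i], arg);
		if (ret)
			break;
	}

	free(rams);
	return ret;
}

static int show(const struct range *r, void *arg)
{
	printf("[%#lx-%#lx]\n", r->start, r->end);
	return 0;
}

int main(void)
{
	struct range ram[] = { { 0x1000, 0x1fff }, { 0x4000, 0x7fff } };

	return walk_ranges_rev(ram, 2, show, NULL) < 0;
}
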
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a708d225c28e..db4be4921e7f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1131,6 +1131,28 @@ static void wake_up_idle_cpu(int cpu)
if (cpu == smp_processor_id())
return;
+ /*
+ * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
+ * part of the idle loop. This forces an exit from the idle loop
+ * and a round trip to schedule(). Now this could be optimized
+ * because a simple new idle loop iteration is enough to
+ * re-evaluate the next tick. Provided some re-ordering of tick
+ * nohz functions that would need to follow TIF_NR_POLLING
+ * clearing:
+ *
+ * - On most archs, a simple fetch_or on ti::flags with a
+ * "0" value would be enough to know if an IPI needs to be sent.
+ *
+ * - x86 needs to perform a last need_resched() check between
+ * monitor and mwait which doesn't take timers into account.
+ * There a dedicated TIF_TIMER flag would be required to
+ * fetch_or here and be checked along with TIF_NEED_RESCHED
+ * before mwait().
+ *
+ * However, remote timer enqueue is not such a frequent event
+ * and testing of the above solutions didn't appear to report
+ * much benefit.
+ */
if (set_nr_and_not_polling(rq->idle))
smp_send_reschedule(cpu);
else
@@ -2124,12 +2146,14 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
enqueue_task(rq, p, flags);
- p->on_rq = TASK_ON_RQ_QUEUED;
+ WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
+ ASSERT_EXCLUSIVE_WRITER(p->on_rq);
}
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
- p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
+ WRITE_ONCE(p->on_rq, (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING);
+ ASSERT_EXCLUSIVE_WRITER(p->on_rq);
dequeue_task(rq, p, flags);
}
@@ -3795,6 +3819,8 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
rq->idle_stamp = 0;
}
#endif
+
+ p->dl_server = NULL;
}
/*
@@ -4509,10 +4535,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
memset(&p->stats, 0, sizeof(p->stats));
#endif
- RB_CLEAR_NODE(&p->dl.rb_node);
- init_dl_task_timer(&p->dl);
- init_dl_inactive_task_timer(&p->dl);
- __dl_clear_params(p);
+ init_dl_entity(&p->dl);
INIT_LIST_HEAD(&p->rt.run_list);
p->rt.timeout = 0;
@@ -6004,12 +6027,27 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
p = pick_next_task_idle(rq);
}
+ /*
+ * This is the fast path; it cannot be a DL server pick;
+ * therefore even if @p == @prev, ->dl_server must be NULL.
+ */
+ if (p->dl_server)
+ p->dl_server = NULL;
+
return p;
}
restart:
put_prev_task_balance(rq, prev, rf);
+ /*
+ * We've updated @prev and no longer need the server link, clear it.
+ * Must be done before ->pick_next_task() because that can (re)set
+ * ->dl_server.
+ */
+ if (prev->dl_server)
+ prev->dl_server = NULL;
+
for_each_class(class) {
p = class->pick_next_task(rq);
if (p)
@@ -7429,18 +7467,13 @@ int sched_core_idle_cpu(int cpu)
* required to meet deadlines.
*/
unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
- enum cpu_util_type type,
- struct task_struct *p)
+ unsigned long *min,
+ unsigned long *max)
{
- unsigned long dl_util, util, irq, max;
+ unsigned long util, irq, scale;
struct rq *rq = cpu_rq(cpu);
- max = arch_scale_cpu_capacity(cpu);
-
- if (!uclamp_is_used() &&
- type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
- return max;
- }
+ scale = arch_scale_cpu_capacity(cpu);
/*
* Early check to see if IRQ/steal time saturates the CPU, can be
@@ -7448,45 +7481,49 @@ unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
* update_irq_load_avg().
*/
irq = cpu_util_irq(rq);
- if (unlikely(irq >= max))
- return max;
+ if (unlikely(irq >= scale)) {
+ if (min)
+ *min = scale;
+ if (max)
+ *max = scale;
+ return scale;
+ }
+
+ if (min) {
+ /*
+ * The minimum utilization returns the highest level between:
+ * - the computed DL bandwidth needed with the IRQ pressure which
+ * steals time to the deadline task.
+ * - The minimum performance requirement for CFS and/or RT.
+ */
+ *min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));
+
+ /*
+ * When an RT task is runnable and uclamp is not used, we must
+ * ensure that the task will run at maximum compute capacity.
+ */
+ if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
+ *min = max(*min, scale);
+ }
/*
* Because the time spend on RT/DL tasks is visible as 'lost' time to
* CFS tasks and we use the same metric to track the effective
* utilization (PELT windows are synchronized) we can directly add them
* to obtain the CPU's actual utilization.
- *
- * CFS and RT utilization can be boosted or capped, depending on
- * utilization clamp constraints requested by currently RUNNABLE
- * tasks.
- * When there are no CFS RUNNABLE tasks, clamps are released and
- * frequency will be gracefully reduced with the utilization decay.
*/
util = util_cfs + cpu_util_rt(rq);
- if (type == FREQUENCY_UTIL)
- util = uclamp_rq_util_with(rq, util, p);
-
- dl_util = cpu_util_dl(rq);
+ util += cpu_util_dl(rq);
/*
- * For frequency selection we do not make cpu_util_dl() a permanent part
- * of this sum because we want to use cpu_bw_dl() later on, but we need
- * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
- * that we select f_max when there is no idle time.
- *
- * NOTE: numerical errors or stop class might cause us to not quite hit
- * saturation when we should -- something for later.
+ * The maximum hint is a soft bandwidth requirement, which can be lower
+ * than the actual utilization because of uclamp_max requirements.
*/
- if (util + dl_util >= max)
- return max;
+ if (max)
+ *max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));
- /*
- * OTOH, for energy computation we need the estimated running time, so
- * include util_dl and ignore dl_bw.
- */
- if (type == ENERGY_UTIL)
- util += dl_util;
+ if (util >= scale)
+ return scale;
/*
* There is still idle time; further improve the number by using the
@@ -7497,28 +7534,15 @@ unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
* U' = irq + --------- * U
* max
*/
- util = scale_irq_capacity(util, irq, max);
+ util = scale_irq_capacity(util, irq, scale);
util += irq;
- /*
- * Bandwidth required by DEADLINE must always be granted while, for
- * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
- * to gracefully reduce the frequency when no tasks show up for longer
- * periods of time.
- *
- * Ideally we would like to set bw_dl as min/guaranteed freq and util +
- * bw_dl as requested freq. However, cpufreq is not yet ready for such
- * an interface. So, we only do the latter for now.
- */
- if (type == FREQUENCY_UTIL)
- util += cpu_bw_dl(rq);
-
- return min(max, util);
+ return min(scale, util);
}
unsigned long sched_cpu_util(int cpu)
{
- return effective_cpu_util(cpu, cpu_util_cfs(cpu), ENERGY_UTIL, NULL);
+ return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
}
#endif /* CONFIG_SMP */
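
/* A worked example (plain userspace C, assumed capacity values) of the IRQ
   scaling step that effective_cpu_util() keeps above:
   U' = irq + U * (scale - irq) / scale, i.e. CFS/RT/DL utilization only gets
   the CPU time left over after IRQ/steal time. */
#include <stdio.h>

static unsigned long scale_irq_capacity(unsigned long util,
					unsigned long irq,
					unsigned long scale)
{
	return util * (scale - irq) / scale;
}

int main(void)
{
	unsigned long scale = 1024;	/* arch_scale_cpu_capacity(), assumed */
	unsigned long irq   = 128;	/* time eaten by IRQ/steal, assumed   */
	unsigned long util  = 512;	/* CFS + RT + DL utilization, assumed */

	util = scale_irq_capacity(util, irq, scale);
	util += irq;			/* 512 * 896 / 1024 + 128 = 576 */

	printf("effective util = %lu / %lu\n",
	       util < scale ? util : scale, scale);
	return 0;
}
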
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 5888176354e2..95c3c097083e 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -47,7 +47,7 @@ struct sugov_cpu {
u64 last_update;
unsigned long util;
- unsigned long bw_dl;
+ unsigned long bw_min;
/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
@@ -115,6 +115,28 @@ static void sugov_deferred_update(struct sugov_policy *sg_policy)
}
/**
+ * get_capacity_ref_freq - get the reference frequency that has been used to
+ * correlate frequency and compute capacity for a given cpufreq policy. We use
+ * the CPU managing it for the arch_scale_freq_ref() call in the function.
+ * @policy: the cpufreq policy of the CPU in question.
+ *
+ * Return: the reference CPU frequency to compute a capacity.
+ */
+static __always_inline
+unsigned long get_capacity_ref_freq(struct cpufreq_policy *policy)
+{
+ unsigned int freq = arch_scale_freq_ref(policy->cpu);
+
+ if (freq)
+ return freq;
+
+ if (arch_scale_freq_invariant())
+ return policy->cpuinfo.max_freq;
+
+ return policy->cur;
+}
+
+/**
* get_next_freq - Compute a new frequency for a given cpufreq policy.
* @sg_policy: schedutil policy object to compute the new frequency for.
* @util: Current CPU utilization.
@@ -140,10 +162,9 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
unsigned long util, unsigned long max)
{
struct cpufreq_policy *policy = sg_policy->policy;
- unsigned int freq = arch_scale_freq_invariant() ?
- policy->cpuinfo.max_freq : policy->cur;
+ unsigned int freq;
- util = map_util_perf(util);
+ freq = get_capacity_ref_freq(policy);
freq = map_util_freq(util, freq, max);
if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
@@ -153,14 +174,31 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
return cpufreq_driver_resolve_freq(policy, freq);
}
-static void sugov_get_util(struct sugov_cpu *sg_cpu)
+unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
+ unsigned long min,
+ unsigned long max)
+{
+ /* Add dvfs headroom to actual utilization */
+ actual = map_util_perf(actual);
+ /* Actually we don't need to target the max performance */
+ if (actual < max)
+ max = actual;
+
+ /*
+ * Ensure at least minimum performance while providing more compute
+ * capacity when possible.
+ */
+ return max(min, max);
+}
+
+static void sugov_get_util(struct sugov_cpu *sg_cpu, unsigned long boost)
{
- unsigned long util = cpu_util_cfs_boost(sg_cpu->cpu);
- struct rq *rq = cpu_rq(sg_cpu->cpu);
+ unsigned long min, max, util = cpu_util_cfs_boost(sg_cpu->cpu);
- sg_cpu->bw_dl = cpu_bw_dl(rq);
- sg_cpu->util = effective_cpu_util(sg_cpu->cpu, util,
- FREQUENCY_UTIL, NULL);
+ util = effective_cpu_util(sg_cpu->cpu, util, &min, &max);
+ util = max(util, boost);
+ sg_cpu->bw_min = min;
+ sg_cpu->util = sugov_effective_cpu_perf(sg_cpu->cpu, util, min, max);
}
/**
@@ -251,18 +289,16 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
* This mechanism is designed to boost high frequently IO waiting tasks, while
* being more conservative on tasks which does sporadic IO operations.
*/
-static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
+static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
unsigned long max_cap)
{
- unsigned long boost;
-
/* No boost currently required */
if (!sg_cpu->iowait_boost)
- return;
+ return 0;
/* Reset boost if the CPU appears to have been idle enough */
if (sugov_iowait_reset(sg_cpu, time, false))
- return;
+ return 0;
if (!sg_cpu->iowait_boost_pending) {
/*
@@ -271,7 +307,7 @@ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
sg_cpu->iowait_boost >>= 1;
if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
sg_cpu->iowait_boost = 0;
- return;
+ return 0;
}
}
@@ -281,10 +317,7 @@ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
* sg_cpu->util is already in capacity scale; convert iowait_boost
* into the same scale so we can compare.
*/
- boost = (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
- boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
- if (sg_cpu->util < boost)
- sg_cpu->util = boost;
+ return (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
}
#ifdef CONFIG_NO_HZ_COMMON
@@ -306,7 +339,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
*/
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
- if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
+ if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min)
sg_cpu->sg_policy->limits_changed = true;
}
@@ -314,6 +347,8 @@ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
u64 time, unsigned long max_cap,
unsigned int flags)
{
+ unsigned long boost;
+
sugov_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
@@ -322,8 +357,8 @@ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
return false;
- sugov_get_util(sg_cpu);
- sugov_iowait_apply(sg_cpu, time, max_cap);
+ boost = sugov_iowait_apply(sg_cpu, time, max_cap);
+ sugov_get_util(sg_cpu, boost);
return true;
}
@@ -407,8 +442,8 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
sg_cpu->util = prev_util;
- cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
- map_util_perf(sg_cpu->util), max_cap);
+ cpufreq_driver_adjust_perf(sg_cpu->cpu, sg_cpu->bw_min,
+ sg_cpu->util, max_cap);
sg_cpu->sg_policy->last_freq_update_time = time;
}
@@ -424,9 +459,10 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
for_each_cpu(j, policy->cpus) {
struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
+ unsigned long boost;
- sugov_get_util(j_sg_cpu);
- sugov_iowait_apply(j_sg_cpu, time, max_cap);
+ boost = sugov_iowait_apply(j_sg_cpu, time, max_cap);
+ sugov_get_util(j_sg_cpu, boost);
util = max(j_sg_cpu->util, util);
}
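
/* A small sketch of the clamping performed by sugov_effective_cpu_perf()
   above, with assumed utilization numbers; the 1.25x headroom mirrors
   map_util_perf(). Not code from the patch. */
#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

static unsigned long effective_perf(unsigned long actual,
				    unsigned long min, unsigned long max)
{
	actual = actual + (actual >> 2);	/* DVFS headroom, ~1.25x */
	/* Cap by the maximum hint, but never go below the minimum requirement. */
	return max_ul(min, min_ul(actual, max));
}

int main(void)
{
	/* min from DL bandwidth + uclamp_min, max from uclamp_max (assumed numbers) */
	printf("%lu\n", effective_perf(400, 128, 1024));  /* 500: headroom applied */
	printf("%lu\n", effective_perf(50,  128, 1024));  /* 128: floor at min     */
	printf("%lu\n", effective_perf(900, 128, 600));   /* 600: capped by max    */
	return 0;
}
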
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b28114478b82..a04a436af8cc 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -54,8 +54,14 @@ static int __init sched_dl_sysctl_init(void)
late_initcall(sched_dl_sysctl_init);
#endif
+static bool dl_server(struct sched_dl_entity *dl_se)
+{
+ return dl_se->dl_server;
+}
+
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
+ BUG_ON(dl_server(dl_se));
return container_of(dl_se, struct task_struct, dl);
}
@@ -64,12 +70,19 @@ static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
return container_of(dl_rq, struct rq, dl);
}
-static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
+static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se)
{
- struct task_struct *p = dl_task_of(dl_se);
- struct rq *rq = task_rq(p);
+ struct rq *rq = dl_se->rq;
+
+ if (!dl_server(dl_se))
+ rq = task_rq(dl_task_of(dl_se));
+
+ return rq;
+}
- return &rq->dl;
+static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
+{
+ return &rq_of_dl_se(dl_se)->dl;
}
static inline int on_dl_rq(struct sched_dl_entity *dl_se)
@@ -335,6 +348,8 @@ static void dl_change_utilization(struct task_struct *p, u64 new_bw)
__add_rq_bw(new_bw, &rq->dl);
}
+static void __dl_clear_params(struct sched_dl_entity *dl_se);
+
/*
* The utilization of a task cannot be immediately removed from
* the rq active utilization (running_bw) when the task blocks.
@@ -389,12 +404,11 @@ static void dl_change_utilization(struct task_struct *p, u64 new_bw)
* up, and checks if the task is still in the "ACTIVE non contending"
* state or not (in the second case, it updates running_bw).
*/
-static void task_non_contending(struct task_struct *p)
+static void task_non_contending(struct sched_dl_entity *dl_se)
{
- struct sched_dl_entity *dl_se = &p->dl;
struct hrtimer *timer = &dl_se->inactive_timer;
- struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
- struct rq *rq = rq_of_dl_rq(dl_rq);
+ struct rq *rq = rq_of_dl_se(dl_se);
+ struct dl_rq *dl_rq = &rq->dl;
s64 zerolag_time;
/*
@@ -424,24 +438,33 @@ static void task_non_contending(struct task_struct *p)
* utilization now, instead of starting a timer
*/
if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
- if (dl_task(p))
+ if (dl_server(dl_se)) {
sub_running_bw(dl_se, dl_rq);
- if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
- struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
-
- if (READ_ONCE(p->__state) == TASK_DEAD)
- sub_rq_bw(&p->dl, &rq->dl);
- raw_spin_lock(&dl_b->lock);
- __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
- raw_spin_unlock(&dl_b->lock);
- __dl_clear_params(p);
+ } else {
+ struct task_struct *p = dl_task_of(dl_se);
+
+ if (dl_task(p))
+ sub_running_bw(dl_se, dl_rq);
+
+ if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
+ struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+
+ if (READ_ONCE(p->__state) == TASK_DEAD)
+ sub_rq_bw(dl_se, &rq->dl);
+ raw_spin_lock(&dl_b->lock);
+ __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
+ raw_spin_unlock(&dl_b->lock);
+ __dl_clear_params(dl_se);
+ }
}
return;
}
dl_se->dl_non_contending = 1;
- get_task_struct(p);
+ if (!dl_server(dl_se))
+ get_task_struct(dl_task_of(dl_se));
+
hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
}
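
The branch above decides between releasing bandwidth immediately and arming the inactive timer based on the 0-lag time, which the function computes just before this hunk as the deadline minus the wall-clock equivalent of the remaining runtime at the reserved bandwidth. A hedged numeric sketch of that decision, with invented values (the kernel does this arithmetic in nanoseconds):

#include <stdio.h>

int main(void)
{
	/* Invented reservation: 4 ms every 10 ms, 2 ms of runtime left,
	 * absolute deadline at t = 18 ms, current time t = 11 ms. */
	long long dl_runtime = 4000000, dl_period = 10000000;
	long long runtime = 2000000, deadline = 18000000, now = 11000000;

	/* 0-lag instant: the deadline minus the wall-clock time the remaining
	 * budget represents at the reserved utilization. */
	long long zerolag = deadline - (runtime * dl_period) / dl_runtime;

	if (zerolag - now > 0)
		printf("0-lag at %lld ns: arm the inactive timer\n", zerolag);
	else
		printf("0-lag already passed: drop the bandwidth now\n");
	return 0;
}
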
@@ -468,8 +491,10 @@ static void task_contending(struct sched_dl_entity *dl_se, int flags)
* will not touch the rq's active utilization,
* so we are still safe.
*/
- if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
- put_task_struct(dl_task_of(dl_se));
+ if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
+ if (!dl_server(dl_se))
+ put_task_struct(dl_task_of(dl_se));
+ }
} else {
/*
* Since "dl_non_contending" is not set, the
@@ -482,10 +507,8 @@ static void task_contending(struct sched_dl_entity *dl_se, int flags)
}
}
-static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
+static inline int is_leftmost(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
- struct sched_dl_entity *dl_se = &p->dl;
-
return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
}
@@ -737,8 +760,10 @@ static inline void deadline_queue_pull_task(struct rq *rq)
}
#endif /* CONFIG_SMP */
+static void
+enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags);
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
-static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
+static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags);
static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);
static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
@@ -986,8 +1011,7 @@ static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
*/
static void update_dl_entity(struct sched_dl_entity *dl_se)
{
- struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
- struct rq *rq = rq_of_dl_rq(dl_rq);
+ struct rq *rq = rq_of_dl_se(dl_se);
if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
dl_entity_overflow(dl_se, rq_clock(rq))) {
@@ -1018,11 +1042,11 @@ static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
* actually started or not (i.e., the replenishment instant is in
* the future or in the past).
*/
-static int start_dl_timer(struct task_struct *p)
+static int start_dl_timer(struct sched_dl_entity *dl_se)
{
- struct sched_dl_entity *dl_se = &p->dl;
struct hrtimer *timer = &dl_se->dl_timer;
- struct rq *rq = task_rq(p);
+ struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+ struct rq *rq = rq_of_dl_rq(dl_rq);
ktime_t now, act;
s64 delta;
@@ -1056,13 +1080,33 @@ static int start_dl_timer(struct task_struct *p)
* and observe our state.
*/
if (!hrtimer_is_queued(timer)) {
- get_task_struct(p);
+ if (!dl_server(dl_se))
+ get_task_struct(dl_task_of(dl_se));
hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
}
return 1;
}
+static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
+{
+#ifdef CONFIG_SMP
+ /*
+ * Queueing this task back might have overloaded rq, check if we need
+ * to kick someone away.
+ */
+ if (has_pushable_dl_tasks(rq)) {
+ /*
+ * Nothing relies on rq->lock after this, so it's safe to drop
+ * rq->lock.
+ */
+ rq_unpin_lock(rq, rf);
+ push_dl_task(rq);
+ rq_repin_lock(rq, rf);
+ }
+#endif
+}
+
/*
* This is the bandwidth enforcement timer callback. If here, we know
* a task is not on its dl_rq, since the fact that the timer was running
@@ -1081,10 +1125,34 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
struct sched_dl_entity *dl_se = container_of(timer,
struct sched_dl_entity,
dl_timer);
- struct task_struct *p = dl_task_of(dl_se);
+ struct task_struct *p;
struct rq_flags rf;
struct rq *rq;
+ if (dl_server(dl_se)) {
+ struct rq *rq = rq_of_dl_se(dl_se);
+ struct rq_flags rf;
+
+ rq_lock(rq, &rf);
+ if (dl_se->dl_throttled) {
+ sched_clock_tick();
+ update_rq_clock(rq);
+
+ if (dl_se->server_has_tasks(dl_se)) {
+ enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
+ resched_curr(rq);
+ __push_dl_task(rq, &rf);
+ } else {
+ replenish_dl_entity(dl_se);
+ }
+
+ }
+ rq_unlock(rq, &rf);
+
+ return HRTIMER_NORESTART;
+ }
+
+ p = dl_task_of(dl_se);
rq = task_rq_lock(p, &rf);
/*
@@ -1155,21 +1223,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
else
resched_curr(rq);
-#ifdef CONFIG_SMP
- /*
- * Queueing this task back might have overloaded rq, check if we need
- * to kick someone away.
- */
- if (has_pushable_dl_tasks(rq)) {
- /*
- * Nothing relies on rq->lock after this, so its safe to drop
- * rq->lock.
- */
- rq_unpin_lock(rq, &rf);
- push_dl_task(rq);
- rq_repin_lock(rq, &rf);
- }
-#endif
+ __push_dl_task(rq, &rf);
unlock:
task_rq_unlock(rq, p, &rf);
@@ -1183,7 +1237,7 @@ unlock:
return HRTIMER_NORESTART;
}
-void init_dl_task_timer(struct sched_dl_entity *dl_se)
+static void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
struct hrtimer *timer = &dl_se->dl_timer;
@@ -1211,12 +1265,11 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
*/
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
- struct task_struct *p = dl_task_of(dl_se);
- struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
+ struct rq *rq = rq_of_dl_se(dl_se);
if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
- if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
+ if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
return;
dl_se->dl_throttled = 1;
if (dl_se->runtime > 0)
@@ -1267,44 +1320,19 @@ static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
return (delta * u_act) >> BW_SHIFT;
}
-/*
- * Update the current task's runtime statistics (provided it is still
- * a -deadline task and has not been removed from the dl_rq).
- */
-static void update_curr_dl(struct rq *rq)
+static inline void
+update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
+ int flags);
+static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
{
- struct task_struct *curr = rq->curr;
- struct sched_dl_entity *dl_se = &curr->dl;
- u64 delta_exec, scaled_delta_exec;
- int cpu = cpu_of(rq);
- u64 now;
-
- if (!dl_task(curr) || !on_dl_rq(dl_se))
- return;
+ s64 scaled_delta_exec;
- /*
- * Consumed budget is computed considering the time as
- * observed by schedulable tasks (excluding time spent
- * in hardirq context, etc.). Deadlines are instead
- * computed using hard walltime. This seems to be the more
- * natural solution, but the full ramifications of this
- * approach need further study.
- */
- now = rq_clock_task(rq);
- delta_exec = now - curr->se.exec_start;
- if (unlikely((s64)delta_exec <= 0)) {
+ if (unlikely(delta_exec <= 0)) {
if (unlikely(dl_se->dl_yielded))
goto throttle;
return;
}
- schedstat_set(curr->stats.exec_max,
- max(curr->stats.exec_max, delta_exec));
-
- trace_sched_stat_runtime(curr, delta_exec, 0);
-
- update_current_exec_runtime(curr, now, delta_exec);
-
if (dl_entity_is_special(dl_se))
return;
@@ -1316,10 +1344,9 @@ static void update_curr_dl(struct rq *rq)
* according to current frequency and CPU maximum capacity.
*/
if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
- scaled_delta_exec = grub_reclaim(delta_exec,
- rq,
- &curr->dl);
+ scaled_delta_exec = grub_reclaim(delta_exec, rq, dl_se);
} else {
+ int cpu = cpu_of(rq);
unsigned long scale_freq = arch_scale_freq_capacity(cpu);
unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
@@ -1338,11 +1365,20 @@ throttle:
(dl_se->flags & SCHED_FLAG_DL_OVERRUN))
dl_se->dl_overrun = 1;
- __dequeue_task_dl(rq, curr, 0);
- if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
- enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
+ dequeue_dl_entity(dl_se, 0);
+ if (!dl_server(dl_se)) {
+ update_stats_dequeue_dl(&rq->dl, dl_se, 0);
+ dequeue_pushable_dl_task(rq, dl_task_of(dl_se));
+ }
+
+ if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se))) {
+ if (dl_server(dl_se))
+ enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
+ else
+ enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
+ }
- if (!is_leftmost(curr, &rq->dl))
+ if (!is_leftmost(dl_se, &rq->dl))
resched_curr(rq);
}
@@ -1372,20 +1408,82 @@ throttle:
}
}
+void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
+{
+ update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
+}
+
+void dl_server_start(struct sched_dl_entity *dl_se)
+{
+ if (!dl_server(dl_se)) {
+ dl_se->dl_server = 1;
+ setup_new_dl_entity(dl_se);
+ }
+ enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
+}
+
+void dl_server_stop(struct sched_dl_entity *dl_se)
+{
+ dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
+}
+
+void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
+ dl_server_has_tasks_f has_tasks,
+ dl_server_pick_f pick)
+{
+ dl_se->rq = rq;
+ dl_se->server_has_tasks = has_tasks;
+ dl_se->server_pick = pick;
+}
+
+/*
+ * Update the current task's runtime statistics (provided it is still
+ * a -deadline task and has not been removed from the dl_rq).
+ */
+static void update_curr_dl(struct rq *rq)
+{
+ struct task_struct *curr = rq->curr;
+ struct sched_dl_entity *dl_se = &curr->dl;
+ s64 delta_exec;
+
+ if (!dl_task(curr) || !on_dl_rq(dl_se))
+ return;
+
+ /*
+ * Consumed budget is computed considering the time as
+ * observed by schedulable tasks (excluding time spent
+ * in hardirq context, etc.). Deadlines are instead
+ * computed using hard walltime. This seems to be the more
+ * natural solution, but the full ramifications of this
+ * approach need further study.
+ */
+ delta_exec = update_curr_common(rq);
+ update_curr_dl_se(rq, dl_se, delta_exec);
+}
+
static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
struct sched_dl_entity *dl_se = container_of(timer,
struct sched_dl_entity,
inactive_timer);
- struct task_struct *p = dl_task_of(dl_se);
+ struct task_struct *p = NULL;
struct rq_flags rf;
struct rq *rq;
- rq = task_rq_lock(p, &rf);
+ if (!dl_server(dl_se)) {
+ p = dl_task_of(dl_se);
+ rq = task_rq_lock(p, &rf);
+ } else {
+ rq = dl_se->rq;
+ rq_lock(rq, &rf);
+ }
sched_clock_tick();
update_rq_clock(rq);
+ if (dl_server(dl_se))
+ goto no_task;
+
if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
@@ -1398,23 +1496,30 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
raw_spin_lock(&dl_b->lock);
__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
raw_spin_unlock(&dl_b->lock);
- __dl_clear_params(p);
+ __dl_clear_params(dl_se);
goto unlock;
}
+
+no_task:
if (dl_se->dl_non_contending == 0)
goto unlock;
sub_running_bw(dl_se, &rq->dl);
dl_se->dl_non_contending = 0;
unlock:
- task_rq_unlock(rq, p, &rf);
- put_task_struct(p);
+
+ if (!dl_server(dl_se)) {
+ task_rq_unlock(rq, p, &rf);
+ put_task_struct(p);
+ } else {
+ rq_unlock(rq, &rf);
+ }
return HRTIMER_NORESTART;
}
-void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
+static void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
struct hrtimer *timer = &dl_se->inactive_timer;
@@ -1472,10 +1577,8 @@ static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
- int prio = dl_task_of(dl_se)->prio;
u64 deadline = dl_se->deadline;
- WARN_ON(!dl_prio(prio));
dl_rq->dl_nr_running++;
add_nr_running(rq_of_dl_rq(dl_rq), 1);
@@ -1485,9 +1588,6 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
- int prio = dl_task_of(dl_se)->prio;
-
- WARN_ON(!dl_prio(prio));
WARN_ON(!dl_rq->dl_nr_running);
dl_rq->dl_nr_running--;
sub_nr_running(rq_of_dl_rq(dl_rq), 1);
@@ -1609,6 +1709,41 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
/*
+ * Check if a constrained deadline task was activated
+ * after the deadline but before the next period.
+ * If that is the case, the task will be throttled and
+ * the replenishment timer will be set to the next period.
+ */
+ if (!dl_se->dl_throttled && !dl_is_implicit(dl_se))
+ dl_check_constrained_dl(dl_se);
+
+ if (flags & (ENQUEUE_RESTORE|ENQUEUE_MIGRATING)) {
+ struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+
+ add_rq_bw(dl_se, dl_rq);
+ add_running_bw(dl_se, dl_rq);
+ }
+
+ /*
+ * If p is throttled, we do not enqueue it. In fact, if it exhausted
+ * its budget it needs a replenishment and, since it now is on
+ * its rq, the bandwidth timer callback (which clearly has not
+ * run yet) will take care of this.
+ * However, the active utilization does not depend on the fact
+ * that the task is on the runqueue or not (but depends on the
+ * task's state - in GRUB parlance, "inactive" vs "active contending").
+ * In other words, even if a task is throttled its utilization must
+ * be counted in the active utilization; hence, we need to call
+ * add_running_bw().
+ */
+ if (dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
+ if (flags & ENQUEUE_WAKEUP)
+ task_contending(dl_se, flags);
+
+ return;
+ }
+
+ /*
* If this is a wakeup or a new instance, the scheduling
* parameters of the task might need updating. Otherwise,
* we want a replenishment of its runtime.
@@ -1619,17 +1754,35 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
} else if (flags & ENQUEUE_REPLENISH) {
replenish_dl_entity(dl_se);
} else if ((flags & ENQUEUE_RESTORE) &&
- dl_time_before(dl_se->deadline,
- rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
+ dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
setup_new_dl_entity(dl_se);
}
__enqueue_dl_entity(dl_se);
}
-static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
+static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags)
{
__dequeue_dl_entity(dl_se);
+
+ if (flags & (DEQUEUE_SAVE|DEQUEUE_MIGRATING)) {
+ struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+
+ sub_running_bw(dl_se, dl_rq);
+ sub_rq_bw(dl_se, dl_rq);
+ }
+
+ /*
+ * This check allows to start the inactive timer (or to immediately
+ * decrease the active utilization, if needed) in two cases:
+ * when the task blocks and when it is terminating
+ * (p->state == TASK_DEAD). We can handle the two cases in the same
+ * way, because from GRUB's point of view the same thing is happening
+ * (the task moves from "active contending" to "active non contending"
+ * or "inactive")
+ */
+ if (flags & DEQUEUE_SLEEP)
+ task_non_contending(dl_se);
}
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
@@ -1674,76 +1827,31 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
return;
}
- /*
- * Check if a constrained deadline task was activated
- * after the deadline but before the next period.
- * If that is the case, the task will be throttled and
- * the replenishment timer will be set to the next period.
- */
- if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
- dl_check_constrained_dl(&p->dl);
-
- if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
- add_rq_bw(&p->dl, &rq->dl);
- add_running_bw(&p->dl, &rq->dl);
- }
-
- /*
- * If p is throttled, we do not enqueue it. In fact, if it exhausted
- * its budget it needs a replenishment and, since it now is on
- * its rq, the bandwidth timer callback (which clearly has not
- * run yet) will take care of this.
- * However, the active utilization does not depend on the fact
- * that the task is on the runqueue or not (but depends on the
- * task's state - in GRUB parlance, "inactive" vs "active contending").
- * In other words, even if a task is throttled its utilization must
- * be counted in the active utilization; hence, we need to call
- * add_running_bw().
- */
- if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
- if (flags & ENQUEUE_WAKEUP)
- task_contending(&p->dl, flags);
-
- return;
- }
-
check_schedstat_required();
update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
+ if (p->on_rq == TASK_ON_RQ_MIGRATING)
+ flags |= ENQUEUE_MIGRATING;
+
enqueue_dl_entity(&p->dl, flags);
- if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
- enqueue_pushable_dl_task(rq, p);
-}
+ if (dl_server(&p->dl))
+ return;
-static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
-{
- update_stats_dequeue_dl(&rq->dl, &p->dl, flags);
- dequeue_dl_entity(&p->dl);
- dequeue_pushable_dl_task(rq, p);
+ if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
+ enqueue_pushable_dl_task(rq, p);
}
static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
update_curr_dl(rq);
- __dequeue_task_dl(rq, p, flags);
- if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
- sub_running_bw(&p->dl, &rq->dl);
- sub_rq_bw(&p->dl, &rq->dl);
- }
+ if (p->on_rq == TASK_ON_RQ_MIGRATING)
+ flags |= DEQUEUE_MIGRATING;
- /*
- * This check allows to start the inactive timer (or to immediately
- * decrease the active utilization, if needed) in two cases:
- * when the task blocks and when it is terminating
- * (p->state == TASK_DEAD). We can handle the two cases in the same
- * way, because from GRUB's point of view the same thing is happening
- * (the task moves from "active contending" to "active non contending"
- * or "inactive")
- */
- if (flags & DEQUEUE_SLEEP)
- task_non_contending(p);
+ dequeue_dl_entity(&p->dl, flags);
+ if (!p->dl.dl_throttled && !dl_server(&p->dl))
+ dequeue_pushable_dl_task(rq, p);
}
/*
@@ -1933,12 +2041,12 @@ static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p,
}
#ifdef CONFIG_SCHED_HRTICK
-static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
+static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
{
- hrtick_start(rq, p->dl.runtime);
+ hrtick_start(rq, dl_se->runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
-static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
+static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
{
}
#endif
@@ -1958,9 +2066,6 @@ static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
if (!first)
return;
- if (hrtick_enabled_dl(rq))
- start_hrtick_dl(rq, p);
-
if (rq->curr->sched_class != &dl_sched_class)
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
@@ -1983,12 +2088,25 @@ static struct task_struct *pick_task_dl(struct rq *rq)
struct dl_rq *dl_rq = &rq->dl;
struct task_struct *p;
+again:
if (!sched_dl_runnable(rq))
return NULL;
dl_se = pick_next_dl_entity(dl_rq);
WARN_ON_ONCE(!dl_se);
- p = dl_task_of(dl_se);
+
+ if (dl_server(dl_se)) {
+ p = dl_se->server_pick(dl_se);
+ if (!p) {
+ WARN_ON_ONCE(1);
+ dl_se->dl_yielded = 1;
+ update_curr_dl_se(rq, dl_se, 0);
+ goto again;
+ }
+ p->dl_server = dl_se;
+ } else {
+ p = dl_task_of(dl_se);
+ }
return p;
}
@@ -1998,9 +2116,15 @@ static struct task_struct *pick_next_task_dl(struct rq *rq)
struct task_struct *p;
p = pick_task_dl(rq);
- if (p)
+ if (!p)
+ return p;
+
+ if (!p->dl_server)
set_next_task_dl(rq, p, true);
+ if (hrtick_enabled(rq))
+ start_hrtick_dl(rq, &p->dl);
+
return p;
}
@@ -2038,8 +2162,8 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
* be set and schedule() will start a new hrtick for the next task.
*/
if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
- is_leftmost(p, &rq->dl))
- start_hrtick_dl(rq, p);
+ is_leftmost(&p->dl, &rq->dl))
+ start_hrtick_dl(rq, &p->dl);
}
static void task_fork_dl(struct task_struct *p)
@@ -2558,7 +2682,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
* will reset the task parameters.
*/
if (task_on_rq_queued(p) && p->dl.dl_runtime)
- task_non_contending(p);
+ task_non_contending(&p->dl);
/*
* In case a task is setscheduled out from SCHED_DEADLINE we need to
@@ -2966,10 +3090,8 @@ bool __checkparam_dl(const struct sched_attr *attr)
/*
* This function clears the sched_dl_entity static params.
*/
-void __dl_clear_params(struct task_struct *p)
+static void __dl_clear_params(struct sched_dl_entity *dl_se)
{
- struct sched_dl_entity *dl_se = &p->dl;
-
dl_se->dl_runtime = 0;
dl_se->dl_deadline = 0;
dl_se->dl_period = 0;
@@ -2981,12 +3103,21 @@ void __dl_clear_params(struct task_struct *p)
dl_se->dl_yielded = 0;
dl_se->dl_non_contending = 0;
dl_se->dl_overrun = 0;
+ dl_se->dl_server = 0;
#ifdef CONFIG_RT_MUTEXES
dl_se->pi_se = dl_se;
#endif
}
+void init_dl_entity(struct sched_dl_entity *dl_se)
+{
+ RB_CLEAR_NODE(&dl_se->rb_node);
+ init_dl_task_timer(dl_se);
+ init_dl_inactive_task_timer(dl_se);
+ __dl_clear_params(dl_se);
+}
+
bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
{
struct sched_dl_entity *dl_se = &p->dl;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 4580a450700e..8d5d98a5834d 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -628,8 +628,8 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
- s64 left_vruntime = -1, min_vruntime, right_vruntime = -1, spread;
- struct sched_entity *last, *first;
+ s64 left_vruntime = -1, min_vruntime, right_vruntime = -1, left_deadline = -1, spread;
+ struct sched_entity *last, *first, *root;
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
@@ -644,15 +644,20 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SPLIT_NS(cfs_rq->exec_clock));
raw_spin_rq_lock_irqsave(rq, flags);
+ root = __pick_root_entity(cfs_rq);
+ if (root)
+ left_vruntime = root->min_vruntime;
first = __pick_first_entity(cfs_rq);
if (first)
- left_vruntime = first->vruntime;
+ left_deadline = first->deadline;
last = __pick_last_entity(cfs_rq);
if (last)
right_vruntime = last->vruntime;
min_vruntime = cfs_rq->min_vruntime;
raw_spin_rq_unlock_irqrestore(rq, flags);
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_deadline",
+ SPLIT_NS(left_deadline));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_vruntime",
SPLIT_NS(left_vruntime));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
@@ -679,8 +684,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
cfs_rq->avg.runnable_avg);
SEQ_printf(m, " .%-30s: %lu\n", "util_avg",
cfs_rq->avg.util_avg);
- SEQ_printf(m, " .%-30s: %u\n", "util_est_enqueued",
- cfs_rq->avg.util_est.enqueued);
+ SEQ_printf(m, " .%-30s: %u\n", "util_est",
+ cfs_rq->avg.util_est);
SEQ_printf(m, " .%-30s: %ld\n", "removed.load_avg",
cfs_rq->removed.load_avg);
SEQ_printf(m, " .%-30s: %ld\n", "removed.util_avg",
@@ -1070,8 +1075,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(se.avg.runnable_avg);
P(se.avg.util_avg);
P(se.avg.last_update_time);
- P(se.avg.util_est.ewma);
- PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
+ PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d7a3c63a2171..533547e3c90a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -551,7 +551,11 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
static inline bool entity_before(const struct sched_entity *a,
const struct sched_entity *b)
{
- return (s64)(a->vruntime - b->vruntime) < 0;
+ /*
+ * Tiebreak on vruntime seems unnecessary since it can
+ * hardly happen.
+ */
+ return (s64)(a->deadline - b->deadline) < 0;
}
static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -720,7 +724,7 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
 * Note: using 'avg_vruntime() > se->vruntime' is inaccurate due
* to the loss in precision caused by the division.
*/
-int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
{
struct sched_entity *curr = cfs_rq->curr;
s64 avg = cfs_rq->avg_vruntime;
@@ -733,7 +737,12 @@ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
load += weight;
}
- return avg >= entity_key(cfs_rq, se) * load;
+ return avg >= (s64)(vruntime - cfs_rq->min_vruntime) * load;
+}
+
+int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ return vruntime_eligible(cfs_rq, se->vruntime);
}
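
vruntime_eligible() above asks whether a given vruntime is no later than the load-weighted average vruntime of the queue; keeping both sides relative to cfs_rq->min_vruntime is what keeps the products inside s64. A small standalone sketch of the same comparison, with invented weights and vruntimes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Three queued entities, keys taken relative to min_vruntime. */
	int64_t min_vruntime = 1000;
	int64_t vruntime[] = { 1000, 1100, 1300 };
	int64_t weight[]   = { 1024, 1024, 2048 };
	int64_t avg = 0, load = 0;

	for (int i = 0; i < 3; i++) {
		avg  += weight[i] * (vruntime[i] - min_vruntime);
		load += weight[i];
	}

	/* avg >= key * load is the same test as "candidate <= weighted mean". */
	int64_t candidate = 1100;
	printf("eligible: %d\n", avg >= (candidate - min_vruntime) * load);
	return 0;
}
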
static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime)
@@ -752,9 +761,8 @@ static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime)
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
- struct sched_entity *se = __pick_first_entity(cfs_rq);
+ struct sched_entity *se = __pick_root_entity(cfs_rq);
struct sched_entity *curr = cfs_rq->curr;
-
u64 vruntime = cfs_rq->min_vruntime;
if (curr) {
@@ -766,9 +774,9 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
if (se) {
if (!curr)
- vruntime = se->vruntime;
+ vruntime = se->min_vruntime;
else
- vruntime = min_vruntime(vruntime, se->vruntime);
+ vruntime = min_vruntime(vruntime, se->min_vruntime);
}
/* ensure we never gain time by being placed backwards. */
@@ -781,34 +789,34 @@ static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
return entity_before(__node_2_se(a), __node_2_se(b));
}
-#define deadline_gt(field, lse, rse) ({ (s64)((lse)->field - (rse)->field) > 0; })
+#define vruntime_gt(field, lse, rse) ({ (s64)((lse)->field - (rse)->field) > 0; })
-static inline void __update_min_deadline(struct sched_entity *se, struct rb_node *node)
+static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node *node)
{
if (node) {
struct sched_entity *rse = __node_2_se(node);
- if (deadline_gt(min_deadline, se, rse))
- se->min_deadline = rse->min_deadline;
+ if (vruntime_gt(min_vruntime, se, rse))
+ se->min_vruntime = rse->min_vruntime;
}
}
/*
- * se->min_deadline = min(se->deadline, left->min_deadline, right->min_deadline)
+ * se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime)
*/
-static inline bool min_deadline_update(struct sched_entity *se, bool exit)
+static inline bool min_vruntime_update(struct sched_entity *se, bool exit)
{
- u64 old_min_deadline = se->min_deadline;
+ u64 old_min_vruntime = se->min_vruntime;
struct rb_node *node = &se->run_node;
- se->min_deadline = se->deadline;
- __update_min_deadline(se, node->rb_right);
- __update_min_deadline(se, node->rb_left);
+ se->min_vruntime = se->vruntime;
+ __min_vruntime_update(se, node->rb_right);
+ __min_vruntime_update(se, node->rb_left);
- return se->min_deadline == old_min_deadline;
+ return se->min_vruntime == old_min_vruntime;
}
-RB_DECLARE_CALLBACKS(static, min_deadline_cb, struct sched_entity,
- run_node, min_deadline, min_deadline_update);
+RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
+ run_node, min_vruntime, min_vruntime_update);
/*
* Enqueue an entity into the rb-tree:
@@ -816,18 +824,28 @@ RB_DECLARE_CALLBACKS(static, min_deadline_cb, struct sched_entity,
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
avg_vruntime_add(cfs_rq, se);
- se->min_deadline = se->deadline;
+ se->min_vruntime = se->vruntime;
rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
- __entity_less, &min_deadline_cb);
+ __entity_less, &min_vruntime_cb);
}
static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
- &min_deadline_cb);
+ &min_vruntime_cb);
avg_vruntime_sub(cfs_rq, se);
}
+struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq)
+{
+ struct rb_node *root = cfs_rq->tasks_timeline.rb_root.rb_node;
+
+ if (!root)
+ return NULL;
+
+ return __node_2_se(root);
+}
+
struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);
@@ -850,23 +868,29 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
* with the earliest virtual deadline.
*
* We can do this in O(log n) time due to an augmented RB-tree. The
- * tree keeps the entries sorted on service, but also functions as a
- * heap based on the deadline by keeping:
+ * tree keeps the entries sorted on deadline, but also functions as a
+ * heap based on the vruntime by keeping:
*
- * se->min_deadline = min(se->deadline, se->{left,right}->min_deadline)
+ * se->min_vruntime = min(se->vruntime, se->{left,right}->min_vruntime)
*
- * Which allows an EDF like search on (sub)trees.
+ * Which allows tree pruning through eligibility.
*/
-static struct sched_entity *__pick_eevdf(struct cfs_rq *cfs_rq)
+static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
{
struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
+ struct sched_entity *se = __pick_first_entity(cfs_rq);
struct sched_entity *curr = cfs_rq->curr;
struct sched_entity *best = NULL;
- struct sched_entity *best_left = NULL;
+
+ /*
+ * We can safely skip eligibility check if there is only one entity
+ * in this cfs_rq, saving some cycles.
+ */
+ if (cfs_rq->nr_running == 1)
+ return curr && curr->on_rq ? curr : se;
if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
curr = NULL;
- best = curr;
/*
* Once selected, run a task until it either becomes non-eligible or
@@ -875,95 +899,45 @@ static struct sched_entity *__pick_eevdf(struct cfs_rq *cfs_rq)
if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline)
return curr;
+ /* Pick the leftmost entity if it's eligible */
+ if (se && entity_eligible(cfs_rq, se)) {
+ best = se;
+ goto found;
+ }
+
+ /* Heap search for the EEVDF entity */
while (node) {
- struct sched_entity *se = __node_2_se(node);
+ struct rb_node *left = node->rb_left;
/*
- * If this entity is not eligible, try the left subtree.
+ * Eligible entities in left subtree are always better
+ * choices, since they have earlier deadlines.
*/
- if (!entity_eligible(cfs_rq, se)) {
- node = node->rb_left;
+ if (left && vruntime_eligible(cfs_rq,
+ __node_2_se(left)->min_vruntime)) {
+ node = left;
continue;
}
- /*
- * Now we heap search eligible trees for the best (min_)deadline
- */
- if (!best || deadline_gt(deadline, best, se))
- best = se;
+ se = __node_2_se(node);
/*
- * Every se in a left branch is eligible, keep track of the
- * branch with the best min_deadline
+ * The left subtree either is empty or has no eligible
+ * entity, so check the current node since it is the one
+ * with earliest deadline that might be eligible.
*/
- if (node->rb_left) {
- struct sched_entity *left = __node_2_se(node->rb_left);
-
- if (!best_left || deadline_gt(min_deadline, best_left, left))
- best_left = left;
-
- /*
- * min_deadline is in the left branch. rb_left and all
- * descendants are eligible, so immediately switch to the second
- * loop.
- */
- if (left->min_deadline == se->min_deadline)
- break;
- }
-
- /* min_deadline is at this node, no need to look right */
- if (se->deadline == se->min_deadline)
+ if (entity_eligible(cfs_rq, se)) {
+ best = se;
break;
-
- /* else min_deadline is in the right branch. */
- node = node->rb_right;
- }
-
- /*
- * We ran into an eligible node which is itself the best.
- * (Or nr_running == 0 and both are NULL)
- */
- if (!best_left || (s64)(best_left->min_deadline - best->deadline) > 0)
- return best;
-
- /*
- * Now best_left and all of its children are eligible, and we are just
- * looking for deadline == min_deadline
- */
- node = &best_left->run_node;
- while (node) {
- struct sched_entity *se = __node_2_se(node);
-
- /* min_deadline is the current node */
- if (se->deadline == se->min_deadline)
- return se;
-
- /* min_deadline is in the left branch */
- if (node->rb_left &&
- __node_2_se(node->rb_left)->min_deadline == se->min_deadline) {
- node = node->rb_left;
- continue;
}
- /* else min_deadline is in the right branch */
node = node->rb_right;
}
- return NULL;
-}
-
-static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
-{
- struct sched_entity *se = __pick_eevdf(cfs_rq);
+found:
+ if (!best || (curr && entity_before(curr, best)))
+ best = curr;
- if (!se) {
- struct sched_entity *left = __pick_first_entity(cfs_rq);
- if (left) {
- pr_err("EEVDF scheduling fail, picking leftmost\n");
- return left;
- }
- }
-
- return se;
+ return best;
}
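
Two invariants drive the loop above: entities are ordered by virtual deadline, and every node caches the minimum vruntime of its subtree, so a whole left subtree can be taken or skipped with one eligibility test. The sketch below restates that descent over a hypothetical node type with a mocked eligibility cutoff; it is not the kernel's rb-tree code:

#include <stdio.h>

struct node {
	unsigned long long deadline, vruntime;
	unsigned long long min_vruntime;	/* min vruntime over this subtree */
	struct node *left, *right;
};

/* Mock eligibility: in the kernel this is vruntime_eligible() against the
 * load-weighted average; here it is a fixed cutoff. */
static int eligible(unsigned long long vruntime)
{
	return vruntime <= 200;
}

static struct node *pick_earliest_eligible(struct node *node)
{
	while (node) {
		/* Any eligible entity on the left has an earlier deadline. */
		if (node->left && eligible(node->left->min_vruntime)) {
			node = node->left;
			continue;
		}
		/* Left subtree empty or ineligible: this node has the earliest
		 * deadline that might still be eligible. */
		if (eligible(node->vruntime))
			return node;
		node = node->right;
	}
	return NULL;
}

int main(void)
{
	struct node a = { .deadline = 10, .vruntime = 150, .min_vruntime = 150 };
	struct node b = { .deadline = 20, .vruntime = 300, .min_vruntime = 300 };
	struct node root = { .deadline = 15, .vruntime = 250, .min_vruntime = 150,
			     .left = &a, .right = &b };

	printf("picked deadline %llu\n", pick_earliest_eligible(&root)->deadline);
	return 0;
}
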
#ifdef CONFIG_SCHED_DEBUG
@@ -1129,23 +1103,17 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
}
#endif /* CONFIG_SMP */
-/*
- * Update the current task's runtime statistics.
- */
-static void update_curr(struct cfs_rq *cfs_rq)
+static s64 update_curr_se(struct rq *rq, struct sched_entity *curr)
{
- struct sched_entity *curr = cfs_rq->curr;
- u64 now = rq_clock_task(rq_of(cfs_rq));
- u64 delta_exec;
-
- if (unlikely(!curr))
- return;
+ u64 now = rq_clock_task(rq);
+ s64 delta_exec;
delta_exec = now - curr->exec_start;
- if (unlikely((s64)delta_exec <= 0))
- return;
+ if (unlikely(delta_exec <= 0))
+ return delta_exec;
curr->exec_start = now;
+ curr->sum_exec_runtime += delta_exec;
if (schedstat_enabled()) {
struct sched_statistics *stats;
@@ -1155,20 +1123,54 @@ static void update_curr(struct cfs_rq *cfs_rq)
max(delta_exec, stats->exec_max));
}
- curr->sum_exec_runtime += delta_exec;
- schedstat_add(cfs_rq->exec_clock, delta_exec);
+ return delta_exec;
+}
+
+static inline void update_curr_task(struct task_struct *p, s64 delta_exec)
+{
+ trace_sched_stat_runtime(p, delta_exec);
+ account_group_exec_runtime(p, delta_exec);
+ cgroup_account_cputime(p, delta_exec);
+ if (p->dl_server)
+ dl_server_update(p->dl_server, delta_exec);
+}
+
+/*
+ * Used by other classes to account runtime.
+ */
+s64 update_curr_common(struct rq *rq)
+{
+ struct task_struct *curr = rq->curr;
+ s64 delta_exec;
+
+ delta_exec = update_curr_se(rq, &curr->se);
+ if (likely(delta_exec > 0))
+ update_curr_task(curr, delta_exec);
+
+ return delta_exec;
+}
+
+/*
+ * Update the current task's runtime statistics.
+ */
+static void update_curr(struct cfs_rq *cfs_rq)
+{
+ struct sched_entity *curr = cfs_rq->curr;
+ s64 delta_exec;
+
+ if (unlikely(!curr))
+ return;
+
+ delta_exec = update_curr_se(rq_of(cfs_rq), curr);
+ if (unlikely(delta_exec <= 0))
+ return;
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);
- if (entity_is_task(curr)) {
- struct task_struct *curtask = task_of(curr);
-
- trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
- cgroup_account_cputime(curtask, delta_exec);
- account_group_exec_runtime(curtask, delta_exec);
- }
+ if (entity_is_task(curr))
+ update_curr_task(task_of(curr), delta_exec);
account_cfs_rq_runtime(cfs_rq, delta_exec);
}
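
update_curr_task() above is where a fair task's execution time now also reaches the deadline server it runs under, via dl_server_update(). The toy model below shows the effect on the server's budget; struct toy_server and its fields are invented for illustration and are not kernel types:

#include <stdio.h>

struct toy_server {
	long long runtime;	/* remaining budget, ns */
	int throttled;
};

/* Charge consumed time to the server's CBS budget; once it runs dry the
 * real code throttles the server and arms the replenishment timer. */
static void toy_server_update(struct toy_server *srv, long long delta_exec)
{
	srv->runtime -= delta_exec;
	if (srv->runtime <= 0)
		srv->throttled = 1;
}

int main(void)
{
	struct toy_server fair_server = { .runtime = 3000000, .throttled = 0 };

	/* Three ticks of 1.25 ms each exhaust a 3 ms budget on the third. */
	for (int i = 0; i < 3; i++)
		toy_server_update(&fair_server, 1250000);

	printf("runtime=%lld throttled=%d\n", fair_server.runtime, fair_server.throttled);
	return 0;
}
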
@@ -3164,7 +3166,7 @@ static bool vma_is_accessed(struct mm_struct *mm, struct vm_area_struct *vma)
* This is also done to avoid any side effect of task scanning
* amplifying the unfairness of disjoint set of VMAs' access.
*/
- if (READ_ONCE(current->mm->numa_scan_seq) < 2)
+ if ((READ_ONCE(current->mm->numa_scan_seq) - vma->numab_state->start_scan_seq) < 2)
return true;
pids = vma->numab_state->pids_active[0] | vma->numab_state->pids_active[1];
@@ -3307,6 +3309,8 @@ retry_pids:
if (!vma->numab_state)
continue;
+ vma->numab_state->start_scan_seq = mm->numa_scan_seq;
+
vma->numab_state->next_scan = now +
msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
@@ -3811,17 +3815,17 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
enqueue_load_avg(cfs_rq, se);
if (se->on_rq) {
update_load_add(&cfs_rq->load, se->load.weight);
- if (!curr) {
- /*
- * The entity's vruntime has been adjusted, so let's check
- * whether the rq-wide min_vruntime needs updated too. Since
- * the calculations above require stable min_vruntime rather
- * than up-to-date one, we do the update at the end of the
- * reweight process.
- */
+ if (!curr)
__enqueue_entity(cfs_rq, se);
- update_min_vruntime(cfs_rq);
- }
+
+ /*
+ * The entity's vruntime has been adjusted, so let's check
+ * whether the rq-wide min_vruntime needs updated too. Since
+ * the calculations above require stable min_vruntime rather
+ * than up-to-date one, we do the update at the end of the
+ * reweight process.
+ */
+ update_min_vruntime(cfs_rq);
}
}
@@ -4096,6 +4100,10 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
if (cfs_rq->tg == &root_task_group)
return;
+ /* rq has been offline and doesn't contribute to the share anymore: */
+ if (!cpu_active(cpu_of(rq_of(cfs_rq))))
+ return;
+
/*
* For migration heavy workloads, access to tg->load_avg can be
* unbound. Limit the update rate to at most once per ms.
@@ -4112,6 +4120,49 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
}
}
+static inline void clear_tg_load_avg(struct cfs_rq *cfs_rq)
+{
+ long delta;
+ u64 now;
+
+ /*
+ * No need to update load_avg for root_task_group, as it is not used.
+ */
+ if (cfs_rq->tg == &root_task_group)
+ return;
+
+ now = sched_clock_cpu(cpu_of(rq_of(cfs_rq)));
+ delta = 0 - cfs_rq->tg_load_avg_contrib;
+ atomic_long_add(delta, &cfs_rq->tg->load_avg);
+ cfs_rq->tg_load_avg_contrib = 0;
+ cfs_rq->last_update_tg_load_avg = now;
+}
+
+/* CPU offline callback: */
+static void __maybe_unused clear_tg_offline_cfs_rqs(struct rq *rq)
+{
+ struct task_group *tg;
+
+ lockdep_assert_rq_held(rq);
+
+ /*
+ * The rq clock has already been updated in
+ * set_rq_offline(), so we should skip updating
+ * the rq clock again in unthrottle_cfs_rq().
+ */
+ rq_clock_start_loop_update(rq);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tg, &task_groups, list) {
+ struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
+
+ clear_tg_load_avg(cfs_rq);
+ }
+ rcu_read_unlock();
+
+ rq_clock_stop_loop_update(rq);
+}
+
/*
* Called within set_task_rq() right before setting a task's CPU. The
* caller only guarantees p->pi_lock is held; no other assumptions,
@@ -4408,6 +4459,8 @@ static inline bool skip_blocked_update(struct sched_entity *se)
static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
+static inline void clear_tg_offline_cfs_rqs(struct rq *rq) {}
+
static inline int propagate_entity_load_avg(struct sched_entity *se)
{
return 0;
@@ -4770,11 +4823,14 @@ static inline unsigned long task_util(struct task_struct *p)
return READ_ONCE(p->se.avg.util_avg);
}
-static inline unsigned long _task_util_est(struct task_struct *p)
+static inline unsigned long task_runnable(struct task_struct *p)
{
- struct util_est ue = READ_ONCE(p->se.avg.util_est);
+ return READ_ONCE(p->se.avg.runnable_avg);
+}
- return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
+static inline unsigned long _task_util_est(struct task_struct *p)
+{
+ return READ_ONCE(p->se.avg.util_est) & ~UTIL_AVG_UNCHANGED;
}
static inline unsigned long task_util_est(struct task_struct *p)
@@ -4791,9 +4847,9 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
return;
/* Update root cfs_rq's estimated utilization */
- enqueued = cfs_rq->avg.util_est.enqueued;
+ enqueued = cfs_rq->avg.util_est;
enqueued += _task_util_est(p);
- WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
+ WRITE_ONCE(cfs_rq->avg.util_est, enqueued);
trace_sched_util_est_cfs_tp(cfs_rq);
}
@@ -4807,34 +4863,20 @@ static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
return;
/* Update root cfs_rq's estimated utilization */
- enqueued = cfs_rq->avg.util_est.enqueued;
+ enqueued = cfs_rq->avg.util_est;
enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
- WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
+ WRITE_ONCE(cfs_rq->avg.util_est, enqueued);
trace_sched_util_est_cfs_tp(cfs_rq);
}
#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
-/*
- * Check if a (signed) value is within a specified (unsigned) margin,
- * based on the observation that:
- *
- * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
- *
- * NOTE: this only works when value + margin < INT_MAX.
- */
-static inline bool within_margin(int value, int margin)
-{
- return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
-}
-
static inline void util_est_update(struct cfs_rq *cfs_rq,
struct task_struct *p,
bool task_sleep)
{
- long last_ewma_diff, last_enqueued_diff;
- struct util_est ue;
+ unsigned int ewma, dequeued, last_ewma_diff;
if (!sched_feat(UTIL_EST))
return;
@@ -4846,71 +4888,73 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
if (!task_sleep)
return;
+ /* Get current estimate of utilization */
+ ewma = READ_ONCE(p->se.avg.util_est);
+
/*
* If the PELT values haven't changed since enqueue time,
* skip the util_est update.
*/
- ue = p->se.avg.util_est;
- if (ue.enqueued & UTIL_AVG_UNCHANGED)
+ if (ewma & UTIL_AVG_UNCHANGED)
return;
- last_enqueued_diff = ue.enqueued;
+ /* Get utilization at dequeue */
+ dequeued = task_util(p);
/*
* Reset EWMA on utilization increases, the moving average is used only
* to smooth utilization decreases.
*/
- ue.enqueued = task_util(p);
- if (sched_feat(UTIL_EST_FASTUP)) {
- if (ue.ewma < ue.enqueued) {
- ue.ewma = ue.enqueued;
- goto done;
- }
+ if (ewma <= dequeued) {
+ ewma = dequeued;
+ goto done;
}
/*
* Skip update of task's estimated utilization when its members are
* already ~1% close to its last activation value.
*/
- last_ewma_diff = ue.enqueued - ue.ewma;
- last_enqueued_diff -= ue.enqueued;
- if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
- if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
- goto done;
-
- return;
- }
+ last_ewma_diff = ewma - dequeued;
+ if (last_ewma_diff < UTIL_EST_MARGIN)
+ goto done;
/*
* To avoid overestimation of actual task utilization, skip updates if
* we cannot grant there is idle time in this CPU.
*/
- if (task_util(p) > arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))))
+ if (dequeued > arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))))
return;
/*
+ * To avoid underestimating task utilization, skip EWMA updates if
+ * we cannot guarantee that the thread got all the CPU time it wanted.
+ */
+ if ((dequeued + UTIL_EST_MARGIN) < task_runnable(p))
+ goto done;
+
+
+ /*
* Update Task's estimated utilization
*
* When *p completes an activation we can consolidate another sample
- * of the task size. This is done by storing the current PELT value
- * as ue.enqueued and by using this value to update the Exponential
- * Weighted Moving Average (EWMA):
+ * of the task size. This is done by using this value to update the
+ * Exponential Weighted Moving Average (EWMA):
*
* ewma(t) = w * task_util(p) + (1-w) * ewma(t-1)
* = w * task_util(p) + ewma(t-1) - w * ewma(t-1)
* = w * (task_util(p) - ewma(t-1)) + ewma(t-1)
- * = w * ( last_ewma_diff ) + ewma(t-1)
- * = w * (last_ewma_diff + ewma(t-1) / w)
+ * = w * ( -last_ewma_diff ) + ewma(t-1)
+ * = w * (-last_ewma_diff + ewma(t-1) / w)
*
* Where 'w' is the weight of new samples, which is configured to be
* 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT)
*/
- ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
- ue.ewma += last_ewma_diff;
- ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
+ ewma <<= UTIL_EST_WEIGHT_SHIFT;
+ ewma -= last_ewma_diff;
+ ewma >>= UTIL_EST_WEIGHT_SHIFT;
done:
- ue.enqueued |= UTIL_AVG_UNCHANGED;
- WRITE_ONCE(p->se.avg.util_est, ue);
+ ewma |= UTIL_AVG_UNCHANGED;
+ WRITE_ONCE(p->se.avg.util_est, ewma);
trace_sched_util_est_se_tp(&p->se);
}
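
The shift arithmetic above implements ewma(t) = w * dequeued + (1 - w) * ewma(t-1) with w = 1/4, and the estimate only ever decays, since increases reset it directly. A standalone sketch of one such update sequence, assuming a weight shift of 2:

#include <stdio.h>

#define UTIL_EST_WEIGHT_SHIFT	2	/* w = 1/4 */

/* Decay-only EWMA: increases are taken verbatim, decreases are smoothed. */
static unsigned int util_est_next(unsigned int ewma, unsigned int dequeued)
{
	unsigned int diff;

	if (ewma <= dequeued)
		return dequeued;		/* utilization grew: reset the estimate */

	diff = ewma - dequeued;
	ewma <<= UTIL_EST_WEIGHT_SHIFT;		/* ewma * 4 */
	ewma -= diff;				/* minus (ewma - dequeued) */
	ewma >>= UTIL_EST_WEIGHT_SHIFT;		/* back down by 4 */
	return ewma;
}

int main(void)
{
	/* A drop from 400 to 100 closes a quarter of the gap per update. */
	unsigned int ewma = 400;

	for (int i = 0; i < 4; i++) {
		ewma = util_est_next(ewma, 100);
		printf("%u\n", ewma);		/* 325, 268, 226, 194 */
	}
	return 0;
}
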
@@ -7638,16 +7682,16 @@ cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost)
if (sched_feat(UTIL_EST)) {
unsigned long util_est;
- util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
+ util_est = READ_ONCE(cfs_rq->avg.util_est);
/*
* During wake-up @p isn't enqueued yet and doesn't contribute
- * to any cpu_rq(cpu)->cfs.avg.util_est.enqueued.
+ * to any cpu_rq(cpu)->cfs.avg.util_est.
* If @dst_cpu == @cpu add it to "simulate" cpu_util after @p
* has been enqueued.
*
* During exec (@dst_cpu = -1) @p is enqueued and does
- * contribute to cpu_rq(cpu)->cfs.util_est.enqueued.
+ * contribute to cpu_rq(cpu)->cfs.util_est.
* Remove it to "simulate" cpu_util without @p's contribution.
*
* Despite the task_on_rq_queued(@p) check there is still a
@@ -7776,7 +7820,7 @@ static inline void eenv_pd_busy_time(struct energy_env *eenv,
for_each_cpu(cpu, pd_cpus) {
unsigned long util = cpu_util(cpu, p, -1, 0);
- busy_time += effective_cpu_util(cpu, util, ENERGY_UTIL, NULL);
+ busy_time += effective_cpu_util(cpu, util, NULL, NULL);
}
eenv->pd_busy_time = min(eenv->pd_cap, busy_time);
@@ -7799,7 +7843,7 @@ eenv_pd_max_util(struct energy_env *eenv, struct cpumask *pd_cpus,
for_each_cpu(cpu, pd_cpus) {
struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL;
unsigned long util = cpu_util(cpu, p, dst_cpu, 1);
- unsigned long eff_util;
+ unsigned long eff_util, min, max;
/*
* Performance domain frequency: utilization clamping
@@ -7808,7 +7852,23 @@ eenv_pd_max_util(struct energy_env *eenv, struct cpumask *pd_cpus,
* NOTE: in case RT tasks are running, by default the
* FREQUENCY_UTIL's utilization can be max OPP.
*/
- eff_util = effective_cpu_util(cpu, util, FREQUENCY_UTIL, tsk);
+ eff_util = effective_cpu_util(cpu, util, &min, &max);
+
+ /* Task's uclamp can modify min and max value */
+ if (tsk && uclamp_is_used()) {
+ min = max(min, uclamp_eff_value(p, UCLAMP_MIN));
+
+ /*
+ * If there is no active max uclamp constraint,
+ * directly use task's one, otherwise keep max.
+ */
+ if (uclamp_rq_is_idle(cpu_rq(cpu)))
+ max = uclamp_eff_value(p, UCLAMP_MAX);
+ else
+ max = max(max, uclamp_eff_value(p, UCLAMP_MAX));
+ }
+
+ eff_util = sugov_effective_cpu_perf(cpu, eff_util, min, max);
max_util = max(max_util, eff_util);
}
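
effective_cpu_util() now returns the DL/irq floor and the uclamp ceiling through min and max, the task's own clamps are merged in above, and sugov_effective_cpu_perf() folds everything into one performance level. A hedged stand-in for that last step (the real helper also applies frequency headroom to the actual utilization, which is omitted here):

#include <stdio.h>

/* Simplified model: keep the running utilization between the bandwidth
 * floor and the uclamp ceiling. */
static unsigned long effective_perf(unsigned long actual, unsigned long min,
				    unsigned long max)
{
	if (actual < min)
		actual = min;
	if (actual > max)
		actual = max;
	return actual;
}

int main(void)
{
	/* CFS util 300 with a DL floor of 350 and a ceiling of 800 gives 350. */
	printf("%lu\n", effective_perf(300, 350, 800));
	/* CFS util 900 with the same limits is capped at 800. */
	printf("%lu\n", effective_perf(900, 350, 800));
	return 0;
}
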
@@ -8210,7 +8270,6 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
- int next_buddy_marked = 0;
int cse_is_idle, pse_is_idle;
if (unlikely(se == pse))
@@ -8227,7 +8286,6 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK)) {
set_next_buddy(pse);
- next_buddy_marked = 1;
}
/*
@@ -9060,7 +9118,7 @@ static int detach_tasks(struct lb_env *env)
case migrate_util:
util = task_util_est(p);
- if (util > env->imbalance)
+ if (shr_bound(util, env->sd->nr_balance_failed) > env->imbalance)
goto next;
env->imbalance -= util;
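
With the change above, a task's estimated utilization is halved once per failed balance attempt before being compared with the remaining imbalance, so ever larger tasks become acceptable as failures accumulate. A sketch with shr_bound() modeled as a bounded right shift:

#include <stdio.h>

#define BITS_PER_LONG	(8 * (unsigned int)sizeof(long))

/* Model of shr_bound(): right shift, capped so the shift count stays valid. */
static unsigned long shr_bound(unsigned long val, unsigned int shift)
{
	return val >> (shift < BITS_PER_LONG ? shift : BITS_PER_LONG - 1);
}

int main(void)
{
	unsigned long util = 512, imbalance = 200;

	for (unsigned int failed = 0; failed < 3; failed++)
		printf("nr_balance_failed=%u considered=%lu migrate=%s\n",
		       failed, shr_bound(util, failed),
		       shr_bound(util, failed) > imbalance ? "no" : "yes");
	return 0;
}
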
@@ -12413,6 +12471,9 @@ static void rq_offline_fair(struct rq *rq)
/* Ensure any throttled groups are reachable by pick_next_task */
unthrottle_offline_cfs_rqs(rq);
+
+ /* Ensure that we remove rq contribution to group share: */
+ clear_tg_offline_cfs_rqs(rq);
}
#endif /* CONFIG_SMP */
@@ -13036,19 +13097,6 @@ next_cpu:
return 0;
}
-#else /* CONFIG_FAIR_GROUP_SCHED */
-
-void free_fair_sched_group(struct task_group *tg) { }
-
-int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
-{
- return 1;
-}
-
-void online_fair_sched_group(struct task_group *tg) { }
-
-void unregister_fair_sched_group(struct task_group *tg) { }
-
#endif /* CONFIG_FAIR_GROUP_SCHED */
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index a3ddf84de430..143f55df890b 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -83,7 +83,6 @@ SCHED_FEAT(WA_BIAS, true)
* UtilEstimation. Use estimated CPU utilization.
*/
SCHED_FEAT(UTIL_EST, true)
-SCHED_FEAT(UTIL_EST_FASTUP, true)
SCHED_FEAT(LATENCY_WARN, false)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 565f8374ddbb..31231925f1ec 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -258,6 +258,36 @@ static void do_idle(void)
while (!need_resched()) {
rmb();
+ /*
+ * Interrupts shouldn't be re-enabled from that point on until
+ * the CPU sleeping instruction is reached. Otherwise an interrupt
+ * may fire and queue a timer that would be ignored until the CPU
+ * wakes from the sleeping instruction. And testing need_resched()
+ * doesn't tell about pending needed timer reprogram.
+ *
+ * Several cases to consider:
+ *
+ * - SLEEP-UNTIL-PENDING-INTERRUPT based instructions such as
+ * "wfi" or "mwait" are fine because they can be entered with
+ * interrupt disabled.
+ *
+ * - sti;mwait() couple is fine because the interrupts are
+ * re-enabled only upon the execution of mwait, leaving no gap
+ * in-between.
+ *
+ * - ROLLBACK based idle handlers with the sleeping instruction
+ * called with interrupts enabled are NOT fine. In this scheme
+ * when the interrupt detects it has interrupted an idle handler,
+ * it rolls back to its beginning which performs the
+ * need_resched() check before re-executing the sleeping
+ * instruction. This can leak a pending needed timer reprogram.
+ * If such a scheme is really mandatory due to the lack of an
+ * appropriate CPU sleeping instruction, then a FAST-FORWARD
+ * must instead be applied: when the interrupt detects it has
+ * interrupted an idle handler, it must resume to the end of
+ * this idle handler so that the generic idle loop is iterated
+ * again to reprogram the tick.
+ */
local_irq_disable();
if (cpu_is_offline(cpu)) {
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 3a0e0dc28721..9e1083465fbc 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -52,13 +52,13 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
return;
/* Avoid store if the flag has been already reset */
- enqueued = avg->util_est.enqueued;
+ enqueued = avg->util_est;
if (!(enqueued & UTIL_AVG_UNCHANGED))
return;
/* Reset flag to report util_avg has been updated */
enqueued &= ~UTIL_AVG_UNCHANGED;
- WRITE_ONCE(avg->util_est.enqueued, enqueued);
+ WRITE_ONCE(avg->util_est, enqueued);
}
static inline u64 rq_clock_pelt(struct rq *rq)
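
Collapsing util_est into one word works because UTIL_AVG_UNCHANGED lives in a single bit of the estimate, so flagging and unflagging perturb the stored value by at most one. The sketch below assumes, as the masking above suggests, that the flag is the lowest bit:

#include <stdio.h>

#define UTIL_AVG_UNCHANGED	0x1	/* assumed: LSB of util_est */

int main(void)
{
	unsigned int util_est = 416;

	util_est |= UTIL_AVG_UNCHANGED;		/* set when stored: "unchanged since enqueue" */
	printf("stored value: %u\n", util_est);	/* 417 */

	printf("read value:   %u\n", util_est & ~UTIL_AVG_UNCHANGED);	/* 416 */

	util_est &= ~UTIL_AVG_UNCHANGED;	/* cleared once util_avg actually changes */
	printf("after change: %u\n", util_est);	/* 416 */
	return 0;
}
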
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 6aaf0a3d6081..3261b067b67e 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1002,24 +1002,15 @@ static void update_curr_rt(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_rt_entity *rt_se = &curr->rt;
- u64 delta_exec;
- u64 now;
+ s64 delta_exec;
if (curr->sched_class != &rt_sched_class)
return;
- now = rq_clock_task(rq);
- delta_exec = now - curr->se.exec_start;
- if (unlikely((s64)delta_exec <= 0))
+ delta_exec = update_curr_common(rq);
+ if (unlikely(delta_exec <= 0))
return;
- schedstat_set(curr->stats.exec_max,
- max(curr->stats.exec_max, delta_exec));
-
- trace_sched_stat_runtime(curr, delta_exec, 0);
-
- update_current_exec_runtime(curr, now, delta_exec);
-
if (!rt_bandwidth_enabled())
return;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 2e5a95486a42..001fe047bd5d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -273,8 +273,6 @@ struct rt_bandwidth {
unsigned int rt_period_active;
};
-void __dl_clear_params(struct task_struct *p);
-
static inline int dl_bandwidth_enabled(void)
{
return sysctl_sched_rt_runtime >= 0;
@@ -315,6 +313,33 @@ extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *att
extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int dl_bw_check_overflow(int cpu);
+/*
+ * SCHED_DEADLINE supports servers (nested scheduling) with the following
+ * interface:
+ *
+ * dl_se::rq -- runqueue we belong to.
+ *
+ * dl_se::server_has_tasks() -- used on bandwidth enforcement; we 'stop' the
+ * server when it runs out of tasks to run.
+ *
+ * dl_se::server_pick() -- nested pick_next_task(); we yield the period if this
+ * returns NULL.
+ *
+ * dl_server_update() -- called from update_curr_common(), propagates runtime
+ * to the server.
+ *
+ * dl_server_start()
+ * dl_server_stop() -- start/stop the server when it has (no) tasks.
+ *
+ * dl_server_init() -- initializes the server.
+ */
+extern void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec);
+extern void dl_server_start(struct sched_dl_entity *dl_se);
+extern void dl_server_stop(struct sched_dl_entity *dl_se);
+extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
+ dl_server_has_tasks_f has_tasks,
+ dl_server_pick_f pick);
+
#ifdef CONFIG_CGROUP_SCHED
struct cfs_rq;
@@ -436,10 +461,21 @@ static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
extern int tg_nop(struct task_group *tg, void *data);
+#ifdef CONFIG_FAIR_GROUP_SCHED
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
+#else
+static inline void free_fair_sched_group(struct task_group *tg) { }
+static inline int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
+{
+ return 1;
+}
+static inline void online_fair_sched_group(struct task_group *tg) { }
+static inline void unregister_fair_sched_group(struct task_group *tg) { }
+#endif
+
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
struct sched_entity *se, int cpu,
struct sched_entity *parent);
@@ -2179,6 +2215,10 @@ extern const u32 sched_prio_to_wmult[40];
* MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
* in the runqueue.
*
+ * NOCLOCK - skip the update_rq_clock() (avoids double updates)
+ *
+ * MIGRATION - p->on_rq == TASK_ON_RQ_MIGRATING (used for DEADLINE)
+ *
* ENQUEUE_HEAD - place at front of runqueue (tail if not specified)
* ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
* ENQUEUE_MIGRATED - the task was migrated during wakeup
@@ -2189,6 +2229,7 @@ extern const u32 sched_prio_to_wmult[40];
#define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */
#define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */
+#define DEQUEUE_MIGRATING 0x100 /* Matches ENQUEUE_MIGRATING */
#define ENQUEUE_WAKEUP 0x01
#define ENQUEUE_RESTORE 0x02
@@ -2203,6 +2244,7 @@ extern const u32 sched_prio_to_wmult[40];
#define ENQUEUE_MIGRATED 0x00
#endif
#define ENQUEUE_INITIAL 0x80
+#define ENQUEUE_MIGRATING 0x100
#define RETRY_TASK ((void *)-1UL)
@@ -2212,6 +2254,8 @@ struct affinity_context {
unsigned int flags;
};
+extern s64 update_curr_common(struct rq *rq);
+
struct sched_class {
#ifdef CONFIG_UCLAMP_TASK
@@ -2425,8 +2469,7 @@ extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
-extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
-extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
+extern void init_dl_entity(struct sched_dl_entity *dl_se);
#define BW_SHIFT 20
#define BW_UNIT (1 << BW_SHIFT)
@@ -2822,6 +2865,7 @@ DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq,
double_rq_lock(_T->lock, _T->lock2),
double_rq_unlock(_T->lock, _T->lock2))
+extern struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
@@ -2961,24 +3005,14 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#endif
#ifdef CONFIG_SMP
-/**
- * enum cpu_util_type - CPU utilization type
- * @FREQUENCY_UTIL: Utilization used to select frequency
- * @ENERGY_UTIL: Utilization used during energy calculation
- *
- * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
- * need to be aggregated differently depending on the usage made of them. This
- * enum is used within effective_cpu_util() to differentiate the types of
- * utilization expected by the callers, and adjust the aggregation accordingly.
- */
-enum cpu_util_type {
- FREQUENCY_UTIL,
- ENERGY_UTIL,
-};
-
unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
- enum cpu_util_type type,
- struct task_struct *p);
+ unsigned long *min,
+ unsigned long *max);
+
+unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
+ unsigned long min,
+ unsigned long max);
+
/*
* Verify the fitness of task @p to run on @cpu taking into account the
@@ -3035,59 +3069,6 @@ static inline bool uclamp_rq_is_idle(struct rq *rq)
return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
}
-/**
- * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
- * @rq: The rq to clamp against. Must not be NULL.
- * @util: The util value to clamp.
- * @p: The task to clamp against. Can be NULL if you want to clamp
- * against @rq only.
- *
- * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
- *
- * If sched_uclamp_used static key is disabled, then just return the util
- * without any clamping since uclamp aggregation at the rq level in the fast
- * path is disabled, rendering this operation a NOP.
- *
- * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
- * will return the correct effective uclamp value of the task even if the
- * static key is disabled.
- */
-static __always_inline
-unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
- struct task_struct *p)
-{
- unsigned long min_util = 0;
- unsigned long max_util = 0;
-
- if (!static_branch_likely(&sched_uclamp_used))
- return util;
-
- if (p) {
- min_util = uclamp_eff_value(p, UCLAMP_MIN);
- max_util = uclamp_eff_value(p, UCLAMP_MAX);
-
- /*
- * Ignore last runnable task's max clamp, as this task will
- * reset it. Similarly, no need to read the rq's min clamp.
- */
- if (uclamp_rq_is_idle(rq))
- goto out;
- }
-
- min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN));
- max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX));
-out:
- /*
- * Since CPU's {min,max}_util clamps are MAX aggregated considering
- * RUNNABLE tasks with _different_ clamps, we can end up with an
- * inversion. Fix it now when the clamps are applied.
- */
- if (unlikely(min_util >= max_util))
- return min_util;
-
- return clamp(util, min_util, max_util);
-}
-
/* Is the rq being capped/throttled by uclamp_max? */
static inline bool uclamp_rq_is_capped(struct rq *rq)
{
@@ -3125,13 +3106,6 @@ static inline unsigned long uclamp_eff_value(struct task_struct *p,
return SCHED_CAPACITY_SCALE;
}
-static inline
-unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
- struct task_struct *p)
-{
- return util;
-}
-
static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
static inline bool uclamp_is_used(void)
@@ -3261,16 +3235,6 @@ extern int sched_dynamic_mode(const char *str);
extern void sched_dynamic_update(int mode);
#endif
-static inline void update_current_exec_runtime(struct task_struct *curr,
- u64 now, u64 delta_exec)
-{
- curr->se.sum_exec_runtime += delta_exec;
- account_group_exec_runtime(curr, delta_exec);
-
- curr->se.exec_start = now;
- cgroup_account_cputime(curr, delta_exec);
-}
-
#ifdef CONFIG_SCHED_MM_CID
#define SCHED_MM_CID_PERIOD_NS (100ULL * 1000000) /* 100ms */
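The dl_server_*() declarations added at the top of this sched.h hunk expose the new deadline-server hooks: a scheduling class registers a has_tasks/pick callback pair through dl_server_init() and then drives the server with dl_server_start/stop/update. Below is a minimal user-space stand-in for that registration pattern only; the struct layouts, fair_server_* names and dl_server_init_stub() are illustrative assumptions, not the kernel's implementation.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct rq;                                        /* opaque here */
struct sched_dl_entity { int unused; };
struct task_struct { const char *comm; };

typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *dl_se);
typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *dl_se);

static struct task_struct fair_task = { .comm = "fair" };

static bool fair_server_has_tasks(struct sched_dl_entity *dl_se) { (void)dl_se; return true; }
static struct task_struct *fair_server_pick(struct sched_dl_entity *dl_se) { (void)dl_se; return &fair_task; }

/* stand-in for dl_server_init(): only records the callbacks */
static struct {
	dl_server_has_tasks_f has_tasks;
	dl_server_pick_f pick;
} server;

static void dl_server_init_stub(struct sched_dl_entity *dl_se, struct rq *rq,
				dl_server_has_tasks_f has_tasks, dl_server_pick_f pick)
{
	(void)dl_se; (void)rq;
	server.has_tasks = has_tasks;
	server.pick = pick;
}

int main(void)
{
	struct sched_dl_entity dl_se = { 0 };

	dl_server_init_stub(&dl_se, NULL, fair_server_has_tasks, fair_server_pick);
	if (server.has_tasks(&dl_se))
		printf("server would pick: %s\n", server.pick(&dl_se)->comm);
	return 0;
}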
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 6cf7304e6449..b1b8fe61c532 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -70,18 +70,7 @@ static void yield_task_stop(struct rq *rq)
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
- struct task_struct *curr = rq->curr;
- u64 now, delta_exec;
-
- now = rq_clock_task(rq);
- delta_exec = now - curr->se.exec_start;
- if (unlikely((s64)delta_exec < 0))
- delta_exec = 0;
-
- schedstat_set(curr->stats.exec_max,
- max(curr->stats.exec_max, delta_exec));
-
- update_current_exec_runtime(curr, now, delta_exec);
+ update_curr_common(rq);
}
/*
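put_prev_task_stop() now delegates its runtime accounting to update_curr_common(), declared in the sched.h hunk above. The sketch below is reconstructed from the two snippets removed here and from the deleted update_current_exec_runtime() helper; the real kernel/sched/core.c body may differ in detail.

/* sketch only -- reconstructed from the removed helpers above */
static s64 update_curr_common(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 now = rq_clock_task(rq);
	s64 delta_exec = now - curr->se.exec_start;

	if (unlikely(delta_exec <= 0))
		return delta_exec;

	schedstat_set(curr->stats.exec_max,
		      max(curr->stats.exec_max, (u64)delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = now;
	cgroup_account_cputime(curr, delta_exec);

	return delta_exec;
}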
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 255999ba9190..aca7b437882e 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -1072,7 +1072,7 @@ static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd, struct seccomp_kn
*/
list_del_init(&addfd->list);
if (!addfd->setfd)
- fd = receive_fd(addfd->file, addfd->flags);
+ fd = receive_fd(addfd->file, NULL, addfd->flags);
else
fd = receive_fd_replace(addfd->fd, addfd->file, addfd->flags);
addfd->ret = fd;
diff --git a/kernel/signal.c b/kernel/signal.c
index 47a7602dfe8d..c9c57d053ce4 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -171,16 +171,6 @@ static bool recalc_sigpending_tsk(struct task_struct *t)
return false;
}
-/*
- * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
- * This is superfluous when called on current, the wakeup is a harmless no-op.
- */
-void recalc_sigpending_and_wake(struct task_struct *t)
-{
- if (recalc_sigpending_tsk(t))
- signal_wake_up(t, 0);
-}
-
void recalc_sigpending(void)
{
if (!recalc_sigpending_tsk(current) && !freezing(current))
@@ -1348,10 +1338,8 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
action->sa.sa_handler = SIG_DFL;
if (handler == HANDLER_EXIT)
action->sa.sa_flags |= SA_IMMUTABLE;
- if (blocked) {
+ if (blocked)
sigdelset(&t->blocked, sig);
- recalc_sigpending_and_wake(t);
- }
}
/*
* Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
@@ -1361,6 +1349,9 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
(!t->ptrace || (handler == HANDLER_EXIT)))
t->signal->flags &= ~SIGNAL_UNKILLABLE;
ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
+ /* This can happen if the signal was already pending and blocked */
+ if (!task_sigpending(t))
+ signal_wake_up(t, 0);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
return ret;
@@ -1376,12 +1367,12 @@ int force_sig_info(struct kernel_siginfo *info)
*/
int zap_other_threads(struct task_struct *p)
{
- struct task_struct *t = p;
+ struct task_struct *t;
int count = 0;
p->signal->group_stop_count = 0;
- while_each_thread(p, t) {
+ for_other_threads(p, t) {
task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
/* Don't require de_thread to wait for the vhost_worker */
if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
@@ -2465,12 +2456,10 @@ static bool do_signal_stop(int signr)
sig->group_exit_code = signr;
sig->group_stop_count = 0;
-
if (task_set_jobctl_pending(current, signr | gstop))
sig->group_stop_count++;
- t = current;
- while_each_thread(current, t) {
+ for_other_threads(current, t) {
/*
* Setting state to TASK_STOPPED for a group
* stop is always done with the siglock held,
@@ -2966,8 +2955,7 @@ static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
if (sigisemptyset(&retarget))
return;
- t = tsk;
- while_each_thread(tsk, t) {
+ for_other_threads(tsk, t) {
if (t->flags & PF_EXITING)
continue;
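Every while_each_thread(p, t) caller converted in this file wanted "each thread in the group except p", which is exactly what the new for_other_threads() spells out. The macro shape and the tiny circular list below are a user-space model of that iteration, not kernel code; struct thread is a stand-in.

#include <stdio.h>

struct thread { int tid; struct thread *next; };

/* plausible shape of the helper: start at p, visit until we wrap back to p */
#define for_other_threads(p, t) \
	for ((t) = (p); ((t) = (t)->next) != (p); )

int main(void)
{
	struct thread a = { .tid = 1 }, b = { .tid = 2 }, c = { .tid = 3 };
	struct thread *t;

	a.next = &b; b.next = &c; c.next = &a;      /* circular thread-group list */

	for_other_threads(&a, t)
		printf("other thread: %d\n", t->tid);   /* prints 2 then 3 */
	return 0;
}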
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index 4f65824879ab..afb3c116da91 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -126,7 +126,7 @@ EXPORT_SYMBOL_GPL(stack_trace_save);
/**
* stack_trace_save_tsk - Save a task stack trace into a storage array
- * @task: The task to examine
+ * @tsk: The task to examine
* @store: Pointer to storage array
* @size: Size of the storage array
* @skipnr: Number of entries to skip at the start of the stack trace
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 9a846439b36a..faad00cce269 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -171,6 +171,9 @@ COND_SYSCALL(landlock_add_rule);
COND_SYSCALL(landlock_restrict_self);
COND_SYSCALL(fadvise64_64);
COND_SYSCALL_COMPAT(fadvise64_64);
+COND_SYSCALL(lsm_get_self_attr);
+COND_SYSCALL(lsm_set_self_attr);
+COND_SYSCALL(lsm_list_modules);
/* CONFIG_MMU only */
COND_SYSCALL(swapon);
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 649f2b48e8f0..481b7ab65e2c 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -56,7 +56,6 @@ extern int clockevents_program_event(struct clock_event_device *dev,
ktime_t expires, bool force);
extern void clockevents_handle_noop(struct clock_event_device *dev);
extern int __clockevents_update_freq(struct clock_event_device *dev, u32 freq);
-extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
/* Broadcasting support */
# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
@@ -197,3 +196,5 @@ void hrtimers_resume_local(void);
#else
#define JIFFIES_SHIFT 8
#endif
+
+extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index be77b021e5d6..a17d26002831 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -839,6 +839,10 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
ts->next_timer = next_tick;
}
+ /* Make sure next_tick is never before basemono! */
+ if (WARN_ON_ONCE(basemono > next_tick))
+ next_tick = basemono;
+
/*
* If the tick is due in the next period, keep it ticking or
* force prod the timer.
@@ -887,7 +891,6 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
u64 basemono = ts->timer_expires_base;
u64 expires = ts->timer_expires;
- ktime_t tick = expires;
/* Make sure we won't be trying to stop it twice in a row. */
ts->timer_expires_base = 0;
@@ -910,7 +913,7 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
/* Skip reprogram of event if it's not changed */
if (ts->tick_stopped && (expires == ts->next_tick)) {
/* Sanity check: make sure clockevent is actually programmed */
- if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
+ if (expires == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
return;
WARN_ON_ONCE(1);
@@ -920,11 +923,11 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
}
/*
- * nohz_stop_sched_tick() can be called several times before
- * nohz_restart_sched_tick() is called. This happens when
- * interrupts arrive which do not cause a reschedule. In the
- * first call we save the current tick time, so we can restart
- * the scheduler tick in nohz_restart_sched_tick().
+ * tick_nohz_stop_tick() can be called several times before
+ * tick_nohz_restart_sched_tick() is called. This happens when
+ * interrupts arrive which do not cause a reschedule. In the first
+ * call we save the current tick time, so we can restart the
+ * scheduler tick in tick_nohz_restart_sched_tick().
*/
if (!ts->tick_stopped) {
calc_load_nohz_start();
@@ -935,7 +938,7 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
trace_tick_stop(1, TICK_DEP_MASK_NONE);
}
- ts->next_tick = tick;
+ ts->next_tick = expires;
/*
* If the expiration time == KTIME_MAX, then we simply stop
@@ -950,11 +953,11 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
}
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
- hrtimer_start(&ts->sched_timer, tick,
+ hrtimer_start(&ts->sched_timer, expires,
HRTIMER_MODE_ABS_PINNED_HARD);
} else {
- hrtimer_set_expires(&ts->sched_timer, tick);
- tick_program_event(tick, 1);
+ hrtimer_set_expires(&ts->sched_timer, expires);
+ tick_program_event(expires, 1);
}
}
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 63a8ce7177dd..352b161113cd 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -571,18 +571,15 @@ static int calc_wheel_index(unsigned long expires, unsigned long clk,
static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
- if (!is_timers_nohz_active())
- return;
-
/*
- * TODO: This wants some optimizing similar to the code below, but we
- * will do that when we switch from push to pull for deferrable timers.
+ * Deferrable timers do not prevent the CPU from entering dynticks and
+ * are not taken into account on the idle/nohz_full path. An IPI when a
+ * new deferrable timer is enqueued will wake up the remote CPU but
+ * nothing will be done with the deferrable timer base. Therefore skip
+ * the remote IPI for deferrable timers completely.
*/
- if (timer->flags & TIMER_DEFERRABLE) {
- if (tick_nohz_full_cpu(base->cpu))
- wake_up_nohz_cpu(base->cpu);
+ if (!is_timers_nohz_active() || timer->flags & TIMER_DEFERRABLE)
return;
- }
/*
* We might have to IPI the remote CPU if the base is idle and the
@@ -606,7 +603,7 @@ static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
__set_bit(idx, base->pending_map);
timer_set_idx(timer, idx);
- trace_timer_start(timer, timer->expires, timer->flags);
+ trace_timer_start(timer, bucket_expiry);
/*
* Check whether this is the new first expiring timer. The
@@ -942,31 +939,34 @@ get_target_base(struct timer_base *base, unsigned tflags)
return get_timer_this_cpu_base(tflags);
}
-static inline void forward_timer_base(struct timer_base *base)
+static inline void __forward_timer_base(struct timer_base *base,
+ unsigned long basej)
{
- unsigned long jnow = READ_ONCE(jiffies);
-
/*
- * No need to forward if we are close enough below jiffies.
- * Also while executing timers, base->clk is 1 offset ahead
- * of jiffies to avoid endless requeuing to current jiffies.
+ * Check whether we can forward the base. We can only do that when
+ * @basej is past base->clk otherwise we might rewind base->clk.
*/
- if ((long)(jnow - base->clk) < 1)
+ if (time_before_eq(basej, base->clk))
return;
/*
* If the next expiry value is > jiffies, then we fast forward to
* jiffies otherwise we forward to the next expiry value.
*/
- if (time_after(base->next_expiry, jnow)) {
- base->clk = jnow;
+ if (time_after(base->next_expiry, basej)) {
+ base->clk = basej;
} else {
if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
return;
base->clk = base->next_expiry;
}
+
}
+static inline void forward_timer_base(struct timer_base *base)
+{
+ __forward_timer_base(base, READ_ONCE(jiffies));
+}
/*
* We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
@@ -1803,8 +1803,10 @@ static int next_pending_bucket(struct timer_base *base, unsigned offset,
/*
* Search the first expiring timer in the various clock levels. Caller must
* hold base->lock.
+ *
+ * Store next expiry time in base->next_expiry.
*/
-static unsigned long __next_timer_interrupt(struct timer_base *base)
+static void next_expiry_recalc(struct timer_base *base)
{
unsigned long clk, next, adj;
unsigned lvl, offset = 0;
@@ -1870,10 +1872,9 @@ static unsigned long __next_timer_interrupt(struct timer_base *base)
clk += adj;
}
+ base->next_expiry = next;
base->next_expiry_recalc = false;
base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);
-
- return next;
}
#ifdef CONFIG_NO_HZ_COMMON
@@ -1921,8 +1922,9 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+ unsigned long nextevt = basej + NEXT_TIMER_MAX_DELTA;
u64 expires = KTIME_MAX;
- unsigned long nextevt;
+ bool was_idle;
/*
* Pretend that there is no timer pending if the cpu is offline.
@@ -1933,37 +1935,44 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
raw_spin_lock(&base->lock);
if (base->next_expiry_recalc)
- base->next_expiry = __next_timer_interrupt(base);
- nextevt = base->next_expiry;
+ next_expiry_recalc(base);
/*
* We have a fresh next event. Check whether we can forward the
- * base. We can only do that when @basej is past base->clk
- * otherwise we might rewind base->clk.
+ * base.
*/
- if (time_after(basej, base->clk)) {
- if (time_after(nextevt, basej))
- base->clk = basej;
- else if (time_after(nextevt, base->clk))
- base->clk = nextevt;
- }
+ __forward_timer_base(base, basej);
- if (time_before_eq(nextevt, basej)) {
- expires = basem;
- base->is_idle = false;
+ if (base->timers_pending) {
+ nextevt = base->next_expiry;
+
+ /* If we missed a tick already, force 0 delta */
+ if (time_before(nextevt, basej))
+ nextevt = basej;
+ expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
} else {
- if (base->timers_pending)
- expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
/*
- * If we expect to sleep more than a tick, mark the base idle.
- * Also the tick is stopped so any added timer must forward
- * the base clk itself to keep granularity small. This idle
- * logic is only maintained for the BASE_STD base, deferrable
- * timers may still see large granularity skew (by design).
+ * Move next_expiry for the empty base into the future to
+ * prevent an unnecessary raise of the timer softirq when the
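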
+ * next_expiry value will be reached even if there is no timer
+ * pending.
*/
- if ((expires - basem) > TICK_NSEC)
- base->is_idle = true;
+ base->next_expiry = nextevt;
}
+
+ /*
+ * Base is idle if the next event is more than a tick away.
+ *
+ * If the base is marked idle then any timer add operation must forward
+ * the base clk itself to keep granularity small. This idle logic is
+ * only maintained for the BASE_STD base, deferrable timers may still
+ * see large granularity skew (by design).
+ */
+ was_idle = base->is_idle;
+ base->is_idle = time_after(nextevt, basej + 1);
+ if (was_idle != base->is_idle)
+ trace_timer_base_idle(base->is_idle, base->cpu);
+
raw_spin_unlock(&base->lock);
return cmp_next_hrtimer_event(basem, expires);
@@ -1984,7 +1993,10 @@ void timer_clear_idle(void)
* sending the IPI a few instructions smaller for the cost of taking
* the lock in the exit from idle path.
*/
- base->is_idle = false;
+ if (base->is_idle) {
+ base->is_idle = false;
+ trace_timer_base_idle(false, smp_processor_id());
+ }
}
#endif
@@ -2015,8 +2027,12 @@ static inline void __run_timers(struct timer_base *base)
*/
WARN_ON_ONCE(!levels && !base->next_expiry_recalc
&& base->timers_pending);
+ /*
+ * While executing timers, base->clk is set 1 offset ahead of
+ * jiffies to avoid endless requeuing to current jiffies.
+ */
base->clk++;
- base->next_expiry = __next_timer_interrupt(base);
+ next_expiry_recalc(base);
while (levels--)
expire_timers(base, heads + levels);
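__forward_timer_base() refuses to move base->clk backwards and otherwise fast-forwards it to basej, or only up to the next expiry if that comes first. The user-space model below mirrors that rule; time_after()/time_before_eq() are simplified stand-ins for the kernel's wrap-safe jiffies comparisons.

#include <stdio.h>

/* simplified stand-ins for the kernel's wrap-safe jiffies comparisons */
#define time_after(a, b)      ((long)((b) - (a)) < 0)
#define time_before_eq(a, b)  ((long)((b) - (a)) >= 0)

/* mirrors __forward_timer_base(): never rewind clk; otherwise forward it to
 * basej, or only up to the next expiry if that is earlier */
static void forward(unsigned long *clk, unsigned long next_expiry, unsigned long basej)
{
	if (time_before_eq(basej, *clk))
		return;
	if (time_after(next_expiry, basej))
		*clk = basej;
	else
		*clk = next_expiry;
}

int main(void)
{
	unsigned long clk = (unsigned long)-5;      /* just before the counter wraps */
	unsigned long basej = 3;                    /* jiffies has wrapped past clk */

	forward(&clk, basej + 100, basej);
	printf("clk forwarded to %lu\n", clk);      /* 3: the wrap is handled correctly */
	return 0;
}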
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index 9365ce407426..e76f5e1efdf2 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -2177,14 +2177,12 @@ static int user_events_open(struct inode *node, struct file *file)
static ssize_t user_events_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
- struct iovec iov;
struct iov_iter i;
if (unlikely(*ppos != 0))
return -EFAULT;
- if (unlikely(import_single_range(ITER_SOURCE, (char __user *)ubuf,
- count, &iov, &i)))
+ if (unlikely(import_ubuf(ITER_SOURCE, (char __user *)ubuf, count, &i)))
return -EFAULT;
return user_events_write_core(file, &i);
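import_ubuf() replaces import_single_range() for single-buffer writes, so the caller no longer needs a struct iovec on its stack. Judging from the import_single_range() body removed in the lib/iov_iter.c hunk further down, the new helper plausibly reduces to the following sketch (not necessarily the exact upstream body):

int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov_iter_ubuf(i, rw, buf, len);
	return 0;
}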
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index eabe8bcc7042..ce4d99df5f0e 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -231,7 +231,7 @@ void __put_user_ns(struct user_namespace *ns)
}
EXPORT_SYMBOL(__put_user_ns);
-/**
+/*
* struct idmap_key - holds the information necessary to find an idmapping in a
* sorted idmap array. It is passed to cmp_map_id() as first argument.
*/
@@ -241,7 +241,7 @@ struct idmap_key {
u32 count; /* == 0 unless used with map_id_range_down() */
};
-/**
+/*
* cmp_map_id - Function to be passed to bsearch() to find the requested
* idmapping. Expects struct idmap_key to be passed via @k.
*/
@@ -271,7 +271,7 @@ static int cmp_map_id(const void *k, const void *e)
return 1;
}
-/**
+/*
* map_id_range_down_max - Find idmap via binary search in ordered idmap array.
* Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
*/
@@ -288,7 +288,7 @@ map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 cou
sizeof(struct uid_gid_extent), cmp_map_id);
}
-/**
+/*
* map_id_range_down_base - Find idmap via binary search in static extent array.
* Can only be called if number of mappings is equal or less than
* UID_GID_MAP_MAX_BASE_EXTENTS.
@@ -332,12 +332,12 @@ static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
return id;
}
-static u32 map_id_down(struct uid_gid_map *map, u32 id)
+u32 map_id_down(struct uid_gid_map *map, u32 id)
{
return map_id_range_down(map, id, 1);
}
-/**
+/*
* map_id_up_base - Find idmap via binary search in static extent array.
* Can only be called if number of mappings is equal or less than
* UID_GID_MAP_MAX_BASE_EXTENTS.
@@ -358,7 +358,7 @@ map_id_up_base(unsigned extents, struct uid_gid_map *map, u32 id)
return NULL;
}
-/**
+/*
* map_id_up_max - Find idmap via binary search in ordered idmap array.
* Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
*/
@@ -375,7 +375,7 @@ map_id_up_max(unsigned extents, struct uid_gid_map *map, u32 id)
sizeof(struct uid_gid_extent), cmp_map_id);
}
-static u32 map_id_up(struct uid_gid_map *map, u32 id)
+u32 map_id_up(struct uid_gid_map *map, u32 id)
{
struct uid_gid_extent *extent;
unsigned extents = map->nr_extents;
@@ -770,7 +770,7 @@ static bool mappings_overlap(struct uid_gid_map *new_map,
return false;
}
-/**
+/*
* insert_extent - Safely insert a new idmap extent into struct uid_gid_map.
* Takes care to allocate a 4K block of memory if the number of mappings exceeds
* UID_GID_MAP_MAX_BASE_EXTENTS.
@@ -839,7 +839,7 @@ static int cmp_extents_reverse(const void *a, const void *b)
return 0;
}
-/**
+/*
* sort_idmaps - Sorts an array of idmap entries.
* Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
*/
diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
index 778b4056700f..03b90d7d2175 100644
--- a/kernel/watch_queue.c
+++ b/kernel/watch_queue.c
@@ -270,7 +270,7 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
goto error;
ret = -ENOMEM;
- pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL);
+ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages)
goto error;
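The watch_queue fix swaps the first two kcalloc() arguments back into the documented order: element count first, element size second, flags last, the same convention as calloc(), so the allocator's multiplication-overflow check applies to the intended count. A user-space analogue:

#include <stdio.h>
#include <stdlib.h>

struct page { int dummy; };

int main(void)
{
	size_t nr_pages = 16;

	/* element count first, element size second - same convention as kcalloc() */
	struct page **pages = calloc(nr_pages, sizeof(struct page *));
	if (!pages)
		return 1;

	printf("allocated %zu zeroed slots\n", nr_pages);
	free(pages);
	return 0;
}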
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 5cd6d4e26915..81a8862295d6 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -91,7 +91,7 @@ static DEFINE_PER_CPU(atomic_t, hrtimer_interrupts);
static DEFINE_PER_CPU(int, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_warned);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_touched);
-static unsigned long watchdog_hardlockup_all_cpu_dumped;
+static unsigned long hard_lockup_nmi_warn;
notrace void arch_touch_nmi_watchdog(void)
{
@@ -151,12 +151,32 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
*/
if (is_hardlockup(cpu)) {
unsigned int this_cpu = smp_processor_id();
+ unsigned long flags;
/* Only print hardlockups once. */
if (per_cpu(watchdog_hardlockup_warned, cpu))
return;
+ /*
+ * Prevent multiple hard-lockup reports if one cpu is already
+ * engaged in dumping all cpu back traces.
+ */
+ if (sysctl_hardlockup_all_cpu_backtrace) {
+ if (test_and_set_bit_lock(0, &hard_lockup_nmi_warn))
+ return;
+ }
+
+ /*
+ * NOTE: we call printk_cpu_sync_get_irqsave() after printing
+ * the lockup message. While it would be nice to serialize
+ * that printout, we really want to make sure that if some
+ * other CPU somehow locked up while holding the lock associated
+ * with printk_cpu_sync_get_irqsave() that we can still at least
+ * get the message about the lockup out.
+ */
pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", cpu);
+ printk_cpu_sync_get_irqsave(flags);
+
print_modules();
print_irqtrace_events(current);
if (cpu == this_cpu) {
@@ -164,17 +184,17 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
show_regs(regs);
else
dump_stack();
+ printk_cpu_sync_put_irqrestore(flags);
} else {
+ printk_cpu_sync_put_irqrestore(flags);
trigger_single_cpu_backtrace(cpu);
}
- /*
- * Perform multi-CPU dump only once to avoid multiple
- * hardlockups generating interleaving traces
- */
- if (sysctl_hardlockup_all_cpu_backtrace &&
- !test_and_set_bit(0, &watchdog_hardlockup_all_cpu_dumped))
+ if (sysctl_hardlockup_all_cpu_backtrace) {
trigger_allbutcpu_cpu_backtrace(cpu);
+ if (!hardlockup_panic)
+ clear_bit_unlock(0, &hard_lockup_nmi_warn);
+ }
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
@@ -448,6 +468,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
struct pt_regs *regs = get_irq_regs();
int duration;
int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
+ unsigned long flags;
if (!watchdog_enabled)
return HRTIMER_NORESTART;
@@ -514,6 +535,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
/* Start period for the next softlockup warning. */
update_report_ts();
+ printk_cpu_sync_get_irqsave(flags);
pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
smp_processor_id(), duration,
current->comm, task_pid_nr(current));
@@ -523,10 +545,12 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
show_regs(regs);
else
dump_stack();
+ printk_cpu_sync_put_irqrestore(flags);
if (softlockup_all_cpu_backtrace) {
trigger_allbutcpu_cpu_backtrace(smp_processor_id());
- clear_bit_unlock(0, &soft_lockup_nmi_warn);
+ if (!softlockup_panic)
+ clear_bit_unlock(0, &soft_lockup_nmi_warn);
}
add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
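The hard-lockup path now serializes the expensive all-CPU dump with test_and_set_bit_lock()/clear_bit_unlock() on hard_lockup_nmi_warn, releasing the bit only when no panic follows. Below is a rough user-space analogue of that one-reporter-at-a-time pattern using C11 atomics; report_lockup() and the flag are illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag reporting = ATOMIC_FLAG_INIT;

static void report_lockup(int cpu, bool will_panic)
{
	if (atomic_flag_test_and_set_explicit(&reporting, memory_order_acquire))
		return;                       /* another CPU already owns the dump */

	printf("Watchdog detected hard LOCKUP on cpu %d\n", cpu);
	/* ... trigger backtraces on all other CPUs ... */

	if (!will_panic)
		atomic_flag_clear_explicit(&reporting, memory_order_release);
}

int main(void)
{
	report_lockup(3, false);
	report_lockup(5, false);              /* reports again: the flag was released */
	return 0;
}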
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2989b57e154a..76e60faed892 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -381,6 +381,12 @@ static bool workqueue_freezing; /* PL: have wqs started freezing? */
/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;
+/* PL: user requested unbound cpumask via sysfs */
+static cpumask_var_t wq_requested_unbound_cpumask;
+
+/* PL: isolated cpumask to be excluded from unbound cpumask */
+static cpumask_var_t wq_isolated_cpumask;
+
/* for further constrain wq_unbound_cpumask by cmdline parameter*/
static struct cpumask wq_cmdline_cpumask __initdata;
@@ -4408,19 +4414,6 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
mutex_unlock(&ctx->wq->mutex);
}
-static void apply_wqattrs_lock(void)
-{
- /* CPUs should stay stable across pwq creations and installations */
- cpus_read_lock();
- mutex_lock(&wq_pool_mutex);
-}
-
-static void apply_wqattrs_unlock(void)
-{
- mutex_unlock(&wq_pool_mutex);
- cpus_read_unlock();
-}
-
static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs)
{
@@ -5825,39 +5818,40 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
}
/**
- * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
- * @cpumask: the cpumask to set
- *
- * The low-level workqueues cpumask is a global cpumask that limits
- * the affinity of all unbound workqueues. This function check the @cpumask
- * and apply it to all unbound workqueues and updates all pwqs of them.
+ * workqueue_unbound_exclude_cpumask - Exclude given CPUs from unbound cpumask
+ * @exclude_cpumask: the cpumask to be excluded from wq_unbound_cpumask
*
- * Return: 0 - Success
- * -EINVAL - Invalid @cpumask
- * -ENOMEM - Failed to allocate memory for attrs or pwqs.
+ * This function can be called from cpuset code to provide a set of isolated
+ * CPUs that should be excluded from wq_unbound_cpumask. The caller must hold
+ * either cpus_read_lock or cpus_write_lock.
*/
-int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
+int workqueue_unbound_exclude_cpumask(cpumask_var_t exclude_cpumask)
{
- int ret = -EINVAL;
+ cpumask_var_t cpumask;
+ int ret = 0;
+
+ if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
+ lockdep_assert_cpus_held();
+ mutex_lock(&wq_pool_mutex);
+
+ /* Save the current isolated cpumask & export it via sysfs */
+ cpumask_copy(wq_isolated_cpumask, exclude_cpumask);
/*
- * Not excluding isolated cpus on purpose.
- * If the user wishes to include them, we allow that.
+ * If the operation fails, it will fall back to
+ * wq_requested_unbound_cpumask which is initially set to
+ * (HK_TYPE_WQ ∩ HK_TYPE_DOMAIN) housekeeping mask and rewritten
+ * by any subsequent write to workqueue/cpumask sysfs file.
*/
- cpumask_and(cpumask, cpumask, cpu_possible_mask);
- if (!cpumask_empty(cpumask)) {
- apply_wqattrs_lock();
- if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
- ret = 0;
- goto out_unlock;
- }
-
+ if (!cpumask_andnot(cpumask, wq_requested_unbound_cpumask, exclude_cpumask))
+ cpumask_copy(cpumask, wq_requested_unbound_cpumask);
+ if (!cpumask_equal(cpumask, wq_unbound_cpumask))
ret = workqueue_apply_unbound_cpumask(cpumask);
-out_unlock:
- apply_wqattrs_unlock();
- }
-
+ mutex_unlock(&wq_pool_mutex);
+ free_cpumask_var(cpumask);
return ret;
}
@@ -5979,6 +5973,19 @@ static struct attribute *wq_sysfs_attrs[] = {
};
ATTRIBUTE_GROUPS(wq_sysfs);
+static void apply_wqattrs_lock(void)
+{
+ /* CPUs should stay stable across pwq creations and installations */
+ cpus_read_lock();
+ mutex_lock(&wq_pool_mutex);
+}
+
+static void apply_wqattrs_unlock(void)
+{
+ mutex_unlock(&wq_pool_mutex);
+ cpus_read_unlock();
+}
+
static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -6155,19 +6162,74 @@ static struct bus_type wq_subsys = {
.dev_groups = wq_sysfs_groups,
};
-static ssize_t wq_unbound_cpumask_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+/**
+ * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
+ * @cpumask: the cpumask to set
+ *
+ * The low-level workqueues cpumask is a global cpumask that limits
+ * the affinity of all unbound workqueues. This function checks the @cpumask
+ * and applies it to all unbound workqueues, updating all of their pwqs.
+ *
+ * Return: 0 - Success
+ * -EINVAL - Invalid @cpumask
+ * -ENOMEM - Failed to allocate memory for attrs or pwqs.
+ */
+static int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
+{
+ int ret = -EINVAL;
+
+ /*
+ * Not excluding isolated cpus on purpose.
+ * If the user wishes to include them, we allow that.
+ */
+ cpumask_and(cpumask, cpumask, cpu_possible_mask);
+ if (!cpumask_empty(cpumask)) {
+ apply_wqattrs_lock();
+ cpumask_copy(wq_requested_unbound_cpumask, cpumask);
+ if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ ret = workqueue_apply_unbound_cpumask(cpumask);
+
+out_unlock:
+ apply_wqattrs_unlock();
+ }
+
+ return ret;
+}
+
+static ssize_t __wq_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf, cpumask_var_t mask)
{
int written;
mutex_lock(&wq_pool_mutex);
- written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
- cpumask_pr_args(wq_unbound_cpumask));
+ written = scnprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
mutex_unlock(&wq_pool_mutex);
return written;
}
+static ssize_t wq_unbound_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return __wq_cpumask_show(dev, attr, buf, wq_unbound_cpumask);
+}
+
+static ssize_t wq_requested_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return __wq_cpumask_show(dev, attr, buf, wq_requested_unbound_cpumask);
+}
+
+static ssize_t wq_isolated_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return __wq_cpumask_show(dev, attr, buf, wq_isolated_cpumask);
+}
+
static ssize_t wq_unbound_cpumask_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -6185,9 +6247,13 @@ static ssize_t wq_unbound_cpumask_store(struct device *dev,
return ret ? ret : count;
}
-static struct device_attribute wq_sysfs_cpumask_attr =
+static struct device_attribute wq_sysfs_cpumask_attrs[] = {
__ATTR(cpumask, 0644, wq_unbound_cpumask_show,
- wq_unbound_cpumask_store);
+ wq_unbound_cpumask_store),
+ __ATTR(cpumask_requested, 0444, wq_requested_cpumask_show, NULL),
+ __ATTR(cpumask_isolated, 0444, wq_isolated_cpumask_show, NULL),
+ __ATTR_NULL,
+};
static int __init wq_sysfs_init(void)
{
@@ -6200,7 +6266,13 @@ static int __init wq_sysfs_init(void)
dev_root = bus_get_dev_root(&wq_subsys);
if (dev_root) {
- err = device_create_file(dev_root, &wq_sysfs_cpumask_attr);
+ struct device_attribute *attr;
+
+ for (attr = wq_sysfs_cpumask_attrs; attr->attr.name; attr++) {
+ err = device_create_file(dev_root, attr);
+ if (err)
+ break;
+ }
put_device(dev_root);
}
return err;
@@ -6542,12 +6614,17 @@ void __init workqueue_init_early(void)
BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
+ BUG_ON(!alloc_cpumask_var(&wq_requested_unbound_cpumask, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var(&wq_isolated_cpumask, GFP_KERNEL));
+
cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
if (!cpumask_empty(&wq_cmdline_cpumask))
restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);
+ cpumask_copy(wq_requested_unbound_cpumask, wq_unbound_cpumask);
+
pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
wq_update_pod_attrs_buf = alloc_workqueue_attrs();
diff --git a/lib/Kconfig b/lib/Kconfig
index 3ea1c830efab..5ddda7c2ed9b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -713,10 +713,20 @@ config ARCH_STACKWALK
config STACKDEPOT
bool
select STACKTRACE
+ help
+ Stack depot: stack trace storage that avoids duplication
config STACKDEPOT_ALWAYS_INIT
bool
select STACKDEPOT
+ help
+ Always initialize stack depot during early boot
+
+config STACKDEPOT_MAX_FRAMES
+ int "Maximum number of frames in trace saved in stack depot"
+ range 1 256
+ default 64
+ depends on STACKDEPOT
config REF_TRACKER
bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4405f81248fb..97ce28f4d154 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -763,6 +763,8 @@ config DEBUG_STACK_USAGE
help
Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T and sysrq-P debug output.
+ Also emits a message to dmesg when a process exits if that process
+ used more stack space than previously exiting processes.
This option will slow down process creation somewhat.
@@ -1970,7 +1972,6 @@ config FAULT_INJECTION
config FAILSLAB
bool "Fault-injection capability for kmalloc"
depends on FAULT_INJECTION
- depends on SLAB || SLUB
help
Provide fault-injection capability for kmalloc.
@@ -2088,10 +2089,6 @@ config KCOV
KCOV exposes kernel code coverage information in a form suitable
for coverage-guided fuzzing (randomized testing).
- If RANDOMIZE_BASE is enabled, PC values will not be stable across
- different machines and across reboots. If you need stable PC values,
- disable RANDOMIZE_BASE.
-
For more details, see Documentation/dev-tools/kcov.rst.
config KCOV_ENABLE_COMPARISONS
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index fdca89c05745..e6eda054ab27 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -37,7 +37,7 @@ menuconfig KASAN
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \
CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
HAVE_ARCH_KASAN_HW_TAGS
- depends on (SLUB && SYSFS && !SLUB_TINY) || (SLAB && !DEBUG_SLAB)
+ depends on SYSFS && !SLUB_TINY
select STACKDEPOT_ALWAYS_INIT
help
Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety
@@ -78,7 +78,7 @@ config KASAN_GENERIC
bool "Generic KASAN"
depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
- select SLUB_DEBUG if SLUB
+ select SLUB_DEBUG
select CONSTRUCTORS
help
Enables Generic KASAN.
@@ -89,13 +89,11 @@ config KASAN_GENERIC
overhead of ~50% for dynamic allocations.
The performance slowdown is ~x3.
- (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.)
-
config KASAN_SW_TAGS
bool "Software Tag-Based KASAN"
depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
- select SLUB_DEBUG if SLUB
+ select SLUB_DEBUG
select CONSTRUCTORS
help
Enables Software Tag-Based KASAN.
@@ -110,12 +108,9 @@ config KASAN_SW_TAGS
May potentially introduce problems related to pointer casting and
comparison, as it embeds a tag into the top byte of each pointer.
- (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.)
-
config KASAN_HW_TAGS
bool "Hardware Tag-Based KASAN"
depends on HAVE_ARCH_KASAN_HW_TAGS
- depends on SLUB
help
Enables Hardware Tag-Based KASAN.
@@ -134,7 +129,7 @@ endchoice
choice
prompt "Instrumentation type"
depends on KASAN_GENERIC || KASAN_SW_TAGS
- default KASAN_OUTLINE
+ default KASAN_INLINE if !ARCH_DISABLE_KASAN_INLINE
config KASAN_OUTLINE
bool "Outline instrumentation"
@@ -207,4 +202,25 @@ config KASAN_MODULE_TEST
A part of the KASAN test suite that is not integrated with KUnit.
Incompatible with Hardware Tag-Based KASAN.
+config KASAN_EXTRA_INFO
+ bool "Record and report more information"
+ depends on KASAN
+ help
+ Record and report more information to help us find the cause of the
+ bug and to help us correlate the error with other system events.
+
+ Currently, the CPU number and timestamp are additionally
+ recorded for each heap block at allocation and free time, and
+ 8 bytes will be added to each metadata structure that records
+ allocation or free information.
+
+ In Generic KASAN, each kmalloc-8 and kmalloc-16 object will add
+ 16 bytes of additional memory consumption, and each kmalloc-32
+ object will add 8 bytes of additional memory consumption, not
+ affecting other larger objects.
+
+ In SW_TAGS KASAN and HW_TAGS KASAN, depending on the stack_ring_size
+ boot parameter, it will add 8 * stack_ring_size bytes of additional
+ memory consumption.
+
endif # KASAN
diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
index 459dda9ef619..6fbbebec683a 100644
--- a/lib/Kconfig.kfence
+++ b/lib/Kconfig.kfence
@@ -5,7 +5,7 @@ config HAVE_ARCH_KFENCE
menuconfig KFENCE
bool "KFENCE: low-overhead sampling-based memory safety error detector"
- depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
+ depends on HAVE_ARCH_KFENCE
select STACKTRACE
select IRQ_WORK
help
diff --git a/lib/Kconfig.kmsan b/lib/Kconfig.kmsan
index ef2c8f256c57..0541d7b079cc 100644
--- a/lib/Kconfig.kmsan
+++ b/lib/Kconfig.kmsan
@@ -11,7 +11,7 @@ config HAVE_KMSAN_COMPILER
config KMSAN
bool "KMSAN: detector of uninitialized values use"
depends on HAVE_ARCH_KMSAN && HAVE_KMSAN_COMPILER
- depends on SLUB && DEBUG_KERNEL && !KASAN && !KCSAN
+ depends on DEBUG_KERNEL && !KASAN && !KCSAN
depends on !PREEMPT_RT
select STACKDEPOT
select STACKDEPOT_ALWAYS_INIT
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
index d1a7d29d2ac9..9cddf35d3b66 100644
--- a/lib/crc-ccitt.c
+++ b/lib/crc-ccitt.c
@@ -49,46 +49,6 @@ u16 const crc_ccitt_table[256] = {
};
EXPORT_SYMBOL(crc_ccitt_table);
-/*
- * Similar table to calculate CRC16 variant known as CRC-CCITT-FALSE
- * Reflected bits order, does not augment final value.
- */
-u16 const crc_ccitt_false_table[256] = {
- 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
- 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
- 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
- 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
- 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
- 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
- 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
- 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
- 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
- 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
- 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
- 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
- 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
- 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
- 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
- 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
- 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
- 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
- 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
- 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
- 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
- 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
- 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
- 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
- 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
- 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
- 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
- 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
- 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
- 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
- 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
- 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
-};
-EXPORT_SYMBOL(crc_ccitt_false_table);
-
/**
* crc_ccitt - recompute the CRC (CRC-CCITT variant) for the data
* buffer
@@ -104,20 +64,5 @@ u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
}
EXPORT_SYMBOL(crc_ccitt);
-/**
- * crc_ccitt_false - recompute the CRC (CRC-CCITT-FALSE variant)
- * for the data buffer
- * @crc: previous CRC value
- * @buffer: data pointer
- * @len: number of bytes in the buffer
- */
-u16 crc_ccitt_false(u16 crc, u8 const *buffer, size_t len)
-{
- while (len--)
- crc = crc_ccitt_false_byte(crc, *buffer++);
- return crc;
-}
-EXPORT_SYMBOL(crc_ccitt_false);
-
MODULE_DESCRIPTION("CRC-CCITT calculations");
MODULE_LICENSE("GPL");
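With the FALSE variant removed, lib/crc-ccitt.c keeps only the reflected CRC-CCITT (polynomial 0x1021, processed LSB-first as 0x8408, which is what the remaining table encodes). The table-free user-space equivalent below is only for sanity-checking; it should match the kernel's table-driven crc_ccitt() and, with a zero seed, produce the well-known 0x2189 check value for "123456789".

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* bit-by-bit CRC-CCITT, reflected, polynomial 0x8408 */
static uint16_t crc_ccitt_bitwise(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	}
	return crc;
}

int main(void)
{
	const uint8_t msg[] = "123456789";

	printf("crc = 0x%04x\n", crc_ccitt_bitwise(0, msg, 9));   /* expected: 0x2189 */
	return 0;
}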
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 2a8e9d63fbe3..fb12a9bacd2f 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -620,9 +620,8 @@ static void debug_objects_fill_pool(void)
static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
debug_objects_fill_pool();
@@ -643,24 +642,18 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
obj->state = ODEBUG_STATE_INIT;
- break;
-
- case ODEBUG_STATE_ACTIVE:
- state = obj->state;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "init");
- debug_object_fixup(descr->fixup_init, addr, state);
- return;
-
- case ODEBUG_STATE_DESTROYED:
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "init");
return;
default:
break;
}
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "init");
+
+ if (o.state == ODEBUG_STATE_ACTIVE)
+ debug_object_fixup(descr->fixup_init, addr, o.state);
}
/**
@@ -701,11 +694,9 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
- enum debug_obj_state state;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- int ret;
if (!debug_objects_enabled)
return 0;
@@ -717,49 +708,38 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object_or_alloc(addr, db, descr, false, true);
- if (likely(!IS_ERR_OR_NULL(obj))) {
- bool print_object = false;
-
+ if (unlikely(!obj)) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_oom();
+ return 0;
+ } else if (likely(!IS_ERR(obj))) {
switch (obj->state) {
- case ODEBUG_STATE_INIT:
- case ODEBUG_STATE_INACTIVE:
- obj->state = ODEBUG_STATE_ACTIVE;
- ret = 0;
- break;
-
case ODEBUG_STATE_ACTIVE:
- state = obj->state;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "activate");
- ret = debug_object_fixup(descr->fixup_activate, addr, state);
- return ret ? 0 : -EINVAL;
-
case ODEBUG_STATE_DESTROYED:
- print_object = true;
- ret = -EINVAL;
+ o = *obj;
break;
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_ACTIVE;
+ fallthrough;
default:
- ret = 0;
- break;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return 0;
}
- raw_spin_unlock_irqrestore(&db->lock, flags);
- if (print_object)
- debug_print_object(obj, "activate");
- return ret;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "activate");
- /* If NULL the allocation has hit OOM */
- if (!obj) {
- debug_objects_oom();
- return 0;
+ switch (o.state) {
+ case ODEBUG_STATE_ACTIVE:
+ case ODEBUG_STATE_NOTAVAILABLE:
+ if (debug_object_fixup(descr->fixup_activate, addr, o.state))
+ return 0;
+ fallthrough;
+ default:
+ return -EINVAL;
}
-
- /* Object is neither static nor tracked. It's not initialized */
- debug_print_object(&o, "activate");
- ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
- return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
@@ -770,10 +750,10 @@ EXPORT_SYMBOL_GPL(debug_object_activate);
*/
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -785,33 +765,24 @@ void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
obj = lookup_object(addr, db);
if (obj) {
switch (obj->state) {
+ case ODEBUG_STATE_DESTROYED:
+ break;
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
case ODEBUG_STATE_ACTIVE:
- if (!obj->astate)
- obj->state = ODEBUG_STATE_INACTIVE;
- else
- print_object = true;
- break;
-
- case ODEBUG_STATE_DESTROYED:
- print_object = true;
- break;
+ if (obj->astate)
+ break;
+ obj->state = ODEBUG_STATE_INACTIVE;
+ fallthrough;
default:
- break;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
}
+ o = *obj;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (!obj) {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- debug_print_object(&o, "deactivate");
- } else if (print_object) {
- debug_print_object(obj, "deactivate");
- }
+ debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
@@ -822,11 +793,9 @@ EXPORT_SYMBOL_GPL(debug_object_deactivate);
*/
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
- bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -836,32 +805,31 @@ void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
- if (!obj)
- goto out_unlock;
+ if (!obj) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+ case ODEBUG_STATE_DESTROYED:
+ break;
case ODEBUG_STATE_NONE:
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
obj->state = ODEBUG_STATE_DESTROYED;
- break;
- case ODEBUG_STATE_ACTIVE:
- state = obj->state;
+ fallthrough;
+ default:
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "destroy");
- debug_object_fixup(descr->fixup_destroy, addr, state);
return;
-
- case ODEBUG_STATE_DESTROYED:
- print_object = true;
- break;
- default:
- break;
}
-out_unlock:
+
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (print_object)
- debug_print_object(obj, "destroy");
+ debug_print_object(&o, "destroy");
+
+ if (o.state == ODEBUG_STATE_ACTIVE)
+ debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
@@ -872,9 +840,8 @@ EXPORT_SYMBOL_GPL(debug_object_destroy);
*/
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
if (!debug_objects_enabled)
@@ -885,24 +852,26 @@ void debug_object_free(void *addr, const struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
- if (!obj)
- goto out_unlock;
+ if (!obj) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- state = obj->state;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "free");
- debug_object_fixup(descr->fixup_free, addr, state);
- return;
+ break;
default:
hlist_del(&obj->node);
raw_spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
return;
}
-out_unlock:
+
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "free");
+
+ debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);
@@ -954,10 +923,10 @@ void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
unsigned int expect, unsigned int next)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -970,28 +939,19 @@ debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
if (obj) {
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- if (obj->astate == expect)
- obj->astate = next;
- else
- print_object = true;
- break;
-
+ if (obj->astate != expect)
+ break;
+ obj->astate = next;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
default:
- print_object = true;
break;
}
+ o = *obj;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (!obj) {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- debug_print_object(&o, "active_state");
- } else if (print_object) {
- debug_print_object(obj, "active_state");
- }
+ debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
@@ -999,12 +959,10 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
- const struct debug_obj_descr *descr;
- enum debug_obj_state state;
+ int cnt, objs_checked = 0;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
struct hlist_node *tmp;
- struct debug_obj *obj;
- int cnt, objs_checked = 0;
saddr = (unsigned long) address;
eaddr = saddr + size;
@@ -1026,12 +984,10 @@ repeat:
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- descr = obj->descr;
- state = obj->state;
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "free");
- debug_object_fixup(descr->fixup_free,
- (void *) oaddr, state);
+ debug_print_object(&o, "free");
+ debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
goto repeat;
default:
hlist_del(&obj->node);
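The debugobjects rework consistently snapshots the object while the bucket lock is held (o = *obj), drops the lock, and only then prints or runs fixups from the copy, so the raw spinlock is never held across the slow reporting path. A rough user-space analogue of that snapshot-under-lock pattern (the pthread mutex and struct tracked are stand-ins):

#include <pthread.h>
#include <stdio.h>

struct tracked { int state; const char *name; };

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tracked obj = { .state = 1, .name = "timer" };

/* slow path runs on a private copy, never with the bucket lock held */
static void report(struct tracked snapshot)
{
	printf("object %s in state %d\n", snapshot.name, snapshot.state);
}

int main(void)
{
	struct tracked o;

	pthread_mutex_lock(&bucket_lock);
	o = obj;                             /* snapshot under the lock */
	pthread_mutex_unlock(&bucket_lock);

	report(o);                           /* report from the consistent copy */
	return 0;
}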
diff --git a/lib/fw_table.c b/lib/fw_table.c
index 294df54e33b6..c49a09ee3853 100644
--- a/lib/fw_table.c
+++ b/lib/fw_table.c
@@ -85,11 +85,6 @@ acpi_get_subtable_type(char *id)
return ACPI_SUBTABLE_COMMON;
}
-static __init_or_acpilib bool has_handler(struct acpi_subtable_proc *proc)
-{
- return proc->handler || proc->handler_arg;
-}
-
static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
union acpi_subtable_headers *hdr,
unsigned long end)
@@ -133,7 +128,6 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
unsigned long table_end, subtable_len, entry_len;
struct acpi_subtable_entry entry;
int count = 0;
- int errs = 0;
int i;
table_end = (unsigned long)table_header + table_header->length;
@@ -145,25 +139,19 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
((unsigned long)table_header + table_size);
subtable_len = acpi_get_subtable_header_length(&entry);
- while (((unsigned long)entry.hdr) + subtable_len < table_end) {
- if (max_entries && count >= max_entries)
- break;
-
+ while (((unsigned long)entry.hdr) + subtable_len < table_end) {
for (i = 0; i < proc_num; i++) {
if (acpi_get_entry_type(&entry) != proc[i].id)
continue;
- if (!has_handler(&proc[i]) ||
- (!errs &&
- call_handler(&proc[i], entry.hdr, table_end))) {
- errs++;
- continue;
- }
+
+ if (!max_entries || count < max_entries)
+ if (call_handler(&proc[i], entry.hdr, table_end))
+ return -EINVAL;
proc[i].count++;
+ count++;
break;
}
- if (i != proc_num)
- count++;
/*
* If entry->length is 0, break from this loop to avoid
@@ -180,9 +168,9 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
}
if (max_entries && count > max_entries) {
- pr_warn("[%4.4s:0x%02x] found the maximum %i entries\n",
- id, proc->id, count);
+ pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n",
+ id, proc->id, count - max_entries, count);
}
- return errs ? -EINVAL : count;
+ return count;
}
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 8ff6824a1005..e0aa6b440ca5 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1369,19 +1369,6 @@ ssize_t import_iovec(int type, const struct iovec __user *uvec,
}
EXPORT_SYMBOL(import_iovec);
-int import_single_range(int rw, void __user *buf, size_t len,
- struct iovec *iov, struct iov_iter *i)
-{
- if (len > MAX_RW_COUNT)
- len = MAX_RW_COUNT;
- if (unlikely(!access_ok(buf, len)))
- return -EFAULT;
-
- iov_iter_ubuf(i, rw, buf, len);
- return 0;
-}
-EXPORT_SYMBOL(import_single_range);
-
int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
if (len > MAX_RW_COUNT)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 684689457d77..6f241bb38799 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4,6 +4,8 @@
* Copyright (c) 2018-2022 Oracle Corporation
* Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
* Matthew Wilcox <willy@infradead.org>
+ * Copyright (c) 2023 ByteDance
+ * Author: Peng Zhang <zhangpeng.00@bytedance.com>
*/
/*
@@ -14,8 +16,8 @@
* and are simply the slot index + the minimum of the node.
*
* In regular B-Tree terms, pivots are called keys. The term pivot is used to
- * indicate that the tree is specifying ranges, Pivots may appear in the
- * subtree with an entry attached to the value where as keys are unique to a
+ * indicate that the tree is specifying ranges. Pivots may appear in the
+ * subtree with an entry attached to the value whereas keys are unique to a
* specific position of a B-tree. Pivot values are inclusive of the slot with
* the same index.
*
@@ -165,6 +167,11 @@ static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}
+static inline void mt_free_one(struct maple_node *node)
+{
+ kmem_cache_free(maple_node_cache, node);
+}
+
static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
@@ -205,23 +212,29 @@ static unsigned int mas_mt_height(struct ma_state *mas)
return mt_height(mas->tree);
}
-static inline enum maple_type mte_node_type(const struct maple_enode *entry)
+static inline unsigned int mt_attr(struct maple_tree *mt)
+{
+ return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK;
+}
+
+static __always_inline enum maple_type mte_node_type(
+ const struct maple_enode *entry)
{
return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
MAPLE_NODE_TYPE_MASK;
}
-static inline bool ma_is_dense(const enum maple_type type)
+static __always_inline bool ma_is_dense(const enum maple_type type)
{
return type < maple_leaf_64;
}
-static inline bool ma_is_leaf(const enum maple_type type)
+static __always_inline bool ma_is_leaf(const enum maple_type type)
{
return type < maple_range_64;
}
-static inline bool mte_is_leaf(const struct maple_enode *entry)
+static __always_inline bool mte_is_leaf(const struct maple_enode *entry)
{
return ma_is_leaf(mte_node_type(entry));
}
@@ -230,60 +243,50 @@ static inline bool mte_is_leaf(const struct maple_enode *entry)
* We also reserve values with the bottom two bits set to '10' which are
* below 4096
*/
-static inline bool mt_is_reserved(const void *entry)
+static __always_inline bool mt_is_reserved(const void *entry)
{
return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
xa_is_internal(entry);
}
-static inline void mas_set_err(struct ma_state *mas, long err)
+static __always_inline void mas_set_err(struct ma_state *mas, long err)
{
mas->node = MA_ERROR(err);
+ mas->status = ma_error;
}
-static inline bool mas_is_ptr(const struct ma_state *mas)
+static __always_inline bool mas_is_ptr(const struct ma_state *mas)
{
- return mas->node == MAS_ROOT;
+ return mas->status == ma_root;
}
-static inline bool mas_is_start(const struct ma_state *mas)
+static __always_inline bool mas_is_start(const struct ma_state *mas)
{
- return mas->node == MAS_START;
+ return mas->status == ma_start;
}
-bool mas_is_err(struct ma_state *mas)
+static __always_inline bool mas_is_none(const struct ma_state *mas)
{
- return xa_is_err(mas->node);
+ return mas->status == ma_none;
}
-static __always_inline bool mas_is_overflow(struct ma_state *mas)
+static __always_inline bool mas_is_paused(const struct ma_state *mas)
{
- if (unlikely(mas->node == MAS_OVERFLOW))
- return true;
-
- return false;
+ return mas->status == ma_pause;
}
-static __always_inline bool mas_is_underflow(struct ma_state *mas)
+static __always_inline bool mas_is_overflow(struct ma_state *mas)
{
- if (unlikely(mas->node == MAS_UNDERFLOW))
- return true;
-
- return false;
+ return mas->status == ma_overflow;
}
-static inline bool mas_searchable(struct ma_state *mas)
+static inline bool mas_is_underflow(struct ma_state *mas)
{
- if (mas_is_none(mas))
- return false;
-
- if (mas_is_ptr(mas))
- return false;
-
- return true;
+ return mas->status == ma_underflow;
}
-static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
+static __always_inline struct maple_node *mte_to_node(
+ const struct maple_enode *entry)
{
return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}
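The helpers above now test an explicit status field instead of comparing mas->node against sentinel pointers such as MAS_START, MAS_ROOT and MAS_NONE. The status values used throughout this patch are ma_active, ma_start, ma_root, ma_none, ma_pause, ma_overflow, ma_underflow and ma_error; the sketch below only summarises how they are used here (the real enum and struct live in include/linux/maple_tree.h, so the name and layout shown are paraphrased, not quoted):

/* Sketch only: summarises the states as used by the helpers in this patch. */
enum maple_status_sketch {
	ma_active,	/* walking the tree, mas->node is a live node */
	ma_start,	/* not yet walked, or reset for a fresh walk */
	ma_root,	/* single-entry tree, the entry lives in ma_root */
	ma_none,	/* empty tree, or walked off the tree */
	ma_pause,	/* iteration paused, restart from the last range */
	ma_overflow,	/* ran past the upper search limit */
	ma_underflow,	/* ran past the lower search limit */
	ma_error,	/* mas->node carries an xa_err() encoded error */
};

/* The old sentinel test          ...becomes a status test:            */
/* if (mas->node == MAS_START)    ->   if (mas->status == ma_start)    */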
@@ -360,12 +363,12 @@ static inline bool mte_has_null(const struct maple_enode *node)
return (unsigned long)node & MAPLE_ENODE_NULL;
}
-static inline bool ma_is_root(struct maple_node *node)
+static __always_inline bool ma_is_root(struct maple_node *node)
{
return ((unsigned long)node->parent & MA_ROOT_PARENT);
}
-static inline bool mte_is_root(const struct maple_enode *node)
+static __always_inline bool mte_is_root(const struct maple_enode *node)
{
return ma_is_root(mte_to_node(node));
}
@@ -375,7 +378,7 @@ static inline bool mas_is_root_limits(const struct ma_state *mas)
return !mas->min && mas->max == ULONG_MAX;
}
-static inline bool mt_is_alloc(struct maple_tree *mt)
+static __always_inline bool mt_is_alloc(struct maple_tree *mt)
{
return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}
@@ -514,11 +517,12 @@ void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
*
* Return: The slot in the parent node where @enode resides.
*/
-static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
+static __always_inline
+unsigned int mte_parent_slot(const struct maple_enode *enode)
{
unsigned long val = (unsigned long)mte_to_node(enode)->parent;
- if (val & MA_ROOT_PARENT)
+ if (unlikely(val & MA_ROOT_PARENT))
return 0;
/*
@@ -534,7 +538,8 @@ static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
*
* Return: The parent maple node.
*/
-static inline struct maple_node *mte_parent(const struct maple_enode *enode)
+static __always_inline
+struct maple_node *mte_parent(const struct maple_enode *enode)
{
return (void *)((unsigned long)
(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
@@ -546,7 +551,7 @@ static inline struct maple_node *mte_parent(const struct maple_enode *enode)
*
* Return: true if dead, false otherwise.
*/
-static inline bool ma_dead_node(const struct maple_node *node)
+static __always_inline bool ma_dead_node(const struct maple_node *node)
{
struct maple_node *parent;
@@ -562,7 +567,7 @@ static inline bool ma_dead_node(const struct maple_node *node)
*
* Return: true if dead, false otherwise.
*/
-static inline bool mte_dead_node(const struct maple_enode *enode)
+static __always_inline bool mte_dead_node(const struct maple_enode *enode)
{
struct maple_node *parent, *node;
@@ -680,35 +685,6 @@ static inline unsigned long *ma_gaps(struct maple_node *node,
}
/*
- * mas_pivot() - Get the pivot at @piv of the maple encoded node.
- * @mas: The maple state.
- * @piv: The pivot.
- *
- * Return: the pivot at @piv of @mn.
- */
-static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
-{
- struct maple_node *node = mas_mn(mas);
- enum maple_type type = mte_node_type(mas->node);
-
- if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) {
- mas_set_err(mas, -EIO);
- return 0;
- }
-
- switch (type) {
- case maple_arange_64:
- return node->ma64.pivot[piv];
- case maple_range_64:
- case maple_leaf_64:
- return node->mr64.pivot[piv];
- case maple_dense:
- return 0;
- }
- return 0;
-}
-
-/*
* mas_safe_pivot() - get the pivot at @piv or mas->max.
* @mas: The maple state
* @pivots: The pointer to the maple node pivots
@@ -718,7 +694,7 @@ static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
* Return: The pivot at @piv within the limit of the @pivots array, @mas->max
* otherwise.
*/
-static inline unsigned long
+static __always_inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
unsigned char piv, enum maple_type type)
{
@@ -759,7 +735,6 @@ static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
BUG_ON(piv >= mt_pivots[type]);
switch (type) {
- default:
case maple_range_64:
case maple_leaf_64:
node->mr64.pivot[piv] = val;
@@ -783,7 +758,6 @@ static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
switch (mt) {
- default:
case maple_arange_64:
return mn->ma64.slot;
case maple_range_64:
@@ -792,6 +766,8 @@ static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
case maple_dense:
return mn->slot;
}
+
+ return NULL;
}
static inline bool mt_write_locked(const struct maple_tree *mt)
@@ -800,20 +776,20 @@ static inline bool mt_write_locked(const struct maple_tree *mt)
lockdep_is_held(&mt->ma_lock);
}
-static inline bool mt_locked(const struct maple_tree *mt)
+static __always_inline bool mt_locked(const struct maple_tree *mt)
{
return mt_external_lock(mt) ? mt_lock_is_held(mt) :
lockdep_is_held(&mt->ma_lock);
}
-static inline void *mt_slot(const struct maple_tree *mt,
+static __always_inline void *mt_slot(const struct maple_tree *mt,
void __rcu **slots, unsigned char offset)
{
return rcu_dereference_check(slots[offset], mt_locked(mt));
}
-static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
- unsigned char offset)
+static __always_inline void *mt_slot_locked(struct maple_tree *mt,
+ void __rcu **slots, unsigned char offset)
{
return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
}
@@ -825,8 +801,8 @@ static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
*
* Return: The entry stored in @slots at the @offset.
*/
-static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
- unsigned char offset)
+static __always_inline void *mas_slot_locked(struct ma_state *mas,
+ void __rcu **slots, unsigned char offset)
{
return mt_slot_locked(mas->tree, slots, offset);
}
@@ -839,8 +815,8 @@ static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
*
* Return: The entry stored in @slots at the @offset
*/
-static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
- unsigned char offset)
+static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
+ unsigned char offset)
{
return mt_slot(mas->tree, slots, offset);
}
@@ -851,7 +827,7 @@ static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
*
* Return: The pointer to the root of the tree
*/
-static inline void *mas_root(struct ma_state *mas)
+static __always_inline void *mas_root(struct ma_state *mas)
{
return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}
@@ -954,10 +930,8 @@ static inline unsigned char ma_meta_end(struct maple_node *mn,
/*
* ma_meta_gap() - Get the largest gap location of a node from the metadata
* @mn: The maple node
- * @mt: The maple node type
*/
-static inline unsigned char ma_meta_gap(struct maple_node *mn,
- enum maple_type mt)
+static inline unsigned char ma_meta_gap(struct maple_node *mn)
{
return mn->ma64.meta.gap;
}
@@ -1112,14 +1086,16 @@ static int mas_ascend(struct ma_state *mas)
return 0;
}
- if (!mas->min)
+ min = 0;
+ max = ULONG_MAX;
+ if (!mas->offset) {
+ min = mas->min;
set_min = true;
+ }
if (mas->max == ULONG_MAX)
set_max = true;
- min = 0;
- max = ULONG_MAX;
do {
p_enode = a_enode;
a_type = mas_parent_type(mas, p_enode);
@@ -1258,6 +1234,7 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
if (mas->mas_flags & MA_STATE_PREALLOC) {
if (allocated)
return;
+ BUG_ON(!allocated);
WARN_ON(!allocated);
}
@@ -1363,14 +1340,14 @@ static void mas_node_count(struct ma_state *mas, int count)
* mas_start() - Sets up maple state for operations.
* @mas: The maple state.
*
- * If mas->node == MAS_START, then set the min, max and depth to
+ * If mas->status == ma_start, then set the min, max and depth to

* defaults.
*
* Return:
- * - If mas->node is an error or not MAS_START, return NULL.
- * - If it's an empty tree: NULL & mas->node == MAS_NONE
- * - If it's a single entry: The entry & mas->node == MAS_ROOT
- * - If it's a tree: NULL & mas->node == safe root node.
+ * - If mas->node is an error or not ma_start, return NULL.
+ * - If it's an empty tree: NULL & mas->status == ma_none
+ * - If it's a single entry: The entry & mas->status == ma_root
+ * - If it's a tree: NULL & mas->node == safe root node.
*/
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
@@ -1386,6 +1363,7 @@ retry:
/* Tree with nodes */
if (likely(xa_is_node(root))) {
mas->depth = 1;
+ mas->status = ma_active;
mas->node = mte_safe_root(root);
mas->offset = 0;
if (mte_dead_node(mas->node))
@@ -1396,13 +1374,14 @@ retry:
/* empty tree */
if (unlikely(!root)) {
- mas->node = MAS_NONE;
+ mas->node = NULL;
+ mas->status = ma_none;
mas->offset = MAPLE_NODE_SLOTS;
return NULL;
}
/* Single entry tree */
- mas->node = MAS_ROOT;
+ mas->status = ma_root;
mas->offset = MAPLE_NODE_SLOTS;
/* Single entry tree. */
@@ -1425,10 +1404,8 @@ retry:
* Uses metadata to find the end of the data when possible.
* Return: The zero indexed last slot with data (may be null).
*/
-static inline unsigned char ma_data_end(struct maple_node *node,
- enum maple_type type,
- unsigned long *pivots,
- unsigned long max)
+static __always_inline unsigned char ma_data_end(struct maple_node *node,
+ enum maple_type type, unsigned long *pivots, unsigned long max)
{
unsigned char offset;
@@ -1541,6 +1518,9 @@ static unsigned long mas_leaf_max_gap(struct ma_state *mas)
gap = ULONG_MAX - pivots[max_piv];
if (gap > max_gap)
max_gap = gap;
+
+ if (max_gap > pivots[max_piv] - mas->min)
+ return max_gap;
}
for (; i <= max_piv; i++) {
@@ -1608,7 +1588,7 @@ static inline unsigned long mas_max_gap(struct ma_state *mas)
node = mas_mn(mas);
MAS_BUG_ON(mas, mt != maple_arange_64);
- offset = ma_meta_gap(node, mt);
+ offset = ma_meta_gap(node);
gaps = ma_gaps(node, mt);
return gaps[offset];
}
@@ -1639,7 +1619,7 @@ static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
ascend:
MAS_BUG_ON(mas, pmt != maple_arange_64);
- meta_offset = ma_meta_gap(pnode, pmt);
+ meta_offset = ma_meta_gap(pnode);
meta_gap = pgaps[meta_offset];
pgaps[offset] = new;
@@ -1987,27 +1967,13 @@ complete:
/*
* mas_leaf_set_meta() - Set the metadata of a leaf if possible.
- * @mas: The maple state
* @node: The maple node
- * @pivots: pointer to the maple node pivots
* @mt: The maple type
- * @end: The assumed end
- *
- * Note, end may be incremented within this function but not modified at the
- * source. This is fine since the metadata is the last thing to be stored in a
- * node during a write.
+ * @end: The node end
*/
-static inline void mas_leaf_set_meta(struct ma_state *mas,
- struct maple_node *node, unsigned long *pivots,
+static inline void mas_leaf_set_meta(struct maple_node *node,
enum maple_type mt, unsigned char end)
{
- /* There is no room for metadata already */
- if (mt_pivots[mt] <= end)
- return;
-
- if (pivots[end] && pivots[end] < mas->max)
- end++;
-
if (end < mt_slots[mt] - 1)
ma_set_meta(node, mt, 0, end);
}
@@ -2064,7 +2030,7 @@ static inline void mab_mas_cp(struct maple_big_node *b_node,
ma_set_meta(node, mt, offset, end);
} else {
- mas_leaf_set_meta(mas, node, pivots, mt, end);
+ mas_leaf_set_meta(node, mt, end);
}
}
@@ -2152,11 +2118,11 @@ static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
}
slot = offset_end + 1;
- if (slot > wr_mas->node_end)
+ if (slot > mas->end)
goto b_end;
/* Copy end data to the end of the node. */
- mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
+ mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end);
b_node->b_end--;
return;
@@ -2211,19 +2177,21 @@ static inline bool mas_next_sibling(struct ma_state *mas)
}
/*
- * mte_node_or_node() - Return the encoded node or MAS_NONE.
+ * mte_node_or_none() - Set the enode and state.
* @enode: The encoded maple node.
*
- * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
- *
- * Return: @enode or MAS_NONE
+ * Set the node to the enode and the status.
*/
-static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
+static inline void mas_node_or_none(struct ma_state *mas,
+ struct maple_enode *enode)
{
- if (enode)
- return enode;
-
- return ma_enode_ptr(MAS_NONE);
+ if (enode) {
+ mas->node = enode;
+ mas->status = ma_active;
+ } else {
+ mas->node = NULL;
+ mas->status = ma_none;
+ }
}
/*
@@ -2245,8 +2213,8 @@ static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
wr_mas->node = mas_mn(wr_mas->mas);
wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
- count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
- wr_mas->pivots, mas->max);
+ count = mas->end = ma_data_end(wr_mas->node, wr_mas->type,
+ wr_mas->pivots, mas->max);
offset = mas->offset;
while (offset < count && mas->index > wr_mas->pivots[offset])
@@ -2535,7 +2503,7 @@ static inline void mast_set_split_parents(struct maple_subtree_state *mast,
}
/*
- * mas_topiary_node() - Dispose of a singe node
+ * mas_topiary_node() - Dispose of a single node
* @mas: The maple state for pushing nodes
* @enode: The encoded maple node
* @in_rcu: If the tree is in rcu mode
@@ -2543,13 +2511,15 @@ static inline void mast_set_split_parents(struct maple_subtree_state *mast,
* The node will either be RCU freed or pushed back on the maple state.
*/
static inline void mas_topiary_node(struct ma_state *mas,
- struct maple_enode *enode, bool in_rcu)
+ struct ma_state *tmp_mas, bool in_rcu)
{
struct maple_node *tmp;
+ struct maple_enode *enode;
- if (enode == MAS_NONE)
+ if (mas_is_none(tmp_mas))
return;
+ enode = tmp_mas->node;
tmp = mte_to_node(enode);
mte_set_node_dead(enode);
if (in_rcu)
@@ -2589,8 +2559,8 @@ static inline void mas_topiary_replace(struct ma_state *mas,
/* Update the parent pointers in the tree */
tmp[0] = *mas;
tmp[0].offset = 0;
- tmp[1].node = MAS_NONE;
- tmp[2].node = MAS_NONE;
+ tmp[1].status = ma_none;
+ tmp[2].status = ma_none;
while (!mte_is_leaf(tmp[0].node)) {
n = 0;
for (i = 0; i < 3; i++) {
@@ -2610,7 +2580,7 @@ static inline void mas_topiary_replace(struct ma_state *mas,
break;
while (n < 3)
- tmp_next[n++].node = MAS_NONE;
+ tmp_next[n++].status = ma_none;
for (i = 0; i < 3; i++)
tmp[i] = tmp_next[i];
@@ -2623,8 +2593,8 @@ static inline void mas_topiary_replace(struct ma_state *mas,
tmp[0] = *mas;
tmp[0].offset = 0;
tmp[0].node = old_enode;
- tmp[1].node = MAS_NONE;
- tmp[2].node = MAS_NONE;
+ tmp[1].status = ma_none;
+ tmp[2].status = ma_none;
in_rcu = mt_in_rcu(mas->tree);
do {
n = 0;
@@ -2639,7 +2609,7 @@ static inline void mas_topiary_replace(struct ma_state *mas,
if ((tmp_next[n].min >= tmp_next->index) &&
(tmp_next[n].max <= tmp_next->last)) {
mat_add(&subtrees, tmp_next[n].node);
- tmp_next[n].node = MAS_NONE;
+ tmp_next[n].status = ma_none;
} else {
n++;
}
@@ -2650,16 +2620,16 @@ static inline void mas_topiary_replace(struct ma_state *mas,
break;
while (n < 3)
- tmp_next[n++].node = MAS_NONE;
+ tmp_next[n++].status = ma_none;
for (i = 0; i < 3; i++) {
- mas_topiary_node(mas, tmp[i].node, in_rcu);
+ mas_topiary_node(mas, &tmp[i], in_rcu);
tmp[i] = tmp_next[i];
}
} while (!mte_is_leaf(tmp[0].node));
for (i = 0; i < 3; i++)
- mas_topiary_node(mas, tmp[i].node, in_rcu);
+ mas_topiary_node(mas, &tmp[i], in_rcu);
mas_mat_destroy(mas, &subtrees);
}
@@ -2698,9 +2668,9 @@ static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
{
bool new_lmax = true;
- mast->l->node = mte_node_or_none(left);
- mast->m->node = mte_node_or_none(middle);
- mast->r->node = mte_node_or_none(right);
+ mas_node_or_none(mast->l, left);
+ mas_node_or_none(mast->m, middle);
+ mas_node_or_none(mast->r, right);
mast->l->min = mast->orig_l->min;
if (split == mast->bn->b_end) {
@@ -2796,32 +2766,29 @@ static inline void *mtree_range_walk(struct ma_state *mas)
min = mas->min;
max = mas->max;
do {
- offset = 0;
last = next;
node = mte_to_node(next);
type = mte_node_type(next);
pivots = ma_pivots(node, type);
end = ma_data_end(node, type, pivots, max);
- if (unlikely(ma_dead_node(node)))
- goto dead_node;
-
- if (pivots[offset] >= mas->index) {
- prev_max = max;
- prev_min = min;
- max = pivots[offset];
+ prev_min = min;
+ prev_max = max;
+ if (pivots[0] >= mas->index) {
+ offset = 0;
+ max = pivots[0];
goto next;
}
- do {
+ offset = 1;
+ while (offset < end) {
+ if (pivots[offset] >= mas->index) {
+ max = pivots[offset];
+ break;
+ }
offset++;
- } while ((offset < end) && (pivots[offset] < mas->index));
+ }
- prev_min = min;
min = pivots[offset - 1] + 1;
- prev_max = max;
- if (likely(offset < end && pivots[offset]))
- max = pivots[offset];
-
next:
slots = ma_slots(node, type);
next = mt_slot(mas->tree, slots, offset);
@@ -2829,6 +2796,7 @@ next:
goto dead_node;
} while (!ma_is_leaf(type));
+ mas->end = end;
mas->offset = offset;
mas->index = min;
mas->last = max;
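The rewritten walk above handles the common case (pivots[0] already covers the index) before entering the scan, records prev_min/prev_max up front, and caches the node end in mas->end once it reaches a leaf. The per-node step reduces to a plain linear pivot scan; as a standalone sketch (the free-standing form and the names are illustrative):

/*
 * Sketch of the per-node step above: find the slot whose pivot covers
 * 'index'.  'end' is the last used slot, 'node_max' the node's maximum.
 */
static unsigned char node_offset_for(const unsigned long *pivots,
				     unsigned char end, unsigned long index,
				     unsigned long node_max,
				     unsigned long *slot_max)
{
	unsigned char offset;

	if (pivots[0] >= index) {		/* common case: first slot */
		*slot_max = pivots[0];
		return 0;
	}

	for (offset = 1; offset < end; offset++) {
		if (pivots[offset] >= index) {
			*slot_max = pivots[offset];
			return offset;
		}
	}

	*slot_max = node_max;			/* last slot runs to node max */
	return offset;
}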
@@ -2879,7 +2847,7 @@ static int mas_spanning_rebalance(struct ma_state *mas,
mast->l = &l_mas;
mast->m = &m_mas;
mast->r = &r_mas;
- l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
+ l_mas.status = r_mas.status = m_mas.status = ma_none;
/* Check if this is not root and has sufficient data. */
if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
@@ -3167,7 +3135,7 @@ done:
* @mas: The maple state
* @height: The height of the tree in case it's a new root.
*/
-static inline bool mas_split_final_node(struct maple_subtree_state *mast,
+static inline void mas_split_final_node(struct maple_subtree_state *mast,
struct ma_state *mas, int height)
{
struct maple_enode *ancestor;
@@ -3191,7 +3159,6 @@ static inline bool mas_split_final_node(struct maple_subtree_state *mast,
mast->l->node = ancestor;
mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
mas->offset = mast->bn->b_end - 1;
- return true;
}
/*
@@ -3406,7 +3373,6 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
/* Try to push left. */
if (mas_push_data(mas, height, &mast, true))
break;
-
/* Try to push right. */
if (mas_push_data(mas, height, &mast, false))
break;
@@ -3495,6 +3461,7 @@ static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
mas_replace_node(wr_mas->mas, old_enode);
reuse_node:
mas_update_gap(wr_mas->mas);
+ wr_mas->mas->end = b_end;
return 1;
}
@@ -3521,6 +3488,7 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
slots = ma_slots(node, type);
node->parent = ma_parent_ptr(mas_tree_parent(mas));
mas->node = mt_mk_node(node, type);
+ mas->status = ma_active;
if (mas->index) {
if (contents) {
@@ -3553,7 +3521,7 @@ static inline void mas_store_root(struct ma_state *mas, void *entry)
mas_root_expand(mas, entry);
else {
rcu_assign_pointer(mas->tree->ma_root, entry);
- mas->node = MAS_START;
+ mas->status = ma_start;
}
}
@@ -3730,23 +3698,17 @@ static inline void *mtree_lookup_walk(struct ma_state *mas)
enum maple_type type;
void __rcu **slots;
unsigned char end;
- unsigned long max;
next = mas->node;
- max = ULONG_MAX;
do {
- offset = 0;
node = mte_to_node(next);
type = mte_node_type(next);
pivots = ma_pivots(node, type);
- end = ma_data_end(node, type, pivots, max);
- if (unlikely(ma_dead_node(node)))
- goto dead_node;
+ end = mt_pivots[type];
+ offset = 0;
do {
- if (pivots[offset] >= mas->index) {
- max = pivots[offset];
+ if (pivots[offset] >= mas->index)
break;
- }
} while (++offset < end);
slots = ma_slots(node, type);
@@ -3785,7 +3747,7 @@ static inline int mas_new_root(struct ma_state *mas, void *entry)
mas->depth = 0;
mas_set_height(mas);
rcu_assign_pointer(mas->tree->ma_root, entry);
- mas->node = MAS_START;
+ mas->status = ma_start;
goto done;
}
@@ -3798,6 +3760,7 @@ static inline int mas_new_root(struct ma_state *mas, void *entry)
slots = ma_slots(node, type);
node->parent = ma_parent_ptr(mas_tree_parent(mas));
mas->node = mt_mk_node(node, type);
+ mas->status = ma_active;
rcu_assign_pointer(slots[0], entry);
pivots[0] = mas->last;
mas->depth = 1;
@@ -3891,10 +3854,10 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
memset(&b_node, 0, sizeof(struct maple_big_node));
/* Copy l_mas and store the value in b_node. */
- mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
+ mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
/* Copy r_mas into b_node. */
- if (r_mas.offset <= r_wr_mas.node_end)
- mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
+ if (r_mas.offset <= r_mas.end)
+ mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
&b_node, b_node.b_end + 1);
else
b_node.b_end++;
@@ -3936,7 +3899,7 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
if (mas->last == wr_mas->end_piv)
offset_end++; /* don't copy this offset */
else if (unlikely(wr_mas->r_max == ULONG_MAX))
- mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
+ mas_bulk_rebalance(mas, mas->end, wr_mas->type);
/* set up node. */
if (in_rcu) {
@@ -3972,12 +3935,12 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
* this range wrote to the end of the node or it overwrote the rest of
* the data
*/
- if (offset_end > wr_mas->node_end)
+ if (offset_end > mas->end)
goto done;
dst_offset = mas->offset + 1;
/* Copy to the end of node if necessary. */
- copy_size = wr_mas->node_end - offset_end + 1;
+ copy_size = mas->end - offset_end + 1;
memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
sizeof(void *) * copy_size);
memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
@@ -3987,7 +3950,7 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
dst_pivots[new_end] = mas->max;
done:
- mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
+ mas_leaf_set_meta(newnode, maple_leaf_64, new_end);
if (in_rcu) {
struct maple_enode *old_enode = mas->node;
@@ -3998,6 +3961,7 @@ done:
}
trace_ma_write(__func__, mas, 0, wr_mas->entry);
mas_update_gap(mas);
+ mas->end = new_end;
return true;
}
@@ -4063,10 +4027,10 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
} else {
/* Check next slot(s) if we are overwriting the end */
if ((mas->last == wr_mas->end_piv) &&
- (wr_mas->node_end != wr_mas->offset_end) &&
+ (mas->end != wr_mas->offset_end) &&
!wr_mas->slots[wr_mas->offset_end + 1]) {
wr_mas->offset_end++;
- if (wr_mas->offset_end == wr_mas->node_end)
+ if (wr_mas->offset_end == mas->end)
mas->last = mas->max;
else
mas->last = wr_mas->pivots[wr_mas->offset_end];
@@ -4091,11 +4055,11 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
{
- while ((wr_mas->offset_end < wr_mas->node_end) &&
+ while ((wr_mas->offset_end < wr_mas->mas->end) &&
(wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
wr_mas->offset_end++;
- if (wr_mas->offset_end < wr_mas->node_end)
+ if (wr_mas->offset_end < wr_mas->mas->end)
wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
else
wr_mas->end_piv = wr_mas->mas->max;
@@ -4107,7 +4071,7 @@ static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
- unsigned char new_end = wr_mas->node_end + 2;
+ unsigned char new_end = mas->end + 2;
new_end -= wr_mas->offset_end - mas->offset;
if (wr_mas->r_min == mas->index)
@@ -4141,10 +4105,7 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
if (mt_in_rcu(mas->tree))
return false;
- if (mas->offset != wr_mas->node_end)
- return false;
-
- end = wr_mas->node_end;
+ end = mas->end;
if (mas->offset != end)
return false;
@@ -4178,6 +4139,7 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
if (!wr_mas->content || !wr_mas->entry)
mas_update_gap(mas);
+ mas->end = new_end;
trace_ma_write(__func__, mas, new_end, wr_mas->entry);
return true;
}
@@ -4195,7 +4157,7 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas)
trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
memset(&b_node, 0, sizeof(struct maple_big_node));
mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
- mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
+ mas_commit_b_node(wr_mas, &b_node, wr_mas->mas->end);
}
static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
@@ -4223,7 +4185,7 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
if (mas_wr_append(wr_mas, new_end))
return;
- if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
+ if (new_end == mas->end && mas_wr_slot_store(wr_mas))
return;
if (mas_wr_node_store(wr_mas, new_end))
@@ -4328,7 +4290,7 @@ exists:
}
-static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
+static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index)
{
retry:
mas_set(mas, index);
@@ -4337,7 +4299,7 @@ retry:
goto retry;
}
-static inline bool mas_rewalk_if_dead(struct ma_state *mas,
+static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas,
struct maple_node *node, const unsigned long index)
{
if (unlikely(ma_dead_node(node))) {
@@ -4349,14 +4311,16 @@ static inline bool mas_rewalk_if_dead(struct ma_state *mas,
/*
* mas_prev_node() - Find the prev non-null entry at the same level in the
- * tree. The prev value will be mas->node[mas->offset] or MAS_NONE.
+ * tree. The prev value will be mas->node[mas->offset] or the status will be
+ * ma_none.
* @mas: The maple state
* @min: The lower limit to search
*
- * The prev node value will be mas->node[mas->offset] or MAS_NONE.
+ * The prev node value will be mas->node[mas->offset] or the status will be
+ * ma_none.
* Return: 1 if the node is dead, 0 otherwise.
*/
-static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
+static int mas_prev_node(struct ma_state *mas, unsigned long min)
{
enum maple_type mt;
int offset, level;
@@ -4416,13 +4380,14 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
if (unlikely(mte_dead_node(mas->node)))
return 1;
+ mas->end = mas->offset;
return 0;
no_entry:
if (unlikely(ma_dead_node(node)))
return 1;
- mas->node = MAS_NONE;
+ mas->status = ma_underflow;
return 0;
}
@@ -4436,8 +4401,7 @@ no_entry:
*
* Return: The entry in the previous slot which is possibly NULL
*/
-static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty,
- bool set_underflow)
+static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
{
void *entry;
void __rcu **slots;
@@ -4470,13 +4434,16 @@ again:
mas->last = mas->index - 1;
mas->index = mas_safe_min(mas, pivots, mas->offset);
} else {
+ if (mas->index <= min)
+ goto underflow;
+
if (mas_prev_node(mas, min)) {
mas_rewalk(mas, save_point);
goto retry;
}
- if (mas_is_none(mas))
- goto underflow;
+ if (WARN_ON_ONCE(mas_is_underflow(mas)))
+ return NULL;
mas->last = mas->max;
node = mas_mn(mas);
@@ -4490,12 +4457,15 @@ again:
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
+
if (likely(entry))
return entry;
if (!empty) {
- if (mas->index <= min)
- goto underflow;
+ if (mas->index <= min) {
+ mas->status = ma_underflow;
+ return NULL;
+ }
goto again;
}
@@ -4503,8 +4473,7 @@ again:
return entry;
underflow:
- if (set_underflow)
- mas->node = MAS_UNDERFLOW;
+ mas->status = ma_underflow;
return NULL;
}
@@ -4513,28 +4482,30 @@ underflow:
* @mas: The maple state
* @max: The maximum pivot value to check.
*
- * The next value will be mas->node[mas->offset] or MAS_NONE.
+ * The next value will be mas->node[mas->offset] or the status will have
+ * overflowed.
* Return: 1 on dead node, 0 otherwise.
*/
-static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
- unsigned long max)
+static int mas_next_node(struct ma_state *mas, struct maple_node *node,
+ unsigned long max)
{
unsigned long min;
unsigned long *pivots;
struct maple_enode *enode;
+ struct maple_node *tmp;
int level = 0;
unsigned char node_end;
enum maple_type mt;
void __rcu **slots;
if (mas->max >= max)
- goto no_entry;
+ goto overflow;
min = mas->max + 1;
level = 0;
do {
if (ma_is_root(node))
- goto no_entry;
+ goto overflow;
/* Walk up. */
if (unlikely(mas_ascend(mas)))
@@ -4574,6 +4545,10 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
pivots = ma_pivots(node, mt);
mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
+ tmp = mte_to_node(enode);
+ mt = mte_node_type(enode);
+ pivots = ma_pivots(tmp, mt);
+ mas->end = ma_data_end(tmp, mt, pivots, mas->max);
if (unlikely(ma_dead_node(node)))
return 1;
@@ -4581,11 +4556,11 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
mas->min = min;
return 0;
-no_entry:
+overflow:
if (unlikely(ma_dead_node(node)))
return 1;
- mas->node = MAS_NONE;
+ mas->status = ma_overflow;
return 0;
}
@@ -4600,15 +4575,13 @@ no_entry:
*
* Return: The entry in the next slot which is possibly NULL
*/
-static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty,
- bool set_overflow)
+static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
{
void __rcu **slots;
unsigned long *pivots;
unsigned long pivot;
enum maple_type type;
struct maple_node *node;
- unsigned char data_end;
unsigned long save_point = mas->last;
void *entry;
@@ -4616,42 +4589,45 @@ retry:
node = mas_mn(mas);
type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
- data_end = ma_data_end(node, type, pivots, mas->max);
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (mas->max >= max) {
- if (likely(mas->offset < data_end))
+ if (likely(mas->offset < mas->end))
pivot = pivots[mas->offset];
else
- goto overflow;
+ pivot = mas->max;
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
- if (pivot >= max)
- goto overflow;
+ if (pivot >= max) { /* Was at the limit, next will extend beyond */
+ mas->status = ma_overflow;
+ return NULL;
+ }
}
- if (likely(mas->offset < data_end)) {
+ if (likely(mas->offset < mas->end)) {
mas->index = pivots[mas->offset] + 1;
again:
mas->offset++;
- if (likely(mas->offset < data_end))
+ if (likely(mas->offset < mas->end))
mas->last = pivots[mas->offset];
else
mas->last = mas->max;
} else {
+ if (mas->last >= max) {
+ mas->status = ma_overflow;
+ return NULL;
+ }
+
if (mas_next_node(mas, node, max)) {
mas_rewalk(mas, save_point);
goto retry;
}
- if (WARN_ON_ONCE(mas_is_none(mas))) {
- mas->node = MAS_OVERFLOW;
+ if (WARN_ON_ONCE(mas_is_overflow(mas)))
return NULL;
- goto overflow;
- }
mas->offset = 0;
mas->index = mas->min;
@@ -4669,21 +4645,18 @@ again:
if (entry)
return entry;
+
if (!empty) {
- if (mas->last >= max)
- goto overflow;
+ if (mas->last >= max) {
+ mas->status = ma_overflow;
+ return NULL;
+ }
mas->index = mas->last + 1;
- /* Node cannot end on NULL, so it's safe to short-cut here */
goto again;
}
return entry;
-
-overflow:
- if (set_overflow)
- mas->node = MAS_OVERFLOW;
- return NULL;
}
/*
@@ -4702,11 +4675,11 @@ overflow:
static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
{
if (mas->last >= limit) {
- mas->node = MAS_OVERFLOW;
+ mas->status = ma_overflow;
return NULL;
}
- return mas_next_slot(mas, limit, false, true);
+ return mas_next_slot(mas, limit, false);
}
/*
@@ -4874,7 +4847,7 @@ done:
* @mas: The maple state.
*
* mas->index and mas->last will be set to the range if there is a value. If
- * mas->node is MAS_NONE, reset to MAS_START.
+ * mas->status is ma_none, reset to ma_start
*
* Return: the entry at the location or %NULL.
*/
@@ -4883,7 +4856,7 @@ void *mas_walk(struct ma_state *mas)
void *entry;
if (!mas_is_active(mas) || !mas_is_start(mas))
- mas->node = MAS_START;
+ mas->status = ma_start;
retry:
entry = mas_state_walk(mas);
if (mas_is_start(mas)) {
@@ -4899,7 +4872,7 @@ retry:
mas->index = 1;
mas->last = ULONG_MAX;
- mas->node = MAS_NONE;
+ mas->status = ma_none;
return NULL;
}
@@ -5026,6 +4999,7 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
unsigned char offset;
unsigned long *pivots;
enum maple_type mt;
+ struct maple_node *node;
if (min > max)
return -EINVAL;
@@ -5056,12 +5030,14 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
if (unlikely(offset == MAPLE_NODE_SLOTS))
return -EBUSY;
+ node = mas_mn(mas);
mt = mte_node_type(mas->node);
- pivots = ma_pivots(mas_mn(mas), mt);
+ pivots = ma_pivots(node, mt);
min = mas_safe_min(mas, pivots, offset);
if (mas->index < min)
mas->index = min;
mas->last = mas->index + size - 1;
+ mas->end = ma_data_end(node, mt, pivots, mas->max);
return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area);
@@ -5122,6 +5098,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
mas->last = max;
mas->index = mas->last - size + 1;
+ mas->end = mas_data_end(mas);
return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area_rev);
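Both gap-search paths now also leave mas->end set to the data end of the node that supplied the gap, matching the cached-end convention used elsewhere in this patch. The external contract is unchanged; a minimal sketch of reserving a free range with the forward search, assuming a caller that owns the tree (reserve_gap() and the gap size are illustrative):

/* Sketch: find a 16-index gap anywhere in the tree and store into it. */
static int reserve_gap(struct maple_tree *mt, void *ptr)
{
	MA_STATE(mas, mt, 0, 0);
	int ret;

	mtree_lock(mt);
	ret = mas_empty_area(&mas, 0, ULONG_MAX, 16);
	if (!ret)	/* mas.index/mas.last now describe the free range */
		ret = mas_store_gfp(&mas, ptr, GFP_KERNEL);
	mtree_unlock(mt);
	return ret;
}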
@@ -5503,7 +5480,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
node_size = mas_wr_new_end(&wr_mas);
/* Slot store, does not require additional nodes */
- if (node_size == wr_mas.node_end) {
+ if (node_size == mas->end) {
/* reuse node */
if (!mt_in_rcu(mas->tree))
return 0;
@@ -5518,7 +5495,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
goto ask_now;
}
- /* New root needs a singe node */
+ /* New root needs a single node */
if (unlikely(mte_is_root(mas->node)))
goto ask_now;
@@ -5566,7 +5543,7 @@ void mas_destroy(struct ma_state *mas)
mas_start(mas);
mtree_range_walk(mas);
- end = mas_data_end(mas) + 1;
+ end = mas->end + 1;
if (end < mt_min_slot_count(mas->node) - 1)
mas_destroy_rebalance(mas, end);
@@ -5584,7 +5561,7 @@ void mas_destroy(struct ma_state *mas)
mt_free_bulk(count, (void __rcu **)&node->slot[1]);
total -= count;
}
- kmem_cache_free(maple_node_cache, node);
+ mt_free_one(ma_mnode_ptr(node));
total--;
}
@@ -5654,33 +5631,46 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
}
EXPORT_SYMBOL_GPL(mas_expected_entries);
-static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
+static bool mas_next_setup(struct ma_state *mas, unsigned long max,
void **entry)
{
bool was_none = mas_is_none(mas);
if (unlikely(mas->last >= max)) {
- mas->node = MAS_OVERFLOW;
+ mas->status = ma_overflow;
return true;
}
- if (mas_is_active(mas))
+ switch (mas->status) {
+ case ma_active:
return false;
-
- if (mas_is_none(mas) || mas_is_paused(mas)) {
- mas->node = MAS_START;
- } else if (mas_is_overflow(mas)) {
+ case ma_none:
+ fallthrough;
+ case ma_pause:
+ mas->status = ma_start;
+ fallthrough;
+ case ma_start:
+ mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
+ break;
+ case ma_overflow:
/* Overflowed before, but the max changed */
- mas->node = MAS_START;
- } else if (mas_is_underflow(mas)) {
- mas->node = MAS_START;
+ mas->status = ma_active;
+ break;
+ case ma_underflow:
+ /* The user expects the mas to be one before where it is */
+ mas->status = ma_active;
*entry = mas_walk(mas);
if (*entry)
return true;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
}
- if (mas_is_start(mas))
- *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
+ if (likely(mas_is_active(mas))) /* Fast path */
+ return false;
if (mas_is_ptr(mas)) {
*entry = NULL;
@@ -5690,7 +5680,7 @@ static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
}
mas->index = 1;
mas->last = ULONG_MAX;
- mas->node = MAS_NONE;
+ mas->status = ma_none;
return true;
}
@@ -5719,7 +5709,7 @@ void *mas_next(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, false, true);
+ return mas_next_slot(mas, max, false);
}
EXPORT_SYMBOL_GPL(mas_next);
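mas_next() keeps its external contract; only the internal bookkeeping moves from sentinel nodes to mas->status. A minimal read-side sketch of walking every entry above a starting index (dump_from() and the printing are illustrative):

/* Sketch: print every entry stored above 'index' (read side, under RCU). */
static void dump_from(struct maple_tree *mt, unsigned long index)
{
	MA_STATE(mas, mt, index, index);
	void *entry;

	rcu_read_lock();
	while ((entry = mas_next(&mas, ULONG_MAX)) != NULL)
		pr_info("range %lx-%lx -> %p\n", mas.index, mas.last, entry);
	rcu_read_unlock();
}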
@@ -5742,7 +5732,7 @@ void *mas_next_range(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, true, true);
+ return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_next_range);
@@ -5770,37 +5760,48 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
}
EXPORT_SYMBOL_GPL(mt_next);
-static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
- void **entry)
+static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry)
{
if (unlikely(mas->index <= min)) {
- mas->node = MAS_UNDERFLOW;
+ mas->status = ma_underflow;
return true;
}
- if (mas_is_active(mas))
+ switch (mas->status) {
+ case ma_active:
return false;
-
- if (mas_is_overflow(mas)) {
- mas->node = MAS_START;
+ case ma_start:
+ break;
+ case ma_none:
+ fallthrough;
+ case ma_pause:
+ mas->status = ma_start;
+ break;
+ case ma_underflow:
+ /* underflowed before but the min changed */
+ mas->status = ma_active;
+ break;
+ case ma_overflow:
+ /* User expects mas to be one after where it is */
+ mas->status = ma_active;
*entry = mas_walk(mas);
if (*entry)
return true;
- }
-
- if (mas_is_none(mas) || mas_is_paused(mas)) {
- mas->node = MAS_START;
- } else if (mas_is_underflow(mas)) {
- /* underflowed before but the min changed */
- mas->node = MAS_START;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
}
if (mas_is_start(mas))
mas_walk(mas);
if (unlikely(mas_is_ptr(mas))) {
- if (!mas->index)
- goto none;
+ if (!mas->index) {
+ mas->status = ma_none;
+ return true;
+ }
mas->index = mas->last = 0;
*entry = mas_root(mas);
return true;
@@ -5810,7 +5811,7 @@ static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
if (mas->index) {
/* Walked to out-of-range pointer? */
mas->index = mas->last = 0;
- mas->node = MAS_ROOT;
+ mas->status = ma_root;
*entry = mas_root(mas);
return true;
}
@@ -5818,10 +5819,6 @@ static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
}
return false;
-
-none:
- mas->node = MAS_NONE;
- return true;
}
/**
@@ -5830,7 +5827,7 @@ none:
* @min: The minimum value to check.
*
* Must hold rcu_read_lock or the write lock.
- * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
+ * Will reset mas to ma_start if the status is ma_none. Will stop on not
* searchable nodes.
*
* Return: the previous value or %NULL.
@@ -5842,7 +5839,7 @@ void *mas_prev(struct ma_state *mas, unsigned long min)
if (mas_prev_setup(mas, min, &entry))
return entry;
- return mas_prev_slot(mas, min, false, true);
+ return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_prev);
@@ -5853,7 +5850,7 @@ EXPORT_SYMBOL_GPL(mas_prev);
*
* Sets @mas->index and @mas->last to the range.
* Must hold rcu_read_lock or the write lock.
- * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
+ * Will reset mas to ma_start if the status is ma_none. Will stop on not
* searchable nodes.
*
* Return: the previous value or %NULL.
@@ -5865,7 +5862,7 @@ void *mas_prev_range(struct ma_state *mas, unsigned long min)
if (mas_prev_setup(mas, min, &entry))
return entry;
- return mas_prev_slot(mas, min, true, true);
+ return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_prev_range);
@@ -5908,7 +5905,8 @@ EXPORT_SYMBOL_GPL(mt_prev);
*/
void mas_pause(struct ma_state *mas)
{
- mas->node = MAS_PAUSE;
+ mas->status = ma_pause;
+ mas->node = NULL;
}
EXPORT_SYMBOL_GPL(mas_pause);
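mas_pause() now clears mas->node as well as setting ma_pause, but the resume pattern is unchanged: pause, drop the lock, and the next forward call restarts just past the last range returned (mas->last), as the ma_pause handling in the setup helpers above shows. A hedged sketch of the usual batching loop (scan_all(), process() and the batch size are illustrative):

/* Sketch: long read-side scan that yields the CPU every 64 entries. */
static void scan_all(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);
	void *entry;
	unsigned int batch = 0;

	rcu_read_lock();
	mas_for_each(&mas, entry, ULONG_MAX) {
		process(entry);			/* illustrative work */
		if (++batch == 64) {
			batch = 0;
			mas_pause(&mas);	/* safe restart point */
			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}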
@@ -5920,35 +5918,54 @@ EXPORT_SYMBOL_GPL(mas_pause);
*
* Returns: True if entry is the answer, false otherwise.
*/
-static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
- void **entry)
+static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry)
{
- if (mas_is_active(mas)) {
+ switch (mas->status) {
+ case ma_active:
if (mas->last < max)
return false;
-
return true;
- }
-
- if (mas_is_paused(mas)) {
+ case ma_start:
+ break;
+ case ma_pause:
if (unlikely(mas->last >= max))
return true;
mas->index = ++mas->last;
- mas->node = MAS_START;
- } else if (mas_is_none(mas)) {
+ mas->status = ma_start;
+ break;
+ case ma_none:
if (unlikely(mas->last >= max))
return true;
mas->index = mas->last;
- mas->node = MAS_START;
- } else if (mas_is_overflow(mas) || mas_is_underflow(mas)) {
- if (mas->index > max) {
- mas->node = MAS_OVERFLOW;
+ mas->status = ma_start;
+ break;
+ case ma_underflow:
+ /* mas is pointing at entry before unable to go lower */
+ if (unlikely(mas->index >= max)) {
+ mas->status = ma_overflow;
return true;
}
- mas->node = MAS_START;
+ mas->status = ma_active;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ break;
+ case ma_overflow:
+ if (unlikely(mas->last >= max))
+ return true;
+
+ mas->status = ma_active;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
}
if (mas_is_start(mas)) {
@@ -5962,12 +5979,11 @@ static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
}
- if (unlikely(!mas_searchable(mas))) {
- if (unlikely(mas_is_ptr(mas)))
- goto ptr_out_of_range;
+ if (unlikely(mas_is_ptr(mas)))
+ goto ptr_out_of_range;
+ if (unlikely(mas_is_none(mas)))
return true;
- }
if (mas->index == max)
return true;
@@ -5975,7 +5991,7 @@ static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
return false;
ptr_out_of_range:
- mas->node = MAS_NONE;
+ mas->status = ma_none;
mas->index = 1;
mas->last = ULONG_MAX;
return true;
@@ -5989,7 +6005,7 @@ ptr_out_of_range:
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
- * May set @mas->node to MAS_NONE.
+ * May set @mas->status to ma_overflow.
*
* Return: The entry or %NULL.
*/
@@ -6001,7 +6017,10 @@ void *mas_find(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, false, false);
+ entry = mas_next_slot(mas, max, false);
+ /* Ignore overflow */
+ mas->status = ma_active;
+ return entry;
}
EXPORT_SYMBOL_GPL(mas_find);
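mas_find() is also the primitive behind the mas_for_each() iteration macro, so the overflow handling added here (the status is forced back to ma_active after mas_next_slot()) is what keeps such loops restartable. The macro is roughly of this shape (paraphrased, not quoted from the header):

/* Roughly how the iteration macro expands (paraphrased sketch): */
#define mas_for_each_sketch(mas, entry, max) \
	while (((entry) = mas_find((mas), (max))) != NULL)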
@@ -6013,7 +6032,7 @@ EXPORT_SYMBOL_GPL(mas_find);
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
- * May set @mas->node to MAS_NONE.
+ * May set @mas->status to ma_overflow.
*
* Return: The entry or %NULL.
*/
@@ -6025,7 +6044,7 @@ void *mas_find_range(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, true, false);
+ return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_find_range);
@@ -6037,36 +6056,48 @@ EXPORT_SYMBOL_GPL(mas_find_range);
*
* Returns: True if entry is the answer, false otherwise.
*/
-static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
+static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
void **entry)
{
- if (mas_is_active(mas)) {
- if (mas->index > min)
- return false;
-
- return true;
- }
- if (mas_is_paused(mas)) {
+ switch (mas->status) {
+ case ma_active:
+ goto active;
+ case ma_start:
+ break;
+ case ma_pause:
if (unlikely(mas->index <= min)) {
- mas->node = MAS_NONE;
+ mas->status = ma_underflow;
return true;
}
- mas->node = MAS_START;
mas->last = --mas->index;
- } else if (mas_is_none(mas)) {
+ mas->status = ma_start;
+ break;
+ case ma_none:
if (mas->index <= min)
goto none;
mas->last = mas->index;
- mas->node = MAS_START;
- } else if (mas_is_underflow(mas) || mas_is_overflow(mas)) {
- if (mas->last <= min) {
- mas->node = MAS_UNDERFLOW;
+ mas->status = ma_start;
+ break;
+ case ma_overflow: /* user expects the mas to be one after where it is */
+ if (unlikely(mas->index <= min)) {
+ mas->status = ma_underflow;
return true;
}
- mas->node = MAS_START;
+ mas->status = ma_active;
+ break;
+ case ma_underflow: /* user expects the mas to be one before where it is */
+ if (unlikely(mas->index <= min))
+ return true;
+
+ mas->status = ma_active;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
}
if (mas_is_start(mas)) {
@@ -6079,29 +6110,28 @@ static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
return true;
}
- if (unlikely(!mas_searchable(mas))) {
- if (mas_is_ptr(mas))
- goto none;
+ if (unlikely(mas_is_ptr(mas)))
+ goto none;
- if (mas_is_none(mas)) {
- /*
- * Walked to the location, and there was nothing so the
- * previous location is 0.
- */
- mas->last = mas->index = 0;
- mas->node = MAS_ROOT;
- *entry = mas_root(mas);
- return true;
- }
+ if (unlikely(mas_is_none(mas))) {
+ /*
+ * Walked to the location, and there was nothing so the previous
+ * location is 0.
+ */
+ mas->last = mas->index = 0;
+ mas->status = ma_root;
+ *entry = mas_root(mas);
+ return true;
}
+active:
if (mas->index < min)
return true;
return false;
none:
- mas->node = MAS_NONE;
+ mas->status = ma_none;
return true;
}
@@ -6114,7 +6144,7 @@ none:
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
- * May set @mas->node to MAS_NONE.
+ * May set @mas->status to ma_underflow.
*
* Return: The entry or %NULL.
*/
@@ -6126,7 +6156,7 @@ void *mas_find_rev(struct ma_state *mas, unsigned long min)
return entry;
/* Retries on dead nodes handled by mas_prev_slot */
- return mas_prev_slot(mas, min, false, false);
+ return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_find_rev);
@@ -6140,7 +6170,7 @@ EXPORT_SYMBOL_GPL(mas_find_rev);
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
- * May set @mas->node to MAS_NONE.
+ * May set @mas->status to ma_underflow.
*
* Return: The entry or %NULL.
*/
@@ -6152,7 +6182,7 @@ void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
return entry;
/* Retries on dead nodes handled by mas_prev_slot */
- return mas_prev_slot(mas, min, true, false);
+ return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_find_range_rev);
@@ -6172,8 +6202,8 @@ void *mas_erase(struct ma_state *mas)
void *entry;
MA_WR_STATE(wr_mas, mas, NULL);
- if (mas_is_none(mas) || mas_is_paused(mas))
- mas->node = MAS_START;
+ if (!mas_is_active(mas) || !mas_is_start(mas))
+ mas->status = ma_start;
/* Retry unnecessary when holding the write lock. */
entry = mas_state_walk(mas);
@@ -6218,7 +6248,7 @@ bool mas_nomem(struct ma_state *mas, gfp_t gfp)
if (!mas_allocated(mas))
return false;
- mas->node = MAS_START;
+ mas->status = ma_start;
return true;
}
@@ -6476,6 +6506,278 @@ void *mtree_erase(struct maple_tree *mt, unsigned long index)
}
EXPORT_SYMBOL(mtree_erase);
+/*
+ * mas_dup_free() - Free an incomplete duplication of a tree.
+ * @mas: The maple state of a incomplete tree.
+ *
+ * The parameter @mas->node passed in indicates that the allocation failed on
+ * this node. This function frees all nodes starting from @mas->node in the
+ * reverse order of mas_dup_build(). There is no need to hold the source tree
+ * lock at this time.
+ */
+static void mas_dup_free(struct ma_state *mas)
+{
+ struct maple_node *node;
+ enum maple_type type;
+ void __rcu **slots;
+ unsigned char count, i;
+
+ /* Maybe the first node allocation failed. */
+ if (mas_is_none(mas))
+ return;
+
+ while (!mte_is_root(mas->node)) {
+ mas_ascend(mas);
+ if (mas->offset) {
+ mas->offset--;
+ do {
+ mas_descend(mas);
+ mas->offset = mas_data_end(mas);
+ } while (!mte_is_leaf(mas->node));
+
+ mas_ascend(mas);
+ }
+
+ node = mte_to_node(mas->node);
+ type = mte_node_type(mas->node);
+ slots = ma_slots(node, type);
+ count = mas_data_end(mas) + 1;
+ for (i = 0; i < count; i++)
+ ((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK;
+ mt_free_bulk(count, slots);
+ }
+
+ node = mte_to_node(mas->node);
+ mt_free_one(node);
+}
+
+/*
+ * mas_copy_node() - Copy a maple node and replace the parent.
+ * @mas: The maple state of source tree.
+ * @new_mas: The maple state of new tree.
+ * @parent: The parent of the new node.
+ *
+ * Copy @mas->node to @new_mas->node, set @parent to be the parent of
+ * @new_mas->node. If memory allocation fails, @mas is set to -ENOMEM.
+ */
+static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas,
+ struct maple_pnode *parent)
+{
+ struct maple_node *node = mte_to_node(mas->node);
+ struct maple_node *new_node = mte_to_node(new_mas->node);
+ unsigned long val;
+
+ /* Copy the node completely. */
+ memcpy(new_node, node, sizeof(struct maple_node));
+ /* Update the parent node pointer. */
+ val = (unsigned long)node->parent & MAPLE_NODE_MASK;
+ new_node->parent = ma_parent_ptr(val | (unsigned long)parent);
+}
+
+/*
+ * mas_dup_alloc() - Allocate child nodes for a maple node.
+ * @mas: The maple state of source tree.
+ * @new_mas: The maple state of new tree.
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * This function allocates child nodes for @new_mas->node during the duplication
+ * process. If memory allocation fails, @mas is set to -ENOMEM.
+ */
+static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
+ gfp_t gfp)
+{
+ struct maple_node *node = mte_to_node(mas->node);
+ struct maple_node *new_node = mte_to_node(new_mas->node);
+ enum maple_type type;
+ unsigned char request, count, i;
+ void __rcu **slots;
+ void __rcu **new_slots;
+ unsigned long val;
+
+ /* Allocate memory for child nodes. */
+ type = mte_node_type(mas->node);
+ new_slots = ma_slots(new_node, type);
+ request = mas_data_end(mas) + 1;
+ count = mt_alloc_bulk(gfp, request, (void **)new_slots);
+ if (unlikely(count < request)) {
+ memset(new_slots, 0, request * sizeof(void *));
+ mas_set_err(mas, -ENOMEM);
+ return;
+ }
+
+ /* Restore node type information in slots. */
+ slots = ma_slots(node, type);
+ for (i = 0; i < count; i++) {
+ val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
+ val &= MAPLE_NODE_MASK;
+ ((unsigned long *)new_slots)[i] |= val;
+ }
+}
+
+/*
+ * mas_dup_build() - Build a new maple tree from a source tree
+ * @mas: The maple state of source tree, need to be in MAS_START state.
+ * @new_mas: The maple state of new tree, need to be in MAS_START state.
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * This function builds a new tree in DFS preorder. If the memory allocation
+ * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the
+ * last node. mas_dup_free() will free the incomplete duplication of a tree.
+ *
+ * Note that the attributes of the two trees need to be exactly the same, and the
+ * new tree needs to be empty, otherwise -EINVAL will be set in @mas.
+ */
+static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
+ gfp_t gfp)
+{
+ struct maple_node *node;
+ struct maple_pnode *parent = NULL;
+ struct maple_enode *root;
+ enum maple_type type;
+
+ if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) ||
+ unlikely(!mtree_empty(new_mas->tree))) {
+ mas_set_err(mas, -EINVAL);
+ return;
+ }
+
+ root = mas_start(mas);
+ if (mas_is_ptr(mas) || mas_is_none(mas))
+ goto set_new_tree;
+
+ node = mt_alloc_one(gfp);
+ if (!node) {
+ new_mas->status = ma_none;
+ mas_set_err(mas, -ENOMEM);
+ return;
+ }
+
+ type = mte_node_type(mas->node);
+ root = mt_mk_node(node, type);
+ new_mas->node = root;
+ new_mas->min = 0;
+ new_mas->max = ULONG_MAX;
+ root = mte_mk_root(root);
+ while (1) {
+ mas_copy_node(mas, new_mas, parent);
+ if (!mte_is_leaf(mas->node)) {
+ /* Only allocate child nodes for non-leaf nodes. */
+ mas_dup_alloc(mas, new_mas, gfp);
+ if (unlikely(mas_is_err(mas)))
+ return;
+ } else {
+ /*
+ * This is the last leaf node and duplication is
+ * completed.
+ */
+ if (mas->max == ULONG_MAX)
+ goto done;
+
+ /* This is not the last leaf node and needs to go up. */
+ do {
+ mas_ascend(mas);
+ mas_ascend(new_mas);
+ } while (mas->offset == mas_data_end(mas));
+
+ /* Move to the next subtree. */
+ mas->offset++;
+ new_mas->offset++;
+ }
+
+ mas_descend(mas);
+ parent = ma_parent_ptr(mte_to_node(new_mas->node));
+ mas_descend(new_mas);
+ mas->offset = 0;
+ new_mas->offset = 0;
+ }
+done:
+ /* Specially handle the parent of the root node. */
+ mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas));
+set_new_tree:
+ /* Make them the same height */
+ new_mas->tree->ma_flags = mas->tree->ma_flags;
+ rcu_assign_pointer(new_mas->tree->ma_root, root);
+}
+
+/**
+ * __mt_dup(): Duplicate an entire maple tree
+ * @mt: The source maple tree
+ * @new: The new maple tree
+ * @gfp: The GFP_FLAGS to use for allocations
+ *
+ * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
+ * traversal. It uses memcpy() to copy nodes in the source tree and allocates
+ * new child nodes for non-leaf nodes. The new node is exactly the same as the
+ * source node except for all the addresses stored in it. It will be faster than
+ * traversing all elements in the source tree and inserting them one by one into
+ * the new tree.
+ * The user needs to ensure that the attributes of the source tree and the new
+ * tree are the same, and the new tree needs to be an empty tree, otherwise
+ * -EINVAL will be returned.
+ * Note that the user needs to manually lock the source tree and the new tree.
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
+ * the attributes of the two trees are different or the new tree is not an empty
+ * tree.
+ */
+int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
+{
+ int ret = 0;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(new_mas, new, 0, 0);
+
+ mas_dup_build(&mas, &new_mas, gfp);
+ if (unlikely(mas_is_err(&mas))) {
+ ret = xa_err(mas.node);
+ if (ret == -ENOMEM)
+ mas_dup_free(&new_mas);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(__mt_dup);
+
+/**
+ * mtree_dup(): Duplicate an entire maple tree
+ * @mt: The source maple tree
+ * @new: The new maple tree
+ * @gfp: The GFP_FLAGS to use for allocations
+ *
+ * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
+ * traversal. It uses memcpy() to copy nodes in the source tree and allocates
+ * new child nodes for non-leaf nodes. The new node is exactly the same as the
+ * source node except for all the addresses stored in it. It will be faster than
+ * traversing all elements in the source tree and inserting them one by one into
+ * the new tree.
+ * The user needs to ensure that the attributes of the source tree and the new
+ * tree are the same, and the new tree needs to be an empty tree, otherwise
+ * -EINVAL will be returned.
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
+ * the attributes of the two trees are different or the new tree is not an empty
+ * tree.
+ */
+int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
+{
+ int ret = 0;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(new_mas, new, 0, 0);
+
+ mas_lock(&new_mas);
+ mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);
+ mas_dup_build(&mas, &new_mas, gfp);
+ mas_unlock(&mas);
+ if (unlikely(mas_is_err(&mas))) {
+ ret = xa_err(mas.node);
+ if (ret == -ENOMEM)
+ mas_dup_free(&new_mas);
+ }
+
+ mas_unlock(&new_mas);
+ return ret;
+}
+EXPORT_SYMBOL(mtree_dup);
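Both entry points duplicate the tree structure only; the slots in the new tree still point at the old tree's objects, so a caller that needs per-entry copies walks the duplicate afterwards and replaces each slot. A minimal sketch of that pattern using the locked variant (dup_with_copies() and copy_object() are illustrative, and the entry replacement step is an assumption about the caller, not part of this patch):

/* Sketch: duplicate old_tree into new_tree, then deep-copy the entries. */
static int dup_with_copies(struct maple_tree *old_tree,
			   struct maple_tree *new_tree)
{
	MA_STATE(mas, new_tree, 0, 0);
	void *entry;
	int ret;

	ret = mtree_dup(old_tree, new_tree, GFP_KERNEL);
	if (ret)
		return ret;		/* new_tree stays empty on failure */

	mtree_lock(new_tree);
	mas_for_each(&mas, entry, ULONG_MAX) {
		void *copy = copy_object(entry);  /* illustrative deep copy */

		if (!copy) {
			ret = -ENOMEM;
			break;
		}
		/* Exact-range replacement of the shared pointer. */
		if (mas_store_gfp(&mas, copy, GFP_KERNEL)) {
			ret = -ENOMEM;
			break;
		}
	}
	mtree_unlock(new_tree);
	return ret;
}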
+
/**
* __mt_destroy() - Walk and free all nodes of a locked maple tree.
* @mt: The maple tree
@@ -6490,7 +6792,7 @@ void __mt_destroy(struct maple_tree *mt)
if (xa_is_node(root))
mte_destroy_walk(root, mt);
- mt->ma_flags = 0;
+ mt->ma_flags = mt_attr(mt);
}
EXPORT_SYMBOL_GPL(__mt_destroy);
@@ -6549,7 +6851,7 @@ retry:
if (entry)
goto unlock;
- while (mas_searchable(&mas) && (mas.last < max)) {
+ while (mas_is_active(&mas) && (mas.last < max)) {
entry = mas_next_entry(&mas, max);
if (likely(entry && !xa_is_zero(entry)))
break;
@@ -6631,26 +6933,6 @@ unsigned int mt_nr_allocated(void)
return kmem_cache_nr_allocated(maple_node_cache);
}
-/*
- * mas_dead_node() - Check if the maple state is pointing to a dead node.
- * @mas: The maple state
- * @index: The index to restore in @mas.
- *
- * Used in test code.
- * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
- */
-static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
-{
- if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
- return 0;
-
- if (likely(!mte_dead_node(mas->node)))
- return 0;
-
- mas_rewalk(mas, index);
- return 1;
-}
-
void mt_cache_shrink(void)
{
}
@@ -6689,11 +6971,11 @@ static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
{
- struct maple_enode *p = MAS_NONE, *mn = mas->node;
+ struct maple_enode *p, *mn = mas->node;
unsigned long p_min, p_max;
mas_next_node(mas, mas_mn(mas), max);
- if (!mas_is_none(mas))
+ if (!mas_is_overflow(mas))
return;
if (mte_is_root(mn))
@@ -6706,7 +6988,7 @@ static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
p_min = mas->min;
p_max = mas->max;
mas_prev_node(mas, 0);
- } while (!mas_is_none(mas));
+ } while (!mas_is_underflow(mas));
mas->node = p;
mas->max = p_max;
@@ -6729,7 +7011,6 @@ static void mt_dump_range(unsigned long min, unsigned long max,
else
pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
break;
- default:
case mt_dump_dec:
if (min == max)
pr_info("%.*s%lu: ", depth * 2, spaces, min);
@@ -6769,7 +7050,6 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry,
case mt_dump_hex:
pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
break;
- default:
case mt_dump_dec:
pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
}
@@ -6799,7 +7079,6 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry,
pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
node, last, max, i);
break;
- default:
case mt_dump_dec:
pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
node, last, max, i);
@@ -6824,7 +7103,6 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
case mt_dump_hex:
pr_cont("%lx ", node->gap[i]);
break;
- default:
case mt_dump_dec:
pr_cont("%lu ", node->gap[i]);
}
@@ -6835,7 +7113,6 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
case mt_dump_hex:
pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
break;
- default:
case mt_dump_dec:
pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
}
@@ -6976,7 +7253,8 @@ static void mas_validate_gaps(struct ma_state *mas)
counted:
if (mt == maple_arange_64) {
- offset = ma_meta_gap(node, mt);
+ MT_BUG_ON(mas->tree, !gaps);
+ offset = ma_meta_gap(node);
if (offset > i) {
pr_err("gap offset %p[%u] is invalid\n", node, offset);
MT_BUG_ON(mas->tree, 1);
@@ -6988,7 +7266,6 @@ counted:
MT_BUG_ON(mas->tree, 1);
}
- MT_BUG_ON(mas->tree, !gaps);
for (i++ ; i < mt_slot_count(mte); i++) {
if (gaps[i] != 0) {
pr_err("gap %p[%u] beyond node limit != 0\n",
@@ -7166,7 +7443,7 @@ static void mt_validate_nulls(struct maple_tree *mt)
MA_STATE(mas, mt, 0, 0);
mas_start(&mas);
- if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
+ if (mas_is_none(&mas) || (mas_is_ptr(&mas)))
return;
while (!mte_is_leaf(mas.node))
@@ -7183,7 +7460,7 @@ static void mt_validate_nulls(struct maple_tree *mt)
last = entry;
if (offset == mas_data_end(&mas)) {
mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
- if (mas_is_none(&mas))
+ if (mas_is_overflow(&mas))
return;
offset = 0;
slots = ma_slots(mte_to_node(mas.node),
@@ -7192,7 +7469,7 @@ static void mt_validate_nulls(struct maple_tree *mt)
offset++;
}
- } while (!mas_is_none(&mas));
+ } while (!mas_is_overflow(&mas));
}
/*
@@ -7207,13 +7484,13 @@ void mt_validate(struct maple_tree *mt)
MA_STATE(mas, mt, 0, 0);
rcu_read_lock();
mas_start(&mas);
- if (!mas_searchable(&mas))
+ if (!mas_is_active(&mas))
goto done;
while (!mte_is_leaf(mas.node))
mas_descend(&mas);
- while (!mas_is_none(&mas)) {
+ while (!mas_is_overflow(&mas)) {
MAS_WARN_ON(&mas, mte_dead_node(mas.node));
end = mas_data_end(&mas);
if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
@@ -7238,16 +7515,35 @@ EXPORT_SYMBOL_GPL(mt_validate);
void mas_dump(const struct ma_state *mas)
{
pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
- if (mas_is_none(mas))
- pr_err("(MAS_NONE) ");
- else if (mas_is_ptr(mas))
- pr_err("(MAS_ROOT) ");
- else if (mas_is_start(mas))
- pr_err("(MAS_START) ");
- else if (mas_is_paused(mas))
- pr_err("(MAS_PAUSED) ");
-
- pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last);
+ switch (mas->status) {
+ case ma_active:
+ pr_err("(ma_active)");
+ break;
+ case ma_none:
+ pr_err("(ma_none)");
+ break;
+ case ma_root:
+ pr_err("(ma_root)");
+ break;
+ case ma_start:
+ pr_err("(ma_start) ");
+ break;
+ case ma_pause:
+ pr_err("(ma_pause) ");
+ break;
+ case ma_overflow:
+ pr_err("(ma_overflow) ");
+ break;
+ case ma_underflow:
+ pr_err("(ma_underflow) ");
+ break;
+ case ma_error:
+ pr_err("(ma_error) ");
+ break;
+ }
+
+ pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
+ mas->index, mas->last);
pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
if (mas->index > mas->last)
@@ -7260,7 +7556,7 @@ void mas_wr_dump(const struct ma_wr_state *wr_mas)
pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
wr_mas->node, wr_mas->r_min, wr_mas->r_max);
pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
- wr_mas->type, wr_mas->offset_end, wr_mas->node_end,
+ wr_mas->type, wr_mas->offset_end, wr_mas->mas->end,
wr_mas->end_piv);
}
EXPORT_SYMBOL_GPL(mas_wr_dump);
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 2f5aa851834e..a0be5d05c7f0 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -18,11 +18,14 @@
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/kmsan.h>
+#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
@@ -32,14 +35,23 @@
#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
-#define DEPOT_VALID_BITS 1
#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
#define DEPOT_STACK_ALIGN 4
#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
-#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \
- DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
+#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
+ STACK_DEPOT_EXTRA_BITS)
+#if IS_ENABLED(CONFIG_KMSAN) && CONFIG_STACKDEPOT_MAX_FRAMES >= 32
+/*
+ * KMSAN is frequently used in fuzzing scenarios and thus saves a lot of stack
+ * traces. As KMSAN does not support evicting stack traces from the stack
+ * depot, the stack depot capacity might be reached quickly with large stack
+ * records. Adjust the maximum number of stack depot pools for this case.
+ */
+#define DEPOT_POOLS_CAP (8192 * (CONFIG_STACKDEPOT_MAX_FRAMES / 16))
+#else
#define DEPOT_POOLS_CAP 8192
+#endif
#define DEPOT_MAX_POOLS \
(((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
(1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
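[Editorial worked example, assuming 4 KiB pages, a 32-bit handle and
STACK_DEPOT_EXTRA_BITS == 5 (verify against the actual configuration):
DEPOT_OFFSET_BITS = 2 + 12 - 4 = 10 and DEPOT_POOL_INDEX_BITS = 32 - 10 - 5 = 17,
so 1 << 17 = 131072 exceeds DEPOT_POOLS_CAP and DEPOT_MAX_POOLS ends up as
8192 pools of DEPOT_POOL_SIZE = 16 KiB each.]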
@@ -50,19 +62,22 @@ union handle_parts {
struct {
u32 pool_index : DEPOT_POOL_INDEX_BITS;
u32 offset : DEPOT_OFFSET_BITS;
- u32 valid : DEPOT_VALID_BITS;
u32 extra : STACK_DEPOT_EXTRA_BITS;
};
};
struct stack_record {
- struct stack_record *next; /* Link in the hash table */
- u32 hash; /* Hash in the hash table */
+ struct list_head list; /* Links in hash table or freelist */
+ u32 hash; /* Hash in hash table */
u32 size; /* Number of stored frames */
union handle_parts handle;
- unsigned long entries[]; /* Variable-sized array of frames */
+ refcount_t count;
+ unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */
};
+#define DEPOT_STACK_RECORD_SIZE \
+ ALIGN(sizeof(struct stack_record), 1 << DEPOT_STACK_ALIGN)
+
static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;
@@ -75,40 +90,34 @@ static bool __stack_depot_early_init_passed __initdata;
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c
-/* Hash table of pointers to stored stack traces. */
-static struct stack_record **stack_table;
+/* Hash table of stored stack records. */
+static struct list_head *stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;
-/* Array of memory regions that store stack traces. */
+/* Array of memory regions that store stack records. */
static void *stack_pools[DEPOT_MAX_POOLS];
-/* Currently used pool in stack_pools. */
-static int pool_index;
-/* Offset to the unused space in the currently used pool. */
-static size_t pool_offset;
-/* Lock that protects the variables above. */
-static DEFINE_RAW_SPINLOCK(pool_lock);
+/* Newly allocated pool that is not yet added to stack_pools. */
+static void *new_pool;
+/* Number of pools in stack_pools. */
+static int pools_num;
+/* Freelist of stack records within stack_pools. */
+static LIST_HEAD(free_stacks);
/*
* Stack depot tries to keep an extra pool allocated even before it runs out
- * of space in the currently used pool.
- * This flag marks that this next extra pool needs to be allocated and
- * initialized. It has the value 0 when either the next pool is not yet
- * initialized or the limit on the number of pools is reached.
+ * of space in the currently used pool. This flag marks whether this extra pool
+ * needs to be allocated. It is false when either an extra pool has already been
+ * saved in new_pool or the limit on the number of pools is reached.
*/
-static int next_pool_required = 1;
+static bool new_pool_required = true;
+/* Lock that protects the variables above. */
+static DEFINE_RWLOCK(pool_rwlock);
static int __init disable_stack_depot(char *str)
{
- int ret;
-
- ret = kstrtobool(str, &stack_depot_disabled);
- if (!ret && stack_depot_disabled) {
- pr_info("disabled\n");
- stack_table = NULL;
- }
- return 0;
+ return kstrtobool(str, &stack_depot_disabled);
}
early_param("stack_depot_disable", disable_stack_depot);
@@ -120,6 +129,15 @@ void __init stack_depot_request_early_init(void)
__stack_depot_early_init_requested = true;
}
+/* Initialize list_head's within the hash table. */
+static void init_stack_table(unsigned long entries)
+{
+ unsigned long i;
+
+ for (i = 0; i < entries; i++)
+ INIT_LIST_HEAD(&stack_table[i]);
+}
+
/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
@@ -131,6 +149,15 @@ int __init stack_depot_early_init(void)
__stack_depot_early_init_passed = true;
/*
+ * Print disabled message even if early init has not been requested:
+ * stack_depot_init() will not print one.
+ */
+ if (stack_depot_disabled) {
+ pr_info("disabled\n");
+ return 0;
+ }
+
+ /*
* If KASAN is enabled, use the maximum order: KASAN is frequently used
* in fuzzing scenarios, which leads to a large number of different
* stack traces being stored in stack depot.
@@ -138,21 +165,25 @@ int __init stack_depot_early_init(void)
if (kasan_enabled() && !stack_bucket_number_order)
stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;
- if (!__stack_depot_early_init_requested || stack_depot_disabled)
+ /*
+ * Check if early init has been requested after setting
+ * stack_bucket_number_order: stack_depot_init() uses its value.
+ */
+ if (!__stack_depot_early_init_requested)
return 0;
/*
* If stack_bucket_number_order is not set, leave entries as 0 to rely
- * on the automatic calculations performed by alloc_large_system_hash.
+ * on the automatic calculations performed by alloc_large_system_hash().
*/
if (stack_bucket_number_order)
entries = 1UL << stack_bucket_number_order;
pr_info("allocating hash table via alloc_large_system_hash\n");
stack_table = alloc_large_system_hash("stackdepot",
- sizeof(struct stack_record *),
+ sizeof(struct list_head),
entries,
STACK_HASH_TABLE_SCALE,
- HASH_EARLY | HASH_ZERO,
+ HASH_EARLY,
NULL,
&stack_hash_mask,
1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
@@ -162,6 +193,14 @@ int __init stack_depot_early_init(void)
stack_depot_disabled = true;
return -ENOMEM;
}
+ if (!entries) {
+ /*
+ * Obtain the number of entries that was calculated by
+ * alloc_large_system_hash().
+ */
+ entries = stack_hash_mask + 1;
+ }
+ init_stack_table(entries);
return 0;
}
@@ -202,7 +241,7 @@ int stack_depot_init(void)
entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
- stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
+ stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL);
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n");
stack_depot_disabled = true;
@@ -210,6 +249,7 @@ int stack_depot_init(void)
goto out_unlock;
}
stack_hash_mask = entries - 1;
+ init_stack_table(entries);
out_unlock:
mutex_unlock(&stack_depot_init_mutex);
@@ -218,41 +258,103 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(stack_depot_init);
-/* Uses preallocated memory to initialize a new stack depot pool. */
-static void depot_init_pool(void **prealloc)
+/* Initializes a stack depot pool. */
+static void depot_init_pool(void *pool)
{
+ int offset;
+
+ lockdep_assert_held_write(&pool_rwlock);
+
+ WARN_ON(!list_empty(&free_stacks));
+
+ /* Initialize handles and link stack records into the freelist. */
+ for (offset = 0; offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
+ offset += DEPOT_STACK_RECORD_SIZE) {
+ struct stack_record *stack = pool + offset;
+
+ stack->handle.pool_index = pools_num;
+ stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
+ stack->handle.extra = 0;
+
+ list_add(&stack->list, &free_stacks);
+ }
+
+ /* Save reference to the pool to be used by depot_fetch_stack(). */
+ stack_pools[pools_num] = pool;
+ pools_num++;
+}
+
+/* Keeps the preallocated memory to be used for a new stack depot pool. */
+static void depot_keep_new_pool(void **prealloc)
+{
+ lockdep_assert_held_write(&pool_rwlock);
+
/*
- * If the next pool is already initialized or the maximum number of
+ * If a new pool is already saved or the maximum number of
* pools is reached, do not use the preallocated memory.
- * smp_load_acquire() here pairs with smp_store_release() below and
- * in depot_alloc_stack().
*/
- if (!smp_load_acquire(&next_pool_required))
+ if (!new_pool_required)
return;
- /* Check if the current pool is not yet allocated. */
- if (stack_pools[pool_index] == NULL) {
- /* Use the preallocated memory for the current pool. */
- stack_pools[pool_index] = *prealloc;
+ /*
+ * Use the preallocated memory for the new pool
+ * as long as we do not exceed the maximum number of pools.
+ */
+ if (pools_num < DEPOT_MAX_POOLS) {
+ new_pool = *prealloc;
*prealloc = NULL;
- } else {
- /*
- * Otherwise, use the preallocated memory for the next pool
- * as long as we do not exceed the maximum number of pools.
- */
- if (pool_index + 1 < DEPOT_MAX_POOLS) {
- stack_pools[pool_index + 1] = *prealloc;
- *prealloc = NULL;
- }
- /*
- * At this point, either the next pool is initialized or the
- * maximum number of pools is reached. In either case, take
- * note that initializing another pool is not required.
- * This smp_store_release pairs with smp_load_acquire() above
- * and in stack_depot_save().
- */
- smp_store_release(&next_pool_required, 0);
}
+
+ /*
+ * At this point, either a new pool is kept or the maximum
+ * number of pools is reached. In either case, take note that
+ * keeping another pool is not required.
+ */
+ new_pool_required = false;
+}
+
+/* Updates references to the current and the next stack depot pools. */
+static bool depot_update_pools(void **prealloc)
+{
+ lockdep_assert_held_write(&pool_rwlock);
+
+ /* Check if we still have objects in the freelist. */
+ if (!list_empty(&free_stacks))
+ goto out_keep_prealloc;
+
+ /* Check if we have a new pool saved and use it. */
+ if (new_pool) {
+ depot_init_pool(new_pool);
+ new_pool = NULL;
+
+ /* Take note that we might need a new new_pool. */
+ if (pools_num < DEPOT_MAX_POOLS)
+ new_pool_required = true;
+
+ /* Try keeping the preallocated memory for new_pool. */
+ goto out_keep_prealloc;
+ }
+
+ /* Bail out if we reached the pool limit. */
+ if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+ WARN_ONCE(1, "Stack depot reached limit capacity");
+ return false;
+ }
+
+ /* Check if we have preallocated memory and use it. */
+ if (*prealloc) {
+ depot_init_pool(*prealloc);
+ *prealloc = NULL;
+ return true;
+ }
+
+ return false;
+
+out_keep_prealloc:
+ /* Keep the preallocated memory for a new pool if required. */
+ if (*prealloc)
+ depot_keep_new_pool(prealloc);
+ return true;
}
/* Allocates a new stack in a stack depot pool. */
@@ -260,62 +362,72 @@ static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
struct stack_record *stack;
- size_t required_size = struct_size(stack, entries, size);
- required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN);
+ lockdep_assert_held_write(&pool_rwlock);
- /* Check if there is not enough space in the current pool. */
- if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
- /* Bail out if we reached the pool limit. */
- if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
- WARN_ONCE(1, "Stack depot reached limit capacity");
- return NULL;
- }
+ /* Update current and new pools if required and possible. */
+ if (!depot_update_pools(prealloc))
+ return NULL;
- /*
- * Move on to the next pool.
- * WRITE_ONCE pairs with potential concurrent read in
- * stack_depot_fetch().
- */
- WRITE_ONCE(pool_index, pool_index + 1);
- pool_offset = 0;
- /*
- * If the maximum number of pools is not reached, take note
- * that the next pool needs to initialized.
- * smp_store_release() here pairs with smp_load_acquire() in
- * stack_depot_save() and depot_init_pool().
- */
- if (pool_index + 1 < DEPOT_MAX_POOLS)
- smp_store_release(&next_pool_required, 1);
- }
+ /* Check if we have a stack record to save the stack trace. */
+ if (list_empty(&free_stacks))
+ return NULL;
- /* Assign the preallocated memory to a pool if required. */
- if (*prealloc)
- depot_init_pool(prealloc);
+ /* Get and unlink the first entry from the freelist. */
+ stack = list_first_entry(&free_stacks, struct stack_record, list);
+ list_del(&stack->list);
- /* Check if we have a pool to save the stack trace. */
- if (stack_pools[pool_index] == NULL)
- return NULL;
+ /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
+ if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
+ size = CONFIG_STACKDEPOT_MAX_FRAMES;
/* Save the stack trace. */
- stack = stack_pools[pool_index] + pool_offset;
stack->hash = hash;
stack->size = size;
- stack->handle.pool_index = pool_index;
- stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
- stack->handle.valid = 1;
- stack->handle.extra = 0;
+ /* stack->handle is already filled in by depot_init_pool(). */
+ refcount_set(&stack->count, 1);
memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
- pool_offset += required_size;
+
/*
* Let KMSAN know the stored stack record is initialized. This shall
* prevent false positive reports if instrumented code accesses it.
*/
- kmsan_unpoison_memory(stack, required_size);
+ kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
return stack;
}
+static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
+{
+ union handle_parts parts = { .handle = handle };
+ void *pool;
+ size_t offset = parts.offset << DEPOT_STACK_ALIGN;
+ struct stack_record *stack;
+
+ lockdep_assert_held(&pool_rwlock);
+
+ if (parts.pool_index > pools_num) {
+ WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
+ parts.pool_index, pools_num, handle);
+ return NULL;
+ }
+
+ pool = stack_pools[parts.pool_index];
+ if (!pool)
+ return NULL;
+
+ stack = pool + offset;
+ return stack;
+}
+
+/* Links stack into the freelist. */
+static void depot_free_stack(struct stack_record *stack)
+{
+ lockdep_assert_held_write(&pool_rwlock);
+
+ list_add(&stack->list, &free_stacks);
+}
+
/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
@@ -340,13 +452,17 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
}
/* Finds a stack in a bucket of the hash table. */
-static inline struct stack_record *find_stack(struct stack_record *bucket,
+static inline struct stack_record *find_stack(struct list_head *bucket,
unsigned long *entries, int size,
u32 hash)
{
+ struct list_head *pos;
struct stack_record *found;
- for (found = bucket; found; found = found->next) {
+ lockdep_assert_held(&pool_rwlock);
+
+ list_for_each(pos, bucket) {
+ found = list_entry(pos, struct stack_record, list);
if (found->hash == hash &&
found->size == size &&
!stackdepot_memcmp(entries, found->entries, size))
@@ -355,17 +471,24 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
return NULL;
}
-depot_stack_handle_t __stack_depot_save(unsigned long *entries,
- unsigned int nr_entries,
- gfp_t alloc_flags, bool can_alloc)
+depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t alloc_flags,
+ depot_flags_t depot_flags)
{
- struct stack_record *found = NULL, **bucket;
- union handle_parts retval = { .handle = 0 };
+ struct list_head *bucket;
+ struct stack_record *found = NULL;
+ depot_stack_handle_t handle = 0;
struct page *page = NULL;
void *prealloc = NULL;
+ bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
+ bool need_alloc = false;
unsigned long flags;
u32 hash;
+ if (WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK))
+ return 0;
+
/*
* If this stack trace is from an interrupt, including anything before
* interrupt entry usually leads to unbounded stack depot growth.
@@ -377,28 +500,36 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
nr_entries = filter_irq_stacks(entries, nr_entries);
if (unlikely(nr_entries == 0) || stack_depot_disabled)
- goto fast_exit;
+ return 0;
hash = hash_stack(entries, nr_entries);
bucket = &stack_table[hash & stack_hash_mask];
- /*
- * Fast path: look the stack trace up without locking.
- * The smp_load_acquire() here pairs with smp_store_release() to
- * |bucket| below.
- */
- found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
- if (found)
+ read_lock_irqsave(&pool_rwlock, flags);
+ printk_deferred_enter();
+
+ /* Fast path: look the stack trace up without full locking. */
+ found = find_stack(bucket, entries, nr_entries, hash);
+ if (found) {
+ if (depot_flags & STACK_DEPOT_FLAG_GET)
+ refcount_inc(&found->count);
+ printk_deferred_exit();
+ read_unlock_irqrestore(&pool_rwlock, flags);
goto exit;
+ }
+
+ /* Take note if another stack pool needs to be allocated. */
+ if (new_pool_required)
+ need_alloc = true;
+
+ printk_deferred_exit();
+ read_unlock_irqrestore(&pool_rwlock, flags);
/*
- * Check if another stack pool needs to be initialized. If so, allocate
- * the memory now - we won't be able to do that under the lock.
- *
- * The smp_load_acquire() here pairs with smp_store_release() to
- * |next_pool_inited| in depot_alloc_stack() and depot_init_pool().
+ * Allocate memory for a new pool if required now:
+ * we won't be able to do that under the lock.
*/
- if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
+ if (unlikely(can_alloc && need_alloc)) {
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
@@ -412,63 +543,56 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
prealloc = page_address(page);
}
- raw_spin_lock_irqsave(&pool_lock, flags);
+ write_lock_irqsave(&pool_rwlock, flags);
+ printk_deferred_enter();
- found = find_stack(*bucket, entries, nr_entries, hash);
+ found = find_stack(bucket, entries, nr_entries, hash);
if (!found) {
struct stack_record *new =
depot_alloc_stack(entries, nr_entries, hash, &prealloc);
if (new) {
- new->next = *bucket;
- /*
- * This smp_store_release() pairs with
- * smp_load_acquire() from |bucket| above.
- */
- smp_store_release(bucket, new);
+ list_add(&new->list, bucket);
found = new;
}
- } else if (prealloc) {
+ } else {
+ if (depot_flags & STACK_DEPOT_FLAG_GET)
+ refcount_inc(&found->count);
/*
* Stack depot already contains this stack trace, but let's
- * keep the preallocated memory for the future.
+ * keep the preallocated memory for future use.
*/
- depot_init_pool(&prealloc);
+ if (prealloc)
+ depot_keep_new_pool(&prealloc);
}
- raw_spin_unlock_irqrestore(&pool_lock, flags);
+ printk_deferred_exit();
+ write_unlock_irqrestore(&pool_rwlock, flags);
exit:
if (prealloc) {
/* Stack depot didn't use this memory, free it. */
free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
}
if (found)
- retval.handle = found->handle.handle;
-fast_exit:
- return retval.handle;
+ handle = found->handle.handle;
+ return handle;
}
-EXPORT_SYMBOL_GPL(__stack_depot_save);
+EXPORT_SYMBOL_GPL(stack_depot_save_flags);
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags)
{
- return __stack_depot_save(entries, nr_entries, alloc_flags, true);
+ return stack_depot_save_flags(entries, nr_entries, alloc_flags,
+ STACK_DEPOT_FLAG_CAN_ALLOC);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
- union handle_parts parts = { .handle = handle };
- /*
- * READ_ONCE pairs with potential concurrent write in
- * depot_alloc_stack.
- */
- int pool_index_cached = READ_ONCE(pool_index);
- void *pool;
- size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack;
+ unsigned long flags;
*entries = NULL;
/*
@@ -477,24 +601,51 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
*/
kmsan_unpoison_memory(entries, sizeof(*entries));
- if (!handle)
+ if (!handle || stack_depot_disabled)
return 0;
- if (parts.pool_index > pool_index_cached) {
- WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
- parts.pool_index, pool_index_cached, handle);
- return 0;
- }
- pool = stack_pools[parts.pool_index];
- if (!pool)
- return 0;
- stack = pool + offset;
+ read_lock_irqsave(&pool_rwlock, flags);
+ printk_deferred_enter();
+
+ stack = depot_fetch_stack(handle);
+
+ printk_deferred_exit();
+ read_unlock_irqrestore(&pool_rwlock, flags);
*entries = stack->entries;
return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
+void stack_depot_put(depot_stack_handle_t handle)
+{
+ struct stack_record *stack;
+ unsigned long flags;
+
+ if (!handle || stack_depot_disabled)
+ return;
+
+ write_lock_irqsave(&pool_rwlock, flags);
+ printk_deferred_enter();
+
+ stack = depot_fetch_stack(handle);
+ if (WARN_ON(!stack))
+ goto out;
+
+ if (refcount_dec_and_test(&stack->count)) {
+ /* Unlink stack from the hash table. */
+ list_del(&stack->list);
+
+ /* Free stack. */
+ depot_free_stack(stack);
+ }
+
+out:
+ printk_deferred_exit();
+ write_unlock_irqrestore(&pool_rwlock, flags);
+}
+EXPORT_SYMBOL_GPL(stack_depot_put);
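+
+/*
+ * Editorial sketch, not part of this patch: a stack trace saved with
+ * STACK_DEPOT_FLAG_GET holds a reference that must later be dropped with
+ * stack_depot_put(). The local variables here are illustrative only.
+ *
+ *	unsigned long entries[16];
+ *	unsigned int nr;
+ *	depot_stack_handle_t handle;
+ *
+ *	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+ *	handle = stack_depot_save_flags(entries, nr, GFP_KERNEL,
+ *					STACK_DEPOT_FLAG_CAN_ALLOC |
+ *					STACK_DEPOT_FLAG_GET);
+ *	...
+ *	stack_depot_put(handle);  /\* record is freed once refcount hits zero *\/
+ */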
+
void stack_depot_print(depot_stack_handle_t stack)
{
unsigned long *entries;
diff --git a/lib/test_ida.c b/lib/test_ida.c
index 55105baa19da..072a49897e71 100644
--- a/lib/test_ida.c
+++ b/lib/test_ida.c
@@ -13,7 +13,7 @@ static unsigned int tests_run;
static unsigned int tests_passed;
#ifdef __KERNEL__
-void ida_dump(struct ida *ida) { }
+static void ida_dump(struct ida *ida) { }
#endif
#define IDA_BUG_ON(ida, x) do { \
tests_run++; \
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 464eeb90d5ad..29185ac5c727 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -43,6 +43,7 @@ atomic_t maple_tree_tests_passed;
/* #define BENCH_NODE_STORE */
/* #define BENCH_AWALK */
/* #define BENCH_WALK */
+/* #define BENCH_LOAD */
/* #define BENCH_MT_FOR_EACH */
/* #define BENCH_FORK */
/* #define BENCH_MAS_FOR_EACH */
@@ -54,6 +55,11 @@ atomic_t maple_tree_tests_passed;
#else
#define cond_resched() do {} while (0)
#endif
+
+#define mas_is_none(x) ((x)->status == ma_none)
+#define mas_is_overflow(x) ((x)->status == ma_overflow)
+#define mas_is_underflow(x) ((x)->status == ma_underflow)
+
static int __init mtree_insert_index(struct maple_tree *mt,
unsigned long index, gfp_t gfp)
{
@@ -582,7 +588,7 @@ static noinline void __init check_find(struct maple_tree *mt)
MT_BUG_ON(mt, last != mas.last);
- mas.node = MAS_NONE;
+ mas.status = ma_none;
mas.index = ULONG_MAX;
mas.last = ULONG_MAX;
entry2 = mas_prev(&mas, 0);
@@ -1749,6 +1755,19 @@ static noinline void __init bench_walk(struct maple_tree *mt)
}
#endif
+#if defined(BENCH_LOAD)
+static noinline void __init bench_load(struct maple_tree *mt)
+{
+ int i, max = 2500, count = 550000000;
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++)
+ mtree_load(mt, 1470);
+}
+#endif
+
#if defined(BENCH_MT_FOR_EACH)
static noinline void __init bench_mt_for_each(struct maple_tree *mt)
{
@@ -1834,47 +1853,48 @@ static noinline void __init bench_mas_prev(struct maple_tree *mt)
}
#endif
/* check_forking - simulate the kernel forking sequence with the tree. */
-static noinline void __init check_forking(struct maple_tree *mt)
+static noinline void __init check_forking(void)
{
-
- struct maple_tree newmt;
- int i, nr_entries = 134;
+ struct maple_tree mt, newmt;
+ int i, nr_entries = 134, ret;
void *val;
- MA_STATE(mas, mt, 0, 0);
- MA_STATE(newmas, mt, 0, 0);
- struct rw_semaphore newmt_lock;
+ MA_STATE(mas, &mt, 0, 0);
+ MA_STATE(newmas, &newmt, 0, 0);
+ struct rw_semaphore mt_lock, newmt_lock;
+ init_rwsem(&mt_lock);
init_rwsem(&newmt_lock);
- for (i = 0; i <= nr_entries; i++)
- mtree_store_range(mt, i*10, i*10 + 5,
- xa_mk_value(i), GFP_KERNEL);
+ mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&mt, &mt_lock);
- mt_set_non_kernel(99999);
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
mt_set_external_lock(&newmt, &newmt_lock);
- newmas.tree = &newmt;
- mas_reset(&newmas);
- mas_reset(&mas);
- down_write(&newmt_lock);
- mas.index = 0;
- mas.last = 0;
- if (mas_expected_entries(&newmas, nr_entries)) {
+
+ down_write(&mt_lock);
+ for (i = 0; i <= nr_entries; i++) {
+ mas_set_range(&mas, i*10, i*10 + 5);
+ mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+ }
+
+ down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
+ ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
+ if (ret) {
pr_err("OOM!");
BUG_ON(1);
}
- rcu_read_lock();
- mas_for_each(&mas, val, ULONG_MAX) {
- newmas.index = mas.index;
- newmas.last = mas.last;
+
+ mas_set(&newmas, 0);
+ mas_for_each(&newmas, val, ULONG_MAX)
mas_store(&newmas, val);
- }
- rcu_read_unlock();
+
mas_destroy(&newmas);
+ mas_destroy(&mas);
mt_validate(&newmt);
- mt_set_non_kernel(0);
__mt_destroy(&newmt);
+ __mt_destroy(&mt);
up_write(&newmt_lock);
+ up_write(&mt_lock);
}
static noinline void __init check_iteration(struct maple_tree *mt)
@@ -1977,49 +1997,51 @@ static noinline void __init check_mas_store_gfp(struct maple_tree *mt)
}
#if defined(BENCH_FORK)
-static noinline void __init bench_forking(struct maple_tree *mt)
+static noinline void __init bench_forking(void)
{
-
- struct maple_tree newmt;
- int i, nr_entries = 134, nr_fork = 80000;
+ struct maple_tree mt, newmt;
+ int i, nr_entries = 134, nr_fork = 80000, ret;
void *val;
- MA_STATE(mas, mt, 0, 0);
- MA_STATE(newmas, mt, 0, 0);
- struct rw_semaphore newmt_lock;
+ MA_STATE(mas, &mt, 0, 0);
+ MA_STATE(newmas, &newmt, 0, 0);
+ struct rw_semaphore mt_lock, newmt_lock;
+ init_rwsem(&mt_lock);
init_rwsem(&newmt_lock);
- mt_set_external_lock(&newmt, &newmt_lock);
- for (i = 0; i <= nr_entries; i++)
- mtree_store_range(mt, i*10, i*10 + 5,
- xa_mk_value(i), GFP_KERNEL);
+ mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&mt, &mt_lock);
+
+ down_write(&mt_lock);
+ for (i = 0; i <= nr_entries; i++) {
+ mas_set_range(&mas, i*10, i*10 + 5);
+ mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+ }
for (i = 0; i < nr_fork; i++) {
- mt_set_non_kernel(99999);
- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
- newmas.tree = &newmt;
- mas_reset(&newmas);
- mas_reset(&mas);
- mas.index = 0;
- mas.last = 0;
- rcu_read_lock();
- down_write(&newmt_lock);
- if (mas_expected_entries(&newmas, nr_entries)) {
- printk("OOM!");
+ mt_init_flags(&newmt,
+ MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&newmt, &newmt_lock);
+
+ down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
+ ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
+ if (ret) {
+ pr_err("OOM!");
BUG_ON(1);
}
- mas_for_each(&mas, val, ULONG_MAX) {
- newmas.index = mas.index;
- newmas.last = mas.last;
+
+ mas_set(&newmas, 0);
+ mas_for_each(&newmas, val, ULONG_MAX)
mas_store(&newmas, val);
- }
+
mas_destroy(&newmas);
- rcu_read_unlock();
mt_validate(&newmt);
- mt_set_non_kernel(0);
__mt_destroy(&newmt);
up_write(&newmt_lock);
}
+ mas_destroy(&mas);
+ __mt_destroy(&mt);
+ up_write(&mt_lock);
}
#endif
@@ -2175,7 +2197,7 @@ static noinline void __init next_prev_test(struct maple_tree *mt)
MT_BUG_ON(mt, val != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 5);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
mas.index = 0;
mas.last = 5;
@@ -3039,10 +3061,6 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt)
* DNE active active range of NULL
*/
-#define mas_active(x) (((x).node != MAS_ROOT) && \
- ((x).node != MAS_START) && \
- ((x).node != MAS_PAUSE) && \
- ((x).node != MAS_NONE))
static noinline void __init check_state_handling(struct maple_tree *mt)
{
MA_STATE(mas, mt, 0, 0);
@@ -3057,7 +3075,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
/* prev: Start -> underflow*/
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, mas.status != ma_underflow);
/* prev: Start -> root */
mas_set(&mas, 10);
@@ -3065,7 +3083,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* prev: pause -> root */
mas_set(&mas, 10);
@@ -3074,7 +3092,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* next: start -> none */
mas_set(&mas, 0);
@@ -3082,7 +3100,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* next: start -> none*/
mas_set(&mas, 10);
@@ -3090,7 +3108,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find: start -> root */
mas_set(&mas, 0);
@@ -3098,21 +3116,21 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* find: root -> none */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find: none -> none */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find: start -> none */
mas_set(&mas, 10);
@@ -3120,14 +3138,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find_rev: none -> root */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* find_rev: start -> root */
mas_set(&mas, 0);
@@ -3135,21 +3153,21 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* find_rev: root -> none */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find_rev: none -> none */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find_rev: start -> root */
mas_set(&mas, 10);
@@ -3157,7 +3175,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: start -> none */
mas_set(&mas, 10);
@@ -3165,7 +3183,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: pause -> none*/
mas_set(&mas, 10);
@@ -3174,7 +3192,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: none -> none */
mas.index = mas.last = 10;
@@ -3182,14 +3200,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: none -> none */
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: start -> root */
mas_set(&mas, 0);
@@ -3197,7 +3215,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: pause -> root */
mas_set(&mas, 0);
@@ -3206,22 +3224,22 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: none -> root */
- mas.node = MAS_NONE;
+ mas.status = ma_none;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: root -> root */
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: root -> none */
mas_set(&mas, 10);
@@ -3229,7 +3247,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: none -> root */
mas.index = mas.last = 0;
@@ -3237,7 +3255,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
mas_unlock(&mas);
@@ -3255,7 +3273,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* next: pause ->active */
mas_set(&mas, 0);
@@ -3264,126 +3282,132 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* next: none ->active */
mas.index = mas.last = 0;
mas.offset = 0;
- mas.node = MAS_NONE;
+ mas.status = ma_none;
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* next:active ->active */
- entry = mas_next(&mas, ULONG_MAX);
+ /* next:active ->active (spanning limit) */
+ entry = mas_next(&mas, 0x2100);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* next:active -> active beyond data */
+ /* next:active -> overflow (limit reached) beyond data */
entry = mas_next(&mas, 0x2999);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x2501);
MT_BUG_ON(mt, mas.last != 0x2fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_overflow(&mas));
- /* Continue after last range ends after max */
+ /* next:overflow -> active (limit changed) */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
- MT_BUG_ON(mt, !mas_active(mas));
-
- /* next:active -> active continued */
- entry = mas_next(&mas, ULONG_MAX);
- MT_BUG_ON(mt, entry != NULL);
- MT_BUG_ON(mt, mas.index != 0x3501);
- MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* next:active -> overflow */
+ /* next:active -> overflow (limit reached) */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x3501);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+ MT_BUG_ON(mt, !mas_is_overflow(&mas));
/* next:overflow -> overflow */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x3501);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+ MT_BUG_ON(mt, !mas_is_overflow(&mas));
/* prev:overflow -> active */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* next: none -> active, skip value at location */
mas_set(&mas, 0);
entry = mas_next(&mas, ULONG_MAX);
- mas.node = MAS_NONE;
+ mas.status = ma_none;
mas.offset = 0;
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* prev:active ->active */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* prev:active -> active spanning end range */
+ /* prev:active -> underflow (span limit) */
+ mas_next(&mas, ULONG_MAX);
+ entry = mas_prev(&mas, 0x1200);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas)); /* spanning limit */
+ entry = mas_prev(&mas, 0x1200); /* underflow */
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
+
+ /* prev:underflow -> underflow (lower limit) spanning end range */
entry = mas_prev(&mas, 0x0100);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
- /* prev:active -> underflow */
+ /* prev:underflow -> underflow */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* prev:underflow -> underflow */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* next:underflow -> active */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* prev:first value -> underflow */
entry = mas_prev(&mas, 0x1000);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* find:underflow -> first value */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* prev: pause ->active */
mas_set(&mas, 0x3600);
@@ -3394,21 +3418,21 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* prev:active -> active spanning min */
+ /* prev:active -> underflow spanning min */
entry = mas_prev(&mas, 0x1600);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1FFF);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* prev: active ->active, continue */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find: start ->active */
mas_set(&mas, 0);
@@ -3416,7 +3440,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find: pause ->active */
mas_set(&mas, 0);
@@ -3425,7 +3449,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find: start ->active on value */;
mas_set(&mas, 1200);
@@ -3433,14 +3457,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find:active ->active */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find:active -> active (NULL)*/
@@ -3448,35 +3472,35 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x2501);
MT_BUG_ON(mt, mas.last != 0x2FFF);
- MT_BUG_ON(mt, !mas_active(mas));
+ MAS_BUG_ON(&mas, !mas_is_active(&mas));
/* find: overflow ->active */
entry = mas_find(&mas, 0x5000);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find:active -> active (NULL) end*/
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x3501);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, !mas_active(mas));
+ MAS_BUG_ON(&mas, !mas_is_active(&mas));
/* find_rev: active (END) ->active */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find_rev:active ->active */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find_rev: pause ->active */
mas_pause(&mas);
@@ -3484,14 +3508,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* find_rev:active -> active */
+ /* find_rev:active -> underflow */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* find_rev: start ->active */
mas_set(&mas, 0x1200);
@@ -3499,7 +3523,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk start ->active */
mas_set(&mas, 0x1200);
@@ -3507,7 +3531,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk start ->active */
mas_set(&mas, 0x1600);
@@ -3515,7 +3539,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk pause ->active */
mas_set(&mas, 0x1200);
@@ -3524,7 +3548,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk pause -> active */
mas_set(&mas, 0x1600);
@@ -3533,25 +3557,25 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk none -> active */
mas_set(&mas, 0x1200);
- mas.node = MAS_NONE;
+ mas.status = ma_none;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk none -> active */
mas_set(&mas, 0x1600);
- mas.node = MAS_NONE;
+ mas.status = ma_none;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk active -> active */
mas.index = 0x1200;
@@ -3561,7 +3585,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk active -> active */
mas.index = 0x1600;
@@ -3570,7 +3594,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
mas_unlock(&mas);
}
@@ -3585,10 +3609,6 @@ static int __init maple_tree_seed(void)
pr_info("\nTEST STARTING\n\n");
- mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
- check_root_expand(&tree);
- mtree_destroy(&tree);
-
#if defined(BENCH_SLOT_STORE)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
@@ -3617,13 +3637,18 @@ static int __init maple_tree_seed(void)
mtree_destroy(&tree);
goto skip;
#endif
-#if defined(BENCH_FORK)
+#if defined(BENCH_LOAD)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
- bench_forking(&tree);
+ bench_load(&tree);
mtree_destroy(&tree);
goto skip;
#endif
+#if defined(BENCH_FORK)
+#define BENCH
+ bench_forking();
+ goto skip;
+#endif
#if defined(BENCH_MT_FOR_EACH)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
@@ -3647,13 +3672,15 @@ static int __init maple_tree_seed(void)
#endif
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
- check_iteration(&tree);
+ check_root_expand(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
- check_forking(&tree);
+ check_iteration(&tree);
mtree_destroy(&tree);
+ check_forking();
+
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_mas_store_gfp(&tree);
mtree_destroy(&tree);
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 0ae35223d773..0dc173849a54 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures)
int failures = 0, num_tests = 0;
int i;
- for (i = 0; i <= MAX_ORDER; i++)
+ for (i = 0; i < NR_PAGE_ORDERS; i++)
num_tests += do_alloc_pages_order(i, &failures);
REPORT_FAILURES_IN_FN();
diff --git a/lib/trace_readwrite.c b/lib/trace_readwrite.c
index 62b4e8b3c733..a94cd56a1e4c 100644
--- a/lib/trace_readwrite.c
+++ b/lib/trace_readwrite.c
@@ -7,7 +7,7 @@
#include <linux/ftrace.h>
#include <linux/module.h>
-#include <asm-generic/io.h>
+#include <linux/io.h>
#define CREATE_TRACE_POINTS
#include <trace/events/rwmmio.h>
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 3f90810f9f42..df4f8d1354bb 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -204,8 +204,8 @@ static void ubsan_prologue(struct source_location *loc, const char *reason)
{
current->in_ubsan++;
- pr_err("========================================"
- "========================================\n");
+ pr_warn(CUT_HERE);
+
pr_err("UBSAN: %s in %s:%d:%d\n", reason, loc->file_name,
loc->line & LINE_MASK, loc->column & COLUMN_MASK);
@@ -215,8 +215,7 @@ static void ubsan_prologue(struct source_location *loc, const char *reason)
static void ubsan_epilogue(void)
{
dump_stack();
- pr_err("========================================"
- "========================================\n");
+ pr_warn("---[ end trace ]---\n");
current->in_ubsan--;
diff --git a/mm/Kconfig b/mm/Kconfig
index 57cd378c73d6..1902cfe4cc4f 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -61,6 +61,20 @@ config ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON
The cost is that if the page was never dirtied and needs to be
swapped out again, it will be re-compressed.
+config ZSWAP_SHRINKER_DEFAULT_ON
+ bool "Shrink the zswap pool on memory pressure"
+ depends on ZSWAP
+ default n
+ help
+ If selected, the zswap shrinker will be enabled, and the pages
+ stored in the zswap pool will become available for reclaim (i.e.
+ written back to the backing swap device) on memory pressure.
+
+ This means that zswap writeback could happen even if the pool is
+ not yet full, or the cgroup zswap limit has not been reached,
+ reducing the chance that cold pages will reside in the zswap pool
+ and consume memory indefinitely.
+
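+ (Editorial aside, hedged: this Kconfig entry only sets the boot-time
+ default. Assuming the accompanying zswap changes expose the usual module
+ parameter for it, the shrinker can presumably also be toggled at runtime
+ via something like /sys/module/zswap/parameters/shrinker_enabled; the
+ parameter name is an assumption, not taken from this hunk.)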
choice
prompt "Default compressor"
depends on ZSWAP
@@ -226,52 +240,17 @@ config ZSMALLOC_CHAIN_SIZE
For more information, see zsmalloc documentation.
-menu "SLAB allocator options"
-
-choice
- prompt "Choose SLAB allocator"
- default SLUB
- help
- This option allows to select a slab allocator.
-
-config SLAB_DEPRECATED
- bool "SLAB (DEPRECATED)"
- depends on !PREEMPT_RT
- help
- Deprecated and scheduled for removal in a few cycles. Replaced by
- SLUB.
-
- If you cannot migrate to SLUB, please contact linux-mm@kvack.org
- and the people listed in the SLAB ALLOCATOR section of MAINTAINERS
- file, explaining why.
-
- The regular slab allocator that is established and known to work
- well in all environments. It organizes cache hot objects in
- per cpu and per node queues.
+menu "Slab allocator options"
config SLUB
- bool "SLUB (Unqueued Allocator)"
- help
- SLUB is a slab allocator that minimizes cache line usage
- instead of managing queues of cached objects (SLAB approach).
- Per cpu caching is realized using slabs of objects instead
- of queues of objects. SLUB can use memory efficiently
- and has enhanced diagnostics. SLUB is the default choice for
- a slab allocator.
-
-endchoice
-
-config SLAB
- bool
- default y
- depends on SLAB_DEPRECATED
+ def_bool y
config SLUB_TINY
- bool "Configure SLUB for minimal memory footprint"
- depends on SLUB && EXPERT
+ bool "Configure for minimal memory footprint"
+ depends on EXPERT
select SLAB_MERGE_DEFAULT
help
- Configures the SLUB allocator in a way to achieve minimal memory
+ Configures the slab allocator in a way to achieve minimal memory
footprint, sacrificing scalability, debugging and other features.
This is intended only for the smallest system that had used the
SLOB allocator and is not recommended for systems with more than
@@ -282,7 +261,6 @@ config SLUB_TINY
config SLAB_MERGE_DEFAULT
bool "Allow slab caches to be merged"
default y
- depends on SLAB || SLUB
help
For reduced kernel memory fragmentation, slab caches can be
merged when they share the same size and other characteristics.
@@ -296,7 +274,7 @@ config SLAB_MERGE_DEFAULT
config SLAB_FREELIST_RANDOM
bool "Randomize slab freelist"
- depends on SLAB || (SLUB && !SLUB_TINY)
+ depends on !SLUB_TINY
help
Randomizes the freelist order used on creating new pages. This
security feature reduces the predictability of the kernel slab
@@ -304,21 +282,19 @@ config SLAB_FREELIST_RANDOM
config SLAB_FREELIST_HARDENED
bool "Harden slab freelist metadata"
- depends on SLAB || (SLUB && !SLUB_TINY)
+ depends on !SLUB_TINY
help
Many kernel heap attacks try to target slab cache metadata and
other infrastructure. This option makes minor performance
sacrifices to harden the kernel slab allocator against common
- freelist exploit methods. Some slab implementations have more
- sanity-checking than others. This option is most effective with
- CONFIG_SLUB.
+ freelist exploit methods.
config SLUB_STATS
default n
- bool "Enable SLUB performance statistics"
- depends on SLUB && SYSFS && !SLUB_TINY
+ bool "Enable performance statistics"
+ depends on SYSFS && !SLUB_TINY
help
- SLUB statistics are useful to debug SLUBs allocation behavior in
+ The statistics are useful to debug slab allocation behavior in
order to find ways to optimize the allocator. This should never be
enabled for production use since keeping statistics slows down
the allocator by a few percentage points. The slabinfo command
@@ -328,8 +304,8 @@ config SLUB_STATS
config SLUB_CPU_PARTIAL
default y
- depends on SLUB && SMP && !SLUB_TINY
- bool "SLUB per cpu partial cache"
+ depends on SMP && !SLUB_TINY
+ bool "Enable per cpu partial caches"
help
Per cpu partial caches accelerate objects allocation and freeing
that is local to a processor at the price of more indeterminism
@@ -339,7 +315,7 @@ config SLUB_CPU_PARTIAL
config RANDOM_KMALLOC_CACHES
default n
- depends on SLUB && !SLUB_TINY
+ depends on !SLUB_TINY
bool "Randomize slab caches for normal kmalloc"
help
A hardening feature that creates multiple copies of slab caches for
@@ -354,7 +330,7 @@ config RANDOM_KMALLOC_CACHES
limited degree of memory and CPU overhead that relates to hardware and
system workload.
-endmenu # SLAB allocator options
+endmenu # Slab allocator options
config SHUFFLE_PAGE_ALLOCATOR
bool "Page allocator randomization"
@@ -367,7 +343,7 @@ config SHUFFLE_PAGE_ALLOCATOR
the presence of a memory-side-cache. There are also incidental
security benefits as it reduces the predictability of page
allocations to complement SLAB_FREELIST_RANDOM, but the
- default granularity of shuffling on the MAX_ORDER i.e, 10th
+ default granularity of shuffling on the MAX_PAGE_ORDER, i.e., 10th
order of pages is selected based on cache utilization benefits
on x86.
@@ -699,8 +675,8 @@ config HUGETLB_PAGE_SIZE_VARIABLE
HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available
on a platform.
- Note that the pageblock_order cannot exceed MAX_ORDER and will be
- clamped down to MAX_ORDER.
+ Note that the pageblock_order cannot exceed MAX_PAGE_ORDER and will be
+ clamped down to MAX_PAGE_ORDER.
config CONTIG_ALLOC
def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
@@ -756,7 +732,7 @@ config DEFAULT_MMAP_MIN_ADDR
from userspace allocation. Keeping a user from writing to low pages
can help reduce the impact of kernel NULL pointer bugs.
- For most ia64, ppc64 and x86 users with lots of address space
+ For most ppc64 and x86 users with lots of address space
a value of 65536 is reasonable and should cause no problems.
On arm and other archs it should not be higher than 32768.
Programs which use vm86 functionality or have some need to map
@@ -859,6 +835,12 @@ choice
madvise(MADV_HUGEPAGE) but it won't risk to increase the
memory footprint of applications without a guaranteed
benefit.
+
+ config TRANSPARENT_HUGEPAGE_NEVER
+ bool "never"
+ help
+ Disable Transparent Hugepage by default. It can still be
+ enabled at runtime via sysfs.
endchoice
config THP_SWAP
@@ -1254,6 +1236,10 @@ config LRU_GEN_STATS
from evicted generations for debugging purpose.
This option has a per-memcg and per-node memory overhead.
+
+config LRU_GEN_WALKS_MMU
+ def_bool y
+ depends on LRU_GEN && ARCH_HAS_HW_PTE_YOUNG
# }
config ARCH_SUPPORTS_PER_VMA_LOCK
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 018a5bd2f576..321ab379994f 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -45,18 +45,10 @@ config DEBUG_PAGEALLOC_ENABLE_DEFAULT
Enable debug page memory allocations by default? This value
can be overridden by debug_pagealloc=off|on.
-config DEBUG_SLAB
- bool "Debug slab memory allocations"
- depends on DEBUG_KERNEL && SLAB
- help
- Say Y here to have the kernel do limited verification on memory
- allocation as well as poisoning memory on free to catch use of freed
- memory. This can make kmalloc/kfree-intensive workloads much slower.
-
config SLUB_DEBUG
default y
bool "Enable SLUB debugging support" if EXPERT
- depends on SLUB && SYSFS && !SLUB_TINY
+ depends on SYSFS && !SLUB_TINY
select STACKDEPOT if STACKTRACE_SUPPORT
help
SLUB has extensive debug support features. Disabling these can
@@ -66,7 +58,7 @@ config SLUB_DEBUG
config SLUB_DEBUG_ON
bool "SLUB debugging on by default"
- depends on SLUB && SLUB_DEBUG
+ depends on SLUB_DEBUG
select STACKDEPOT_ALWAYS_INIT if STACKTRACE_SUPPORT
default n
help
@@ -231,8 +223,8 @@ config DEBUG_KMEMLEAK
allocations. See Documentation/dev-tools/kmemleak.rst for more
details.
- Enabling DEBUG_SLAB or SLUB_DEBUG may increase the chances
- of finding leaks due to the slab objects poisoning.
+ Enabling SLUB_DEBUG may increase the chances of finding leaks
+ due to the slab objects poisoning.
In order to access the kmemleak file, debugfs needs to be
mounted (usually at /sys/kernel/debug).
diff --git a/mm/Makefile b/mm/Makefile
index 33873c8aedb3..e4b5b75aaec9 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -4,7 +4,6 @@
#
KASAN_SANITIZE_slab_common.o := n
-KASAN_SANITIZE_slab.o := n
KASAN_SANITIZE_slub.o := n
KCSAN_SANITIZE_kmemleak.o := n
@@ -12,7 +11,6 @@ KCSAN_SANITIZE_kmemleak.o := n
# the same word but accesses to different bits of that word. Re-enable KCSAN
# for these when we have more consensus on what to do about them.
KCSAN_SANITIZE_slab_common.o := n
-KCSAN_SANITIZE_slab.o := n
KCSAN_SANITIZE_slub.o := n
KCSAN_SANITIZE_page_alloc.o := n
# But enable explicit instrumentation for memory barriers.
@@ -22,7 +20,6 @@ KCSAN_INSTRUMENT_BARRIERS := y
# flaky coverage that is not a function of syscall inputs. E.g. slab is out of
# free pages, or a task is migrated between nodes.
KCOV_INSTRUMENT_slab_common.o := n
-KCOV_INSTRUMENT_slab.o := n
KCOV_INSTRUMENT_slub.o := n
KCOV_INSTRUMENT_page_alloc.o := n
KCOV_INSTRUMENT_debug-pagealloc.o := n
@@ -66,6 +63,7 @@ obj-y += page-alloc.o
obj-y += init-mm.o
obj-y += memblock.o
obj-y += $(memory-hotplug-y)
+obj-y += slub.o
ifdef CONFIG_MMU
obj-$(CONFIG_ADVISE_SYSCALLS) += madvise.o
@@ -82,8 +80,6 @@ obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
obj-$(CONFIG_KSM) += ksm.o
obj-$(CONFIG_PAGE_POISONING) += page_poison.o
-obj-$(CONFIG_SLAB) += slab.o
-obj-$(CONFIG_SLUB) += slub.o
obj-$(CONFIG_KASAN) += kasan/
obj-$(CONFIG_KFENCE) += kfence/
obj-$(CONFIG_KMSAN) += kmsan/
diff --git a/mm/cma.c b/mm/cma.c
index 2b2494fd6b59..7c09c47e530b 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -244,7 +244,7 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
{
phys_addr_t memblock_end = memblock_end_of_DRAM();
phys_addr_t highmem_start;
- int ret = 0;
+ int ret;
/*
* We can't use __pa(high_memory) directly, since high_memory
diff --git a/mm/compaction.c b/mm/compaction.c
index 01ba298739dd..27ada42924d5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -999,7 +999,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* a valid page order. Consider only values in the
* valid order range to prevent low_pfn overflow.
*/
- if (freepage_order > 0 && freepage_order <= MAX_ORDER) {
+ if (freepage_order > 0 && freepage_order <= MAX_PAGE_ORDER) {
low_pfn += (1UL << freepage_order) - 1;
nr_scanned += (1UL << freepage_order) - 1;
}
@@ -1017,7 +1017,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
if (PageCompound(page) && !cc->alloc_contig) {
const unsigned int order = compound_order(page);
- if (likely(order <= MAX_ORDER)) {
+ if (likely(order <= MAX_PAGE_ORDER)) {
low_pfn += (1UL << order) - 1;
nr_scanned += (1UL << order) - 1;
}
@@ -1611,6 +1611,9 @@ static void fast_isolate_freepages(struct compact_control *cc)
min(pageblock_end_pfn(min_pfn),
zone_end_pfn(cc->zone)),
cc->zone);
+ if (page && !suitable_migration_target(cc, page))
+ page = NULL;
+
cc->free_pfn = min_pfn;
}
}
@@ -2226,7 +2229,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
/* Direct compactor: Is a suitable page free? */
ret = COMPACT_NO_SUITABLE_PAGE;
- for (order = cc->order; order <= MAX_ORDER; order++) {
+ for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
struct free_area *area = &cc->zone->free_area[order];
bool can_steal;
diff --git a/mm/damon/core-test.h b/mm/damon/core-test.h
index 649adf91ebc5..0cee634f3544 100644
--- a/mm/damon/core-test.h
+++ b/mm/damon/core-test.h
@@ -4,7 +4,7 @@
*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#ifdef CONFIG_DAMON_KUNIT_TEST
@@ -122,18 +122,25 @@ static void damon_test_split_at(struct kunit *test)
{
struct damon_ctx *c = damon_new_ctx();
struct damon_target *t;
- struct damon_region *r;
+ struct damon_region *r, *r_new;
t = damon_new_target();
r = damon_new_region(0, 100);
+ r->nr_accesses_bp = 420000;
+ r->nr_accesses = 42;
+ r->last_nr_accesses = 15;
damon_add_region(r, t);
damon_split_region_at(t, r, 25);
KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);
- r = damon_next_region(r);
- KUNIT_EXPECT_EQ(test, r->ar.start, 25ul);
- KUNIT_EXPECT_EQ(test, r->ar.end, 100ul);
+ r_new = damon_next_region(r);
+ KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul);
+ KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul);
+
+ KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp);
+ KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses);
+ KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses);
damon_free_target(t);
damon_destroy_ctx(c);
@@ -295,6 +302,16 @@ static void damon_test_set_regions(struct kunit *test)
damon_destroy_target(t);
}
+static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
+{
+ struct damon_attrs attrs = {
+ .sample_interval = 10,
+ .aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
+ };
+
+ KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
+}
+
static void damon_test_update_monitoring_result(struct kunit *test)
{
struct damon_attrs old_attrs = {
@@ -439,6 +456,37 @@ static void damos_test_filter_out(struct kunit *test)
damos_free_filter(f);
}
+static void damon_test_feed_loop_next_input(struct kunit *test)
+{
+ unsigned long last_input = 900000, current_score = 200;
+
+ /*
+ * If the current score is lower than the goal, which is always 10,000
+ * (see the comment on damon_feed_loop_next_input()), the next
+ * input should be higher than the last input.
+ */
+ KUNIT_EXPECT_GT(test,
+ damon_feed_loop_next_input(last_input, current_score),
+ last_input);
+
+ /*
+ * If the current score is higher than the goal, the next input should be lower
+ * than the last input.
+ */
+ current_score = 250000000;
+ KUNIT_EXPECT_LT(test,
+ damon_feed_loop_next_input(last_input, current_score),
+ last_input);
+
+ /*
+ * The next input depends on the distance between the current score and
+ * the goal.
+ */
+ KUNIT_EXPECT_GT(test,
+ damon_feed_loop_next_input(last_input, 200),
+ damon_feed_loop_next_input(last_input, 2000));
+}
+
static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damon_test_target),
KUNIT_CASE(damon_test_regions),
@@ -449,11 +497,13 @@ static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damon_test_split_regions_of),
KUNIT_CASE(damon_test_ops_registration),
KUNIT_CASE(damon_test_set_regions),
+ KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp),
KUNIT_CASE(damon_test_update_monitoring_result),
KUNIT_CASE(damon_test_set_attrs),
KUNIT_CASE(damon_test_moving_sum),
KUNIT_CASE(damos_test_new_filter),
KUNIT_CASE(damos_test_filter_out),
+ KUNIT_CASE(damon_test_feed_loop_next_input),
{},
};
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 3a05e71509b9..36f6f1d21ff0 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -2,7 +2,7 @@
/*
* Data Access Monitor
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#define pr_fmt(fmt) "damon: " fmt
@@ -1043,26 +1043,76 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
}
}
-/* Shouldn't be called if quota->ms and quota->sz are zero */
+/*
+ * damon_feed_loop_next_input() - get next input to achieve a target score.
+ * @last_input: The last input.
+ * @score: Current score achieved with @last_input.
+ *
+ * Calculate the next input to achieve the target score, based on the last
+ * input and the current score. Assuming the input and the score are
+ * positively proportional, calculate how much compensation should be added
+ * to or subtracted from the last input, as a proportion of the last input.
+ * The next input is always kept non-zero so that the feedback loop cannot
+ * get stuck at zero. In short form (assuming support for float and signed
+ * calculations), the algorithm is as below.
+ *
+ * next_input = max(last_input * ((goal - current) / goal + 1), 1)
+ *
+ * For simplicity of implementation, the target score is assumed to always
+ * be 10,000. The caller should adjust @score for this.
+ *
+ * Returns the next input that is assumed to achieve the target score.
+ */
+static unsigned long damon_feed_loop_next_input(unsigned long last_input,
+ unsigned long score)
+{
+ const unsigned long goal = 10000;
+ unsigned long score_goal_diff = max(goal, score) - min(goal, score);
+ unsigned long score_goal_diff_bp = score_goal_diff * 10000 / goal;
+ unsigned long compensation = last_input * score_goal_diff_bp / 10000;
+ /* Set the minimum input to 10000 to avoid the compensation being zero */
+ const unsigned long min_input = 10000;
+
+ if (goal > score)
+ return last_input + compensation;
+ if (last_input > compensation + min_input)
+ return last_input - compensation;
+ return min_input;
+}
+
+/* Shouldn't be called if quota->ms, quota->sz, and quota->get_score are all unset */
static void damos_set_effective_quota(struct damos_quota *quota)
{
unsigned long throughput;
unsigned long esz;
- if (!quota->ms) {
+ if (!quota->ms && !quota->get_score) {
quota->esz = quota->sz;
return;
}
- if (quota->total_charged_ns)
- throughput = quota->total_charged_sz * 1000000 /
- quota->total_charged_ns;
- else
- throughput = PAGE_SIZE * 1024;
- esz = throughput * quota->ms;
+ if (quota->get_score) {
+ quota->esz_bp = damon_feed_loop_next_input(
+ max(quota->esz_bp, 10000UL),
+ quota->get_score(quota->get_score_arg));
+ esz = quota->esz_bp / 10000;
+ }
+
+ if (quota->ms) {
+ if (quota->total_charged_ns)
+ throughput = quota->total_charged_sz * 1000000 /
+ quota->total_charged_ns;
+ else
+ throughput = PAGE_SIZE * 1024;
+ if (quota->get_score)
+ esz = min(throughput * quota->ms, esz);
+ else
+ esz = throughput * quota->ms;
+ }
if (quota->sz && quota->sz < esz)
esz = quota->sz;
+
quota->esz = esz;
}
@@ -1074,7 +1124,7 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
unsigned long cumulated_sz;
unsigned int score, max_score = 0;
- if (!quota->ms && !quota->sz)
+ if (!quota->ms && !quota->sz && !quota->get_score)
return;
/* New charge window starts */
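
To make the feedback-loop arithmetic above concrete, here is a standalone userspace restatement of damon_feed_loop_next_input()'s integer math, driven with the same values the new kunit case uses; it mirrors the patch's calculation but is only an illustration:

    #include <stdio.h>

    /* next_input = max(last_input * ((goal - score) / goal + 1), min_input),
     * computed in basis points to stay within integer arithmetic.  Assumes a
     * 64-bit unsigned long for the very large over-achieving score below. */
    static unsigned long next_input(unsigned long last_input, unsigned long score)
    {
    	const unsigned long goal = 10000, min_input = 10000;
    	unsigned long diff = score > goal ? score - goal : goal - score;
    	unsigned long diff_bp = diff * 10000 / goal;
    	unsigned long compensation = last_input * diff_bp / 10000;

    	if (goal > score)
    		return last_input + compensation;
    	if (last_input > compensation + min_input)
    		return last_input - compensation;
    	return min_input;
    }

    int main(void)
    {
    	/* As in damon_test_feed_loop_next_input(): an under-achieving score
    	 * (200) grows the input, an over-achieving one (250000000) shrinks
    	 * it down to the floor. */
    	printf("%lu\n", next_input(900000, 200));	/* 1782000 */
    	printf("%lu\n", next_input(900000, 250000000));	/* 10000 */
    	return 0;
    }
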
diff --git a/mm/damon/dbgfs-test.h b/mm/damon/dbgfs-test.h
index 0bb0d532b159..2d85217f5ba4 100644
--- a/mm/damon/dbgfs-test.h
+++ b/mm/damon/dbgfs-test.h
@@ -2,7 +2,7 @@
/*
* DAMON Debugfs Interface Unit Tests
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#ifdef CONFIG_DAMON_DBGFS_KUNIT_TEST
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index dc0ea1fc30ca..7dac24e69e3b 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -2,7 +2,7 @@
/*
* DAMON Debugfs Interface
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#define pr_fmt(fmt) "damon-dbgfs: " fmt
diff --git a/mm/damon/modules-common.c b/mm/damon/modules-common.c
index b2381a8466ec..7cf96574cde7 100644
--- a/mm/damon/modules-common.c
+++ b/mm/damon/modules-common.c
@@ -2,7 +2,7 @@
/*
* Common Primitives for DAMON Modules
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#include <linux/damon.h>
diff --git a/mm/damon/sysfs-common.h b/mm/damon/sysfs-common.h
index 5ff081226e28..4c37a166eb81 100644
--- a/mm/damon/sysfs-common.h
+++ b/mm/damon/sysfs-common.h
@@ -56,3 +56,6 @@ int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx);
int damon_sysfs_schemes_clear_regions(
struct damon_sysfs_schemes *sysfs_schemes,
struct damon_ctx *ctx);
+
+void damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
+ struct damon_ctx *ctx);
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index fe0fe2562000..8dbaac6e5c2d 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -821,6 +821,203 @@ static const struct kobj_type damon_sysfs_watermarks_ktype = {
};
/*
+ * quota goal directory
+ */
+
+struct damos_sysfs_quota_goal {
+ struct kobject kobj;
+ unsigned long target_value;
+ unsigned long current_value;
+};
+
+static struct damos_sysfs_quota_goal *damos_sysfs_quota_goal_alloc(void)
+{
+ return kzalloc(sizeof(struct damos_sysfs_quota_goal), GFP_KERNEL);
+}
+
+static ssize_t target_value_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
+ damos_sysfs_quota_goal, kobj);
+
+ return sysfs_emit(buf, "%lu\n", goal->target_value);
+}
+
+static ssize_t target_value_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
+ damos_sysfs_quota_goal, kobj);
+ int err = kstrtoul(buf, 0, &goal->target_value);
+
+ return err ? err : count;
+}
+
+static ssize_t current_value_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
+ damos_sysfs_quota_goal, kobj);
+
+ return sysfs_emit(buf, "%lu\n", goal->current_value);
+}
+
+static ssize_t current_value_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
+ damos_sysfs_quota_goal, kobj);
+ int err = kstrtoul(buf, 0, &goal->current_value);
+
+ /* feed callback should check existence of this file and read value */
+ return err ? err : count;
+}
+
+static void damos_sysfs_quota_goal_release(struct kobject *kobj)
+{
+ /* or, notify this release to the feed callback */
+ kfree(container_of(kobj, struct damos_sysfs_quota_goal, kobj));
+}
+
+static struct kobj_attribute damos_sysfs_quota_goal_target_value_attr =
+ __ATTR_RW_MODE(target_value, 0600);
+
+static struct kobj_attribute damos_sysfs_quota_goal_current_value_attr =
+ __ATTR_RW_MODE(current_value, 0600);
+
+static struct attribute *damos_sysfs_quota_goal_attrs[] = {
+ &damos_sysfs_quota_goal_target_value_attr.attr,
+ &damos_sysfs_quota_goal_current_value_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(damos_sysfs_quota_goal);
+
+static const struct kobj_type damos_sysfs_quota_goal_ktype = {
+ .release = damos_sysfs_quota_goal_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = damos_sysfs_quota_goal_groups,
+};
+
+/*
+ * quota goals directory
+ */
+
+struct damos_sysfs_quota_goals {
+ struct kobject kobj;
+ struct damos_sysfs_quota_goal **goals_arr; /* counted by nr */
+ int nr;
+};
+
+static struct damos_sysfs_quota_goals *damos_sysfs_quota_goals_alloc(void)
+{
+ return kzalloc(sizeof(struct damos_sysfs_quota_goals), GFP_KERNEL);
+}
+
+static void damos_sysfs_quota_goals_rm_dirs(
+ struct damos_sysfs_quota_goals *goals)
+{
+ struct damos_sysfs_quota_goal **goals_arr = goals->goals_arr;
+ int i;
+
+ for (i = 0; i < goals->nr; i++)
+ kobject_put(&goals_arr[i]->kobj);
+ goals->nr = 0;
+ kfree(goals_arr);
+ goals->goals_arr = NULL;
+}
+
+static int damos_sysfs_quota_goals_add_dirs(
+ struct damos_sysfs_quota_goals *goals, int nr_goals)
+{
+ struct damos_sysfs_quota_goal **goals_arr, *goal;
+ int err, i;
+
+ damos_sysfs_quota_goals_rm_dirs(goals);
+ if (!nr_goals)
+ return 0;
+
+ goals_arr = kmalloc_array(nr_goals, sizeof(*goals_arr),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!goals_arr)
+ return -ENOMEM;
+ goals->goals_arr = goals_arr;
+
+ for (i = 0; i < nr_goals; i++) {
+ goal = damos_sysfs_quota_goal_alloc();
+ if (!goal) {
+ damos_sysfs_quota_goals_rm_dirs(goals);
+ return -ENOMEM;
+ }
+
+ err = kobject_init_and_add(&goal->kobj,
+ &damos_sysfs_quota_goal_ktype, &goals->kobj,
+ "%d", i);
+ if (err) {
+ kobject_put(&goal->kobj);
+ damos_sysfs_quota_goals_rm_dirs(goals);
+ return err;
+ }
+
+ goals_arr[i] = goal;
+ goals->nr++;
+ }
+ return 0;
+}
+
+static ssize_t nr_goals_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damos_sysfs_quota_goals *goals = container_of(kobj,
+ struct damos_sysfs_quota_goals, kobj);
+
+ return sysfs_emit(buf, "%d\n", goals->nr);
+}
+
+static ssize_t nr_goals_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damos_sysfs_quota_goals *goals;
+ int nr, err = kstrtoint(buf, 0, &nr);
+
+ if (err)
+ return err;
+ if (nr < 0)
+ return -EINVAL;
+
+ goals = container_of(kobj, struct damos_sysfs_quota_goals, kobj);
+
+ if (!mutex_trylock(&damon_sysfs_lock))
+ return -EBUSY;
+ err = damos_sysfs_quota_goals_add_dirs(goals, nr);
+ mutex_unlock(&damon_sysfs_lock);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static void damos_sysfs_quota_goals_release(struct kobject *kobj)
+{
+ kfree(container_of(kobj, struct damos_sysfs_quota_goals, kobj));
+}
+
+static struct kobj_attribute damos_sysfs_quota_goals_nr_attr =
+ __ATTR_RW_MODE(nr_goals, 0600);
+
+static struct attribute *damos_sysfs_quota_goals_attrs[] = {
+ &damos_sysfs_quota_goals_nr_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(damos_sysfs_quota_goals);
+
+static const struct kobj_type damos_sysfs_quota_goals_ktype = {
+ .release = damos_sysfs_quota_goals_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = damos_sysfs_quota_goals_groups,
+};
+
+/*
* scheme/weights directory
*/
@@ -938,6 +1135,7 @@ static const struct kobj_type damon_sysfs_weights_ktype = {
struct damon_sysfs_quotas {
struct kobject kobj;
struct damon_sysfs_weights *weights;
+ struct damos_sysfs_quota_goals *goals;
unsigned long ms;
unsigned long sz;
unsigned long reset_interval_ms;
@@ -951,6 +1149,7 @@ static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
{
struct damon_sysfs_weights *weights;
+ struct damos_sysfs_quota_goals *goals;
int err;
weights = damon_sysfs_weights_alloc(0, 0, 0);
@@ -959,16 +1158,35 @@ static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype,
&quotas->kobj, "weights");
- if (err)
+ if (err) {
kobject_put(&weights->kobj);
- else
- quotas->weights = weights;
+ return err;
+ }
+ quotas->weights = weights;
+
+ goals = damos_sysfs_quota_goals_alloc();
+ if (!goals) {
+ kobject_put(&weights->kobj);
+ return -ENOMEM;
+ }
+ err = kobject_init_and_add(&goals->kobj,
+ &damos_sysfs_quota_goals_ktype, &quotas->kobj,
+ "goals");
+ if (err) {
+ kobject_put(&weights->kobj);
+ kobject_put(&goals->kobj);
+ } else {
+ quotas->goals = goals;
+ }
+
return err;
}
static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas)
{
kobject_put(&quotas->weights->kobj);
+ damos_sysfs_quota_goals_rm_dirs(quotas->goals);
+ kobject_put(&quotas->goals->kobj);
}
static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -1650,6 +1868,50 @@ static int damon_sysfs_set_scheme_filters(struct damos *scheme,
return 0;
}
+static unsigned long damos_sysfs_get_quota_score(void *arg)
+{
+ return (unsigned long)arg;
+}
+
+static void damos_sysfs_set_quota_score(
+ struct damos_sysfs_quota_goals *sysfs_goals,
+ struct damos_quota *quota)
+{
+ struct damos_sysfs_quota_goal *sysfs_goal;
+ int i;
+
+ quota->get_score = NULL;
+ quota->get_score_arg = (void *)0;
+ for (i = 0; i < sysfs_goals->nr; i++) {
+ sysfs_goal = sysfs_goals->goals_arr[i];
+ if (!sysfs_goal->target_value)
+ continue;
+
+ /* Higher score makes scheme less aggressive */
+ quota->get_score_arg = (void *)max(
+ (unsigned long)quota->get_score_arg,
+ sysfs_goal->current_value * 10000 /
+ sysfs_goal->target_value);
+ quota->get_score = damos_sysfs_get_quota_score;
+ }
+}
+
+void damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
+ struct damon_ctx *ctx)
+{
+ struct damos *scheme;
+ int i = 0;
+
+ damon_for_each_scheme(scheme, ctx) {
+ struct damon_sysfs_scheme *sysfs_scheme;
+
+ sysfs_scheme = sysfs_schemes->schemes_arr[i];
+ damos_sysfs_set_quota_score(sysfs_scheme->quotas->goals,
+ &scheme->quota);
+ i++;
+ }
+}
+
static struct damos *damon_sysfs_mk_scheme(
struct damon_sysfs_scheme *sysfs_scheme)
{
@@ -1687,6 +1949,8 @@ static struct damos *damon_sysfs_mk_scheme(
.low = sysfs_wmarks->low,
};
+ damos_sysfs_set_quota_score(sysfs_quotas->goals, &quota);
+
scheme = damon_new_scheme(&pattern, sysfs_scheme->action,
sysfs_scheme->apply_interval_us, &quota, &wmarks);
if (!scheme)
@@ -1727,6 +1991,8 @@ static void damon_sysfs_update_scheme(struct damos *scheme,
scheme->quota.weight_nr_accesses = sysfs_weights->nr_accesses;
scheme->quota.weight_age = sysfs_weights->age;
+ damos_sysfs_set_quota_score(sysfs_quotas->goals, &scheme->quota);
+
scheme->wmarks.metric = sysfs_wmarks->metric;
scheme->wmarks.interval = sysfs_wmarks->interval_us;
scheme->wmarks.high = sysfs_wmarks->high;
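
The score that damos_sysfs_set_quota_score() hands to the feedback loop is each goal's current_value over target_value in basis points, with the scheme taking the highest (worst) value across its goals and passing it through the void *get_score_arg. A small userspace sketch of that selection (illustrative only, not kernel code):

    #include <stdio.h>

    /* 10000 means "exactly on target"; goals with no target value are skipped
     * and the scheme is driven by the goal that is furthest over its target. */
    static unsigned long quota_score(const unsigned long *cur,
    				 const unsigned long *tgt, int nr)
    {
    	unsigned long score = 0, s;
    	int i;

    	for (i = 0; i < nr; i++) {
    		if (!tgt[i])
    			continue;
    		s = cur[i] * 10000 / tgt[i];
    		if (s > score)
    			score = s;
    	}
    	return score;
    }

    int main(void)
    {
    	unsigned long cur[] = { 50, 300 }, tgt[] = { 100, 200 };

    	/* 50/100 -> 5000, 300/200 -> 15000; the scheme sees 15000. */
    	printf("%lu\n", quota_score(cur, tgt, 2));
    	return 0;
    }
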
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 7472404456aa..1f891e18b4ee 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -995,6 +995,11 @@ enum damon_sysfs_cmd {
/* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
DAMON_SYSFS_CMD_COMMIT,
/*
+ * @DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS: Commit the quota goals
+ * to DAMON.
+ */
+ DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS,
+ /*
* @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
* files.
*/
@@ -1025,6 +1030,7 @@ static const char * const damon_sysfs_cmd_strs[] = {
"on",
"off",
"commit",
+ "commit_schemes_quota_goals",
"update_schemes_stats",
"update_schemes_tried_bytes",
"update_schemes_tried_regions",
@@ -1351,6 +1357,24 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
kdamond->contexts->contexts_arr[0]);
}
+static int damon_sysfs_commit_schemes_quota_goals(
+ struct damon_sysfs_kdamond *sysfs_kdamond)
+{
+ struct damon_ctx *ctx;
+ struct damon_sysfs_context *sysfs_ctx;
+
+ if (!damon_sysfs_kdamond_running(sysfs_kdamond))
+ return -EINVAL;
+ /* TODO: Support multiple contexts per kdamond */
+ if (sysfs_kdamond->contexts->nr != 1)
+ return -EINVAL;
+
+ ctx = sysfs_kdamond->damon_ctx;
+ sysfs_ctx = sysfs_kdamond->contexts->contexts_arr[0];
+ damos_sysfs_set_quota_scores(sysfs_ctx->schemes, ctx);
+ return 0;
+}
+
/*
* damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
* @c: The DAMON context of the callback.
@@ -1379,6 +1403,9 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active)
case DAMON_SYSFS_CMD_COMMIT:
err = damon_sysfs_commit_input(kdamond);
break;
+ case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS:
+ err = damon_sysfs_commit_schemes_quota_goals(kdamond);
+ break;
case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES:
total_bytes_only = true;
fallthrough;
diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h
index dcf1ca6b31cc..83626483f82b 100644
--- a/mm/damon/vaddr-test.h
+++ b/mm/damon/vaddr-test.h
@@ -4,7 +4,7 @@
*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index a4d1f63c5b23..381559e4a1fa 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -2,14 +2,14 @@
/*
* DAMON Primitives for Virtual Address Spaces
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#define pr_fmt(fmt) "damon-va: " fmt
-#include <asm-generic/mman-common.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
+#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
diff --git a/mm/debug_page_alloc.c b/mm/debug_page_alloc.c
index f9d145730fd1..6755f0c9d4a3 100644
--- a/mm/debug_page_alloc.c
+++ b/mm/debug_page_alloc.c
@@ -22,7 +22,7 @@ static int __init debug_guardpage_minorder_setup(char *buf)
{
unsigned long res;
- if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
+ if (kstrtoul(buf, 10, &res) < 0 || res > MAX_PAGE_ORDER / 2) {
pr_err("Bad debug_guardpage_minorder value\n");
return 0;
}
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index e651500e597a..5662e29fe253 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -1091,7 +1091,7 @@ debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
struct page *page = NULL;
#ifdef CONFIG_CONTIG_ALLOC
- if (order > MAX_ORDER) {
+ if (order > MAX_PAGE_ORDER) {
page = alloc_contig_pages((1 << order), GFP_KERNEL,
first_online_node, NULL);
if (page) {
@@ -1101,7 +1101,7 @@ debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
}
#endif
- if (order <= MAX_ORDER)
+ if (order <= MAX_PAGE_ORDER)
page = alloc_pages(GFP_KERNEL, order);
return page;
diff --git a/mm/dmapool.c b/mm/dmapool.c
index a151a21e571b..f0bfc6c490f4 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -36,7 +36,7 @@
#include <linux/types.h>
#include <linux/wait.h>
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
+#ifdef CONFIG_SLUB_DEBUG_ON
#define DMAPOOL_DEBUG 1
#endif
diff --git a/mm/filemap.c b/mm/filemap.c
index ad5b4aa049a3..c8dafe70d4cc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -113,11 +113,11 @@
* ->i_pages lock (try_to_unmap_one)
* ->lruvec->lru_lock (follow_page->mark_page_accessed)
* ->lruvec->lru_lock (check_pte_range->isolate_lru_page)
- * ->private_lock (page_remove_rmap->set_page_dirty)
- * ->i_pages lock (page_remove_rmap->set_page_dirty)
- * bdi.wb->list_lock (page_remove_rmap->set_page_dirty)
- * ->inode->i_lock (page_remove_rmap->set_page_dirty)
- * ->memcg->move_lock (page_remove_rmap->folio_memcg_lock)
+ * ->private_lock (folio_remove_rmap_pte->set_page_dirty)
+ * ->i_pages lock (folio_remove_rmap_pte->set_page_dirty)
+ * bdi.wb->list_lock (folio_remove_rmap_pte->set_page_dirty)
+ * ->inode->i_lock (folio_remove_rmap_pte->set_page_dirty)
+ * ->memcg->move_lock (folio_remove_rmap_pte->folio_memcg_lock)
* bdi.wb->list_lock (zap_pte_range->set_page_dirty)
* ->inode->i_lock (zap_pte_range->set_page_dirty)
* ->private_lock (zap_pte_range->block_dirty_folio)
@@ -1623,7 +1623,7 @@ EXPORT_SYMBOL_GPL(__folio_lock_killable);
static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
{
struct wait_queue_head *q = folio_waitqueue(folio);
- int ret = 0;
+ int ret;
wait->folio = folio;
wait->bit_nr = PG_locked;
@@ -2173,7 +2173,7 @@ update_start:
if (nr) {
folio = fbatch->folios[nr - 1];
- *start = folio->index + folio_nr_pages(folio);
+ *start = folio_next_index(folio);
}
out:
rcu_read_unlock();
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 10c3247542cb..50412014f16f 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -46,9 +46,9 @@ void mark_page_accessed(struct page *page)
}
EXPORT_SYMBOL(mark_page_accessed);
-bool set_page_writeback(struct page *page)
+void set_page_writeback(struct page *page)
{
- return folio_start_writeback(page_folio(page));
+ folio_start_writeback(page_folio(page));
}
EXPORT_SYMBOL(set_page_writeback);
@@ -77,12 +77,6 @@ bool redirty_page_for_writepage(struct writeback_control *wbc,
}
EXPORT_SYMBOL(redirty_page_for_writepage);
-void lru_cache_add_inactive_or_unevictable(struct page *page,
- struct vm_area_struct *vma)
-{
- folio_add_lru_vma(page_folio(page), vma);
-}
-
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp)
{
@@ -122,13 +116,3 @@ void putback_lru_page(struct page *page)
{
folio_putback_lru(page_folio(page));
}
-
-#ifdef CONFIG_MMU
-void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
- unsigned long address)
-{
- VM_BUG_ON_PAGE(PageTail(page), page);
-
- return folio_add_new_anon_rmap((struct folio *)page, vma, address);
-}
-#endif
diff --git a/mm/gup.c b/mm/gup.c
index 231711efa390..df83182ec72d 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -177,7 +177,7 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
/*
* Adjust the pincount before re-checking the PTE for changes.
* This is essentially a smp_mb() and is paired with a memory
- * barrier in page_try_share_anon_rmap().
+ * barrier in folio_try_share_anon_rmap_*().
*/
smp_mb__after_atomic();
@@ -710,6 +710,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
spin_unlock(ptl);
if (page)
return page;
+ return no_page_table(vma, flags);
}
if (likely(!pmd_trans_huge(pmdval)))
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
@@ -758,6 +759,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
spin_unlock(ptl);
if (page)
return page;
+ return no_page_table(vma, flags);
}
if (unlikely(pud_bad(*pud)))
return no_page_table(vma, flags);
diff --git a/mm/highmem.c b/mm/highmem.c
index e19269093a93..bd48ba445dd4 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -799,8 +799,6 @@ void set_page_address(struct page *page, void *virtual)
}
spin_unlock_irqrestore(&pas->lock, flags);
}
-
- return;
}
void __init page_address_init(void)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4f542444a91f..94ef5c02b459 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -74,12 +74,23 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
+unsigned long huge_anon_orders_always __read_mostly;
+unsigned long huge_anon_orders_madvise __read_mostly;
+unsigned long huge_anon_orders_inherit __read_mostly;
+
+unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders)
+{
+ /* Check the intersection of requested and supported orders. */
+ orders &= vma_is_anonymous(vma) ?
+ THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
+ if (!orders)
+ return 0;
-bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
- bool smaps, bool in_pf, bool enforce_sysfs)
-{
if (!vma->vm_mm) /* vdso */
- return false;
+ return 0;
/*
* Explicitly disabled through madvise or prctl, or some
@@ -88,16 +99,16 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
* */
if ((vm_flags & VM_NOHUGEPAGE) ||
test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
- return false;
+ return 0;
/*
* If the hardware/firmware marked hugepage support disabled.
*/
if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
- return false;
+ return 0;
/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
if (vma_is_dax(vma))
- return in_pf;
+ return in_pf ? orders : 0;
/*
* khugepaged special VMA and hugetlb VMA.
@@ -105,17 +116,29 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
* VM_MIXEDMAP set.
*/
if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
- return false;
+ return 0;
/*
- * Check alignment for file vma and size for both file and anon vma.
+ * Check alignment for file vma and size for both file and anon vma by
+ * filtering out the unsuitable orders.
*
* Skip the check for page fault. Huge fault does the check in fault
- * handlers. And this check is not suitable for huge PUD fault.
+ * handlers.
*/
- if (!in_pf &&
- !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
- return false;
+ if (!in_pf) {
+ int order = highest_order(orders);
+ unsigned long addr;
+
+ while (orders) {
+ addr = vma->vm_end - (PAGE_SIZE << order);
+ if (thp_vma_suitable_order(vma, addr, order))
+ break;
+ order = next_order(&orders, order);
+ }
+
+ if (!orders)
+ return 0;
+ }
/*
* Enabled via shmem mount options or sysfs settings.
@@ -124,29 +147,33 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
*/
if (!in_pf && shmem_file(vma->vm_file))
return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
- !enforce_sysfs, vma->vm_mm, vm_flags);
-
- /* Enforce sysfs THP requirements as necessary */
- if (enforce_sysfs &&
- (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
- !hugepage_flags_always())))
- return false;
+ !enforce_sysfs, vma->vm_mm, vm_flags)
+ ? orders : 0;
if (!vma_is_anonymous(vma)) {
/*
+ * Enforce sysfs THP requirements as necessary. Anonymous vmas
+ * were already handled in thp_vma_allowable_orders().
+ */
+ if (enforce_sysfs &&
+ (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
+ !hugepage_global_always())))
+ return 0;
+
+ /*
* Trust that ->huge_fault() handlers know what they are doing
* in fault path.
*/
if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
- return true;
+ return orders;
/* Only regular file is valid in collapse path */
if (((!in_pf || smaps)) && file_thp_enabled(vma))
- return true;
- return false;
+ return orders;
+ return 0;
}
if (vma_is_temporary_stack(vma))
- return false;
+ return 0;
/*
* THPeligible bit of smaps should show 1 for proper VMAs even
@@ -156,9 +183,9 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
* the first page fault.
*/
if (!vma->anon_vma)
- return (smaps || in_pf);
+ return (smaps || in_pf) ? orders : 0;
- return true;
+ return orders;
}
static bool get_huge_zero_page(void)
@@ -412,9 +439,136 @@ static const struct attribute_group hugepage_attr_group = {
.attrs = hugepage_attr,
};
+static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
+static void thpsize_release(struct kobject *kobj);
+static DEFINE_SPINLOCK(huge_anon_orders_lock);
+static LIST_HEAD(thpsize_list);
+
+struct thpsize {
+ struct kobject kobj;
+ struct list_head node;
+ int order;
+};
+
+#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
+
+static ssize_t thpsize_enabled_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int order = to_thpsize(kobj)->order;
+ const char *output;
+
+ if (test_bit(order, &huge_anon_orders_always))
+ output = "[always] inherit madvise never";
+ else if (test_bit(order, &huge_anon_orders_inherit))
+ output = "always [inherit] madvise never";
+ else if (test_bit(order, &huge_anon_orders_madvise))
+ output = "always inherit [madvise] never";
+ else
+ output = "always inherit madvise [never]";
+
+ return sysfs_emit(buf, "%s\n", output);
+}
+
+static ssize_t thpsize_enabled_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int order = to_thpsize(kobj)->order;
+ ssize_t ret = count;
+
+ if (sysfs_streq(buf, "always")) {
+ spin_lock(&huge_anon_orders_lock);
+ clear_bit(order, &huge_anon_orders_inherit);
+ clear_bit(order, &huge_anon_orders_madvise);
+ set_bit(order, &huge_anon_orders_always);
+ spin_unlock(&huge_anon_orders_lock);
+ } else if (sysfs_streq(buf, "inherit")) {
+ spin_lock(&huge_anon_orders_lock);
+ clear_bit(order, &huge_anon_orders_always);
+ clear_bit(order, &huge_anon_orders_madvise);
+ set_bit(order, &huge_anon_orders_inherit);
+ spin_unlock(&huge_anon_orders_lock);
+ } else if (sysfs_streq(buf, "madvise")) {
+ spin_lock(&huge_anon_orders_lock);
+ clear_bit(order, &huge_anon_orders_always);
+ clear_bit(order, &huge_anon_orders_inherit);
+ set_bit(order, &huge_anon_orders_madvise);
+ spin_unlock(&huge_anon_orders_lock);
+ } else if (sysfs_streq(buf, "never")) {
+ spin_lock(&huge_anon_orders_lock);
+ clear_bit(order, &huge_anon_orders_always);
+ clear_bit(order, &huge_anon_orders_inherit);
+ clear_bit(order, &huge_anon_orders_madvise);
+ spin_unlock(&huge_anon_orders_lock);
+ } else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static struct kobj_attribute thpsize_enabled_attr =
+ __ATTR(enabled, 0644, thpsize_enabled_show, thpsize_enabled_store);
+
+static struct attribute *thpsize_attrs[] = {
+ &thpsize_enabled_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group thpsize_attr_group = {
+ .attrs = thpsize_attrs,
+};
+
+static const struct kobj_type thpsize_ktype = {
+ .release = &thpsize_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static struct thpsize *thpsize_create(int order, struct kobject *parent)
+{
+ unsigned long size = (PAGE_SIZE << order) / SZ_1K;
+ struct thpsize *thpsize;
+ int ret;
+
+ thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
+ if (!thpsize)
+ return ERR_PTR(-ENOMEM);
+
+ ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
+ "hugepages-%lukB", size);
+ if (ret) {
+ kfree(thpsize);
+ return ERR_PTR(ret);
+ }
+
+ ret = sysfs_create_group(&thpsize->kobj, &thpsize_attr_group);
+ if (ret) {
+ kobject_put(&thpsize->kobj);
+ return ERR_PTR(ret);
+ }
+
+ thpsize->order = order;
+ return thpsize;
+}
+
+static void thpsize_release(struct kobject *kobj)
+{
+ kfree(to_thpsize(kobj));
+}
+
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
int err;
+ struct thpsize *thpsize;
+ unsigned long orders;
+ int order;
+
+ /*
+ * Default to setting PMD-sized THP to inherit the global setting and
+ * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
+ * constant so we have to do this here.
+ */
+ huge_anon_orders_inherit = BIT(PMD_ORDER);
*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
if (unlikely(!*hugepage_kobj)) {
@@ -434,8 +588,24 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
goto remove_hp_group;
}
+ orders = THP_ORDERS_ALL_ANON;
+ order = highest_order(orders);
+ while (orders) {
+ thpsize = thpsize_create(order, *hugepage_kobj);
+ if (IS_ERR(thpsize)) {
+ pr_err("failed to create thpsize for order %d\n", order);
+ err = PTR_ERR(thpsize);
+ goto remove_all;
+ }
+ list_add(&thpsize->node, &thpsize_list);
+ order = next_order(&orders, order);
+ }
+
return 0;
+remove_all:
+ hugepage_exit_sysfs(*hugepage_kobj);
+ return err;
remove_hp_group:
sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
@@ -445,6 +615,13 @@ delete_obj:
static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
+ struct thpsize *thpsize, *tmp;
+
+ list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
+ list_del(&thpsize->node);
+ kobject_put(&thpsize->kobj);
+ }
+
sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
kobject_put(hugepage_kobj);
@@ -505,7 +682,7 @@ static int __init hugepage_init(void)
/*
* hugepages can't be allocated by the buddy allocator
*/
- MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_ORDER);
+ MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
/*
* we use page->mapping and page->index in second tail page
* as list_head: assuming THP order >= 2
@@ -811,7 +988,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
struct folio *folio;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
- if (!transhuge_vma_suitable(vma, haddr))
+ if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
return VM_FAULT_FALLBACK;
if (unlikely(anon_vma_prepare(vma)))
return VM_FAULT_OOM;
@@ -1098,6 +1275,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
{
spinlock_t *dst_ptl, *src_ptl;
struct page *src_page;
+ struct folio *src_folio;
pmd_t pmd;
pgtable_t pgtable = NULL;
int ret = -ENOMEM;
@@ -1164,11 +1342,12 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
src_page = pmd_page(pmd);
VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
+ src_folio = page_folio(src_page);
- get_page(src_page);
- if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) {
+ folio_get(src_folio);
+ if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
/* Page maybe pinned: split and retry the fault on PTEs. */
- put_page(src_page);
+ folio_put(src_folio);
pte_free(dst_mm, pgtable);
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
@@ -1277,8 +1456,8 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
}
/*
- * TODO: once we support anonymous pages, use page_try_dup_anon_rmap()
- * and split if duplicating fails.
+ * TODO: once we support anonymous pages, use
+ * folio_try_dup_anon_rmap_*() and split if duplicating fails.
*/
pudp_set_wrprotect(src_mm, addr, src_pud);
pud = pud_mkold(pud_wrprotect(pud));
@@ -1721,7 +1900,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (pmd_present(orig_pmd)) {
page = pmd_page(orig_pmd);
- page_remove_rmap(page, vma, true);
+ folio_remove_rmap_pmd(page_folio(page), page, vma);
VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
VM_BUG_ON_PAGE(!PageHead(page), page);
} else if (thp_migration_supported()) {
@@ -1964,6 +2143,128 @@ unlock:
return ret;
}
+#ifdef CONFIG_USERFAULTFD
+/*
+ * The PT lock for src_pmd and the mmap_lock for reading are held by
+ * the caller, but it must return after releasing the page_table_lock.
+ * Just move the page from src_pmd to dst_pmd if possible.
+ * Return zero if the page was moved successfully, -EAGAIN if it needs to be
+ * repeated by the caller, or other errors in case of failure.
+ */
+int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
+ struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+ unsigned long dst_addr, unsigned long src_addr)
+{
+ pmd_t _dst_pmd, src_pmdval;
+ struct page *src_page;
+ struct folio *src_folio;
+ struct anon_vma *src_anon_vma;
+ spinlock_t *src_ptl, *dst_ptl;
+ pgtable_t src_pgtable;
+ struct mmu_notifier_range range;
+ int err = 0;
+
+ src_pmdval = *src_pmd;
+ src_ptl = pmd_lockptr(mm, src_pmd);
+
+ lockdep_assert_held(src_ptl);
+ mmap_assert_locked(mm);
+
+ /* Sanity checks before the operation */
+ if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
+ WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
+ spin_unlock(src_ptl);
+ return -EINVAL;
+ }
+
+ if (!pmd_trans_huge(src_pmdval)) {
+ spin_unlock(src_ptl);
+ if (is_pmd_migration_entry(src_pmdval)) {
+ pmd_migration_entry_wait(mm, &src_pmdval);
+ return -EAGAIN;
+ }
+ return -ENOENT;
+ }
+
+ src_page = pmd_page(src_pmdval);
+ if (unlikely(!PageAnonExclusive(src_page))) {
+ spin_unlock(src_ptl);
+ return -EBUSY;
+ }
+
+ src_folio = page_folio(src_page);
+ folio_get(src_folio);
+ spin_unlock(src_ptl);
+
+ flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
+ src_addr + HPAGE_PMD_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
+
+ folio_lock(src_folio);
+
+ /*
+ * split_huge_page walks the anon_vma chain without the page
+ * lock. Serialize against it with the anon_vma lock, the page
+ * lock is not enough.
+ */
+ src_anon_vma = folio_get_anon_vma(src_folio);
+ if (!src_anon_vma) {
+ err = -EAGAIN;
+ goto unlock_folio;
+ }
+ anon_vma_lock_write(src_anon_vma);
+
+ dst_ptl = pmd_lockptr(mm, dst_pmd);
+ double_pt_lock(src_ptl, dst_ptl);
+ if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
+ !pmd_same(*dst_pmd, dst_pmdval))) {
+ err = -EAGAIN;
+ goto unlock_ptls;
+ }
+ if (folio_maybe_dma_pinned(src_folio) ||
+ !PageAnonExclusive(&src_folio->page)) {
+ err = -EBUSY;
+ goto unlock_ptls;
+ }
+
+ if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
+ WARN_ON_ONCE(!folio_test_anon(src_folio))) {
+ err = -EBUSY;
+ goto unlock_ptls;
+ }
+
+ folio_move_anon_rmap(src_folio, dst_vma);
+ WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
+
+ src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
+ /* Folio got pinned from under us. Put it back and fail the move. */
+ if (folio_maybe_dma_pinned(src_folio)) {
+ set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
+ err = -EBUSY;
+ goto unlock_ptls;
+ }
+
+ _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
+ /* Follow mremap() behavior and treat the entry as dirty after the move */
+ _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
+ set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
+
+ src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
+ pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
+unlock_ptls:
+ double_pt_unlock(src_ptl, dst_ptl);
+ anon_vma_unlock_write(src_anon_vma);
+ put_anon_vma(src_anon_vma);
+unlock_folio:
+ /* unblock rmap walks */
+ folio_unlock(src_folio);
+ mmu_notifier_invalidate_range_end(&range);
+ folio_put(src_folio);
+ return err;
+}
+#endif /* CONFIG_USERFAULTFD */
+
/*
* Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
*
@@ -2099,6 +2400,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long haddr, bool freeze)
{
struct mm_struct *mm = vma->vm_mm;
+ struct folio *folio;
struct page *page;
pgtable_t pgtable;
pmd_t old_pmd, _pmd;
@@ -2133,12 +2435,13 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
page = pfn_swap_entry_to_page(entry);
} else {
page = pmd_page(old_pmd);
- if (!PageDirty(page) && pmd_dirty(old_pmd))
- set_page_dirty(page);
- if (!PageReferenced(page) && pmd_young(old_pmd))
- SetPageReferenced(page);
- page_remove_rmap(page, vma, true);
- put_page(page);
+ folio = page_folio(page);
+ if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
+ folio_set_dirty(folio);
+ if (!folio_test_referenced(folio) && pmd_young(old_pmd))
+ folio_set_referenced(folio);
+ folio_remove_rmap_pmd(folio, page, vma);
+ folio_put(folio);
}
add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
return;
@@ -2194,16 +2497,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
uffd_wp = pmd_swp_uffd_wp(old_pmd);
} else {
page = pmd_page(old_pmd);
+ folio = page_folio(page);
if (pmd_dirty(old_pmd)) {
dirty = true;
- SetPageDirty(page);
+ folio_set_dirty(folio);
}
write = pmd_write(old_pmd);
young = pmd_young(old_pmd);
soft_dirty = pmd_soft_dirty(old_pmd);
uffd_wp = pmd_uffd_wp(old_pmd);
- VM_BUG_ON_PAGE(!page_count(page), page);
+ VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
/*
* Without "freeze", we'll simply split the PMD, propagating the
@@ -2218,13 +2523,21 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
* In case we cannot clear PageAnonExclusive(), split the PMD
* only and let try_to_migrate_one() fail later.
*
- * See page_try_share_anon_rmap(): invalidate PMD first.
+ * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
*/
- anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
- if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
+ anon_exclusive = PageAnonExclusive(page);
+ if (freeze && anon_exclusive &&
+ folio_try_share_anon_rmap_pmd(folio, page))
freeze = false;
- if (!freeze)
- page_ref_add(page, HPAGE_PMD_NR - 1);
+ if (!freeze) {
+ rmap_t rmap_flags = RMAP_NONE;
+
+ folio_ref_add(folio, HPAGE_PMD_NR - 1);
+ if (anon_exclusive)
+ rmap_flags |= RMAP_EXCLUSIVE;
+ folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
+ vma, haddr, rmap_flags);
+ }
}
/*
@@ -2267,8 +2580,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
if (write)
entry = pte_mkwrite(entry, vma);
- if (anon_exclusive)
- SetPageAnonExclusive(page + i);
if (!young)
entry = pte_mkold(entry);
/* NOTE: this may set soft-dirty too on some archs */
@@ -2278,7 +2589,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
entry = pte_mksoft_dirty(entry);
if (uffd_wp)
entry = pte_mkuffd_wp(entry);
- page_add_anon_rmap(page + i, vma, addr, RMAP_NONE);
}
VM_BUG_ON(!pte_none(ptep_get(pte)));
set_pte_at(mm, addr, pte, entry);
@@ -2287,7 +2597,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
pte_unmap(pte - 1);
if (!pmd_migration)
- page_remove_rmap(page, vma, true);
+ folio_remove_rmap_pmd(folio, page, vma);
if (freeze)
put_page(page);
@@ -2379,7 +2689,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
static void unmap_folio(struct folio *folio)
{
enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
- TTU_SYNC;
+ TTU_SYNC | TTU_BATCH_FLUSH;
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
@@ -2392,6 +2702,8 @@ static void unmap_folio(struct folio *folio)
try_to_migrate(folio, ttu_flags);
else
try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
+
+ try_to_unmap_flush();
}
static void remap_page(struct folio *folio, unsigned long nr)
@@ -2507,13 +2819,13 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
clear_compound_head(page_tail);
/* Finally unfreeze refcount. Additional reference from page cache. */
- page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
- PageSwapCache(head)));
+ page_ref_unfreeze(page_tail, 1 + (!folio_test_anon(folio) ||
+ folio_test_swapcache(folio)));
- if (page_is_young(head))
- set_page_young(page_tail);
- if (page_is_idle(head))
- set_page_idle(page_tail);
+ if (folio_test_young(folio))
+ folio_set_young(new_folio);
+ if (folio_test_idle(folio))
+ folio_set_idle(new_folio);
folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
@@ -2823,7 +3135,7 @@ void folio_undo_large_rmappable(struct folio *folio)
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
if (!list_empty(&folio->_deferred_list)) {
ds_queue->split_queue_len--;
- list_del(&folio->_deferred_list);
+ list_del_init(&folio->_deferred_list);
}
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
}
@@ -3228,6 +3540,7 @@ late_initcall(split_huge_pages_debugfs);
int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page)
{
+ struct folio *folio = page_folio(page);
struct vm_area_struct *vma = pvmw->vma;
struct mm_struct *mm = vma->vm_mm;
unsigned long address = pvmw->address;
@@ -3242,15 +3555,15 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
- /* See page_try_share_anon_rmap(): invalidate PMD first. */
- anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
- if (anon_exclusive && page_try_share_anon_rmap(page)) {
+ /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
+ anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
+ if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
set_pmd_at(mm, address, pvmw->pmd, pmdval);
return -EBUSY;
}
if (pmd_dirty(pmdval))
- set_page_dirty(page);
+ folio_set_dirty(folio);
if (pmd_write(pmdval))
entry = make_writable_migration_entry(page_to_pfn(page));
else if (anon_exclusive)
@@ -3267,8 +3580,8 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
if (pmd_uffd_wp(pmdval))
pmdswp = pmd_swp_mkuffd_wp(pmdswp);
set_pmd_at(mm, address, pvmw->pmd, pmdswp);
- page_remove_rmap(page, vma, true);
- put_page(page);
+ folio_remove_rmap_pmd(folio, page, vma);
+ folio_put(folio);
trace_set_migration_pmd(address, pmd_val(pmdswp));
return 0;
@@ -3276,6 +3589,7 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
+ struct folio *folio = page_folio(new);
struct vm_area_struct *vma = pvmw->vma;
struct mm_struct *mm = vma->vm_mm;
unsigned long address = pvmw->address;
@@ -3287,7 +3601,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
return;
entry = pmd_to_swp_entry(*pvmw->pmd);
- get_page(new);
+ folio_get(folio);
pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
if (pmd_swp_soft_dirty(*pvmw->pmd))
pmde = pmd_mksoft_dirty(pmde);
@@ -3298,20 +3612,20 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
if (!is_migration_entry_young(entry))
pmde = pmd_mkold(pmde);
/* NOTE: this may contain setting soft-dirty on some archs */
- if (PageDirty(new) && is_migration_entry_dirty(entry))
+ if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
pmde = pmd_mkdirty(pmde);
- if (PageAnon(new)) {
- rmap_t rmap_flags = RMAP_COMPOUND;
+ if (folio_test_anon(folio)) {
+ rmap_t rmap_flags = RMAP_NONE;
if (!is_readable_migration_entry(entry))
rmap_flags |= RMAP_EXCLUSIVE;
- page_add_anon_rmap(new, vma, haddr, rmap_flags);
+ folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
} else {
- page_add_file_rmap(new, vma, true);
+ folio_add_file_rmap_pmd(folio, new, vma);
}
- VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new));
+ VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
set_pmd_at(mm, haddr, pvmw->pmd, pmde);
/* No need to invalidate - it was non-present before */
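
The mm/huge_memory.c hunks above convert the PMD-level rmap calls from the old page-based API (page_add_anon_rmap(), page_add_file_rmap(), page_remove_rmap() with the compound flag) to folio variants carrying an explicit _pmd suffix. A minimal sketch of the resulting call pattern, assuming a hypothetical caller that maps and later unmaps a PMD-sized folio; only the folio_* calls themselves are taken from the patch:

/* Illustrative sketch only, not part of the patch. */
static void pmd_rmap_example(struct folio *folio, struct page *page,
			     struct vm_area_struct *vma, unsigned long haddr)
{
	/* folio_get()/folio_put() replace get_page()/put_page(). */
	folio_get(folio);

	if (folio_test_anon(folio))
		/* RMAP_NONE (optionally | RMAP_EXCLUSIVE) replaces the old
		 * RMAP_COMPOUND flag; the _pmd suffix now conveys the
		 * mapping granularity. */
		folio_add_anon_rmap_pmd(folio, page, vma, haddr, RMAP_NONE);
	else
		folio_add_file_rmap_pmd(folio, page, vma);

	/* Tear-down: was page_remove_rmap(page, vma, true). */
	folio_remove_rmap_pmd(folio, page, vma);
	folio_put(folio);
}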
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6feb3e0630d1..ed1581b670d4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1141,7 +1141,7 @@ static inline struct resv_map *inode_resv_map(struct inode *inode)
* The VERY common case is inode->mapping == &inode->i_data, but
* this may not be true for device special inodes.
*/
- return (struct resv_map *)(&inode->i_data)->private_data;
+ return (struct resv_map *)(&inode->i_data)->i_private_data;
}
static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
@@ -3410,7 +3410,7 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h,
/*
* Put bootmem huge pages into the standard lists after mem_map is up.
- * Note: This only applies to gigantic (order > MAX_ORDER) pages.
+ * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
*/
static void __init gather_bootmem_prealloc(void)
{
@@ -4790,7 +4790,7 @@ static int __init default_hugepagesz_setup(char *s)
* The number of default huge pages (for this size) could have been
* specified as the first hugetlb parameter: hugepages=X. If so,
* then default_hstate_max_huge_pages is set. If the default huge
- * page size is gigantic (> MAX_ORDER), then the pages must be
+ * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be
* allocated here from bootmem allocator.
*/
if (default_hstate_max_huge_pages) {
@@ -5285,7 +5285,7 @@ hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long add
pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);
__folio_mark_uptodate(new_folio);
- hugepage_add_new_anon_rmap(new_folio, vma, addr);
+ hugetlb_add_new_anon_rmap(new_folio, vma, addr);
if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
newpte = huge_pte_mkuffd_wp(newpte);
set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
@@ -5408,9 +5408,8 @@ again:
* sleep during the process.
*/
if (!folio_test_anon(pte_folio)) {
- page_dup_file_rmap(&pte_folio->page, true);
- } else if (page_try_dup_anon_rmap(&pte_folio->page,
- true, src_vma)) {
+ hugetlb_add_file_rmap(pte_folio);
+ } else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
pte_t src_pte_old = entry;
struct folio *new_folio;
@@ -5676,7 +5675,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
make_pte_marker(PTE_MARKER_UFFD_WP),
sz);
hugetlb_count_sub(pages_per_huge_page(h), mm);
- page_remove_rmap(page, vma, true);
+ hugetlb_remove_rmap(page_folio(page));
spin_unlock(ptl);
tlb_remove_page_size(tlb, page, huge_page_size(h));
@@ -5987,8 +5986,8 @@ retry_avoidcopy:
/* Break COW or unshare */
huge_ptep_clear_flush(vma, haddr, ptep);
- page_remove_rmap(&old_folio->page, vma, true);
- hugepage_add_new_anon_rmap(new_folio, vma, haddr);
+ hugetlb_remove_rmap(old_folio);
+ hugetlb_add_new_anon_rmap(new_folio, vma, haddr);
if (huge_pte_uffd_wp(pte))
newpte = huge_pte_mkuffd_wp(newpte);
set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
@@ -6277,9 +6276,9 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
goto backout;
if (anon_rmap)
- hugepage_add_new_anon_rmap(folio, vma, haddr);
+ hugetlb_add_new_anon_rmap(folio, vma, haddr);
else
- page_dup_file_rmap(&folio->page, true);
+ hugetlb_add_file_rmap(folio);
new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE)
&& (vma->vm_flags & VM_SHARED)));
/*
@@ -6730,9 +6729,9 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
goto out_release_unlock;
if (folio_in_pagecache)
- page_dup_file_rmap(&folio->page, true);
+ hugetlb_add_file_rmap(folio);
else
- hugepage_add_new_anon_rmap(folio, dst_vma, dst_addr);
+ hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr);
/*
* For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
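
The mm/hugetlb.c hunks above make the same switch through dedicated hugetlb_* rmap helpers. A hedged sketch of the old-to-new mapping, using only the helper names visible in the hunks (the wrapper function below is hypothetical):

/* Illustrative only: hugetlb rmap calls after the conversion. */
static void hugetlb_rmap_example(struct folio *folio,
				 struct vm_area_struct *vma,
				 unsigned long haddr, bool anon)
{
	if (anon)
		/* was hugepage_add_new_anon_rmap(folio, vma, haddr) */
		hugetlb_add_new_anon_rmap(folio, vma, haddr);
	else
		/* was page_dup_file_rmap(&folio->page, true) */
		hugetlb_add_file_rmap(folio);

	/* On unmap: was page_remove_rmap(&folio->page, vma, true). */
	hugetlb_remove_rmap(folio);
}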
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 87818ee7f01d..da177e49d956 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -14,6 +14,7 @@
#include <linux/moduleparam.h>
#include <linux/bootmem_info.h>
#include <linux/mmdebug.h>
+#include <linux/pagewalk.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "hugetlb_vmemmap.h"
@@ -45,21 +46,14 @@ struct vmemmap_remap_walk {
unsigned long flags;
};
-static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start, bool flush)
+static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start,
+ struct vmemmap_remap_walk *walk)
{
pmd_t __pmd;
int i;
unsigned long addr = start;
- struct page *head;
pte_t *pgtable;
- spin_lock(&init_mm.page_table_lock);
- head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
- spin_unlock(&init_mm.page_table_lock);
-
- if (!head)
- return 0;
-
pgtable = pte_alloc_one_kernel(&init_mm);
if (!pgtable)
return -ENOMEM;
@@ -88,7 +82,7 @@ static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start, bool flush)
/* Make pte visible before pmd. See comment in pmd_install(). */
smp_wmb();
pmd_populate_kernel(&init_mm, pmd, pgtable);
- if (flush)
+ if (!(walk->flags & VMEMMAP_SPLIT_NO_TLB_FLUSH))
flush_tlb_kernel_range(start, start + PMD_SIZE);
} else {
pte_free_kernel(&init_mm, pgtable);
@@ -98,123 +92,83 @@ static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start, bool flush)
return 0;
}
-static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end,
- struct vmemmap_remap_walk *walk)
+static int vmemmap_pmd_entry(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
{
- pte_t *pte = pte_offset_kernel(pmd, addr);
+ int ret = 0;
+ struct page *head;
+ struct vmemmap_remap_walk *vmemmap_walk = walk->private;
+
+ /* Only splitting, not remapping the vmemmap pages. */
+ if (!vmemmap_walk->remap_pte)
+ walk->action = ACTION_CONTINUE;
+ spin_lock(&init_mm.page_table_lock);
+ head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
/*
- * The reuse_page is found 'first' in table walk before we start
- * remapping (which is calling @walk->remap_pte).
+ * Due to HugeTLB alignment requirements, and because the vmemmap
+ * pages sit at the start of the hotplugged memory region in the
+ * memory_hotplug.memmap_on_memory case, it is sufficient to check
+ * whether the vmemmap page associated with the first vmemmap page
+ * is self-hosted.
+ *
+ * [ hotplugged memory ]
+ * [ section ][...][ section ]
+ * [ vmemmap ][ usable memory ]
+ * ^ | ^ |
+ * +--+ | |
+ * +------------------------+
*/
- if (!walk->reuse_page) {
- walk->reuse_page = pte_page(ptep_get(pte));
- /*
- * Because the reuse address is part of the range that we are
- * walking, skip the reuse address range.
- */
- addr += PAGE_SIZE;
- pte++;
- walk->nr_walked++;
- }
+ if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && unlikely(!vmemmap_walk->nr_walked)) {
+ struct page *page = head ? head + pte_index(addr) :
+ pte_page(ptep_get(pte_offset_kernel(pmd, addr)));
- for (; addr != end; addr += PAGE_SIZE, pte++) {
- walk->remap_pte(pte, addr, walk);
- walk->nr_walked++;
+ if (PageVmemmapSelfHosted(page))
+ ret = -ENOTSUPP;
}
-}
-
-static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end,
- struct vmemmap_remap_walk *walk)
-{
- pmd_t *pmd;
- unsigned long next;
-
- pmd = pmd_offset(pud, addr);
- do {
- int ret;
-
- ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK,
- !(walk->flags & VMEMMAP_SPLIT_NO_TLB_FLUSH));
- if (ret)
- return ret;
-
- next = pmd_addr_end(addr, end);
-
- /*
- * We are only splitting, not remapping the hugetlb vmemmap
- * pages.
- */
- if (!walk->remap_pte)
- continue;
-
- vmemmap_pte_range(pmd, addr, next, walk);
- } while (pmd++, addr = next, addr != end);
+ spin_unlock(&init_mm.page_table_lock);
+ if (!head || ret)
+ return ret;
- return 0;
+ return vmemmap_split_pmd(pmd, head, addr & PMD_MASK, vmemmap_walk);
}
-static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
- unsigned long end,
- struct vmemmap_remap_walk *walk)
+static int vmemmap_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
{
- pud_t *pud;
- unsigned long next;
+ struct vmemmap_remap_walk *vmemmap_walk = walk->private;
- pud = pud_offset(p4d, addr);
- do {
- int ret;
-
- next = pud_addr_end(addr, end);
- ret = vmemmap_pmd_range(pud, addr, next, walk);
- if (ret)
- return ret;
- } while (pud++, addr = next, addr != end);
+ /*
+ * The reuse_page is found 'first' in page table walking before
+ * starting remapping.
+ */
+ if (!vmemmap_walk->reuse_page)
+ vmemmap_walk->reuse_page = pte_page(ptep_get(pte));
+ else
+ vmemmap_walk->remap_pte(pte, addr, vmemmap_walk);
+ vmemmap_walk->nr_walked++;
return 0;
}
-static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
- unsigned long end,
- struct vmemmap_remap_walk *walk)
-{
- p4d_t *p4d;
- unsigned long next;
-
- p4d = p4d_offset(pgd, addr);
- do {
- int ret;
-
- next = p4d_addr_end(addr, end);
- ret = vmemmap_pud_range(p4d, addr, next, walk);
- if (ret)
- return ret;
- } while (p4d++, addr = next, addr != end);
-
- return 0;
-}
+static const struct mm_walk_ops vmemmap_remap_ops = {
+ .pmd_entry = vmemmap_pmd_entry,
+ .pte_entry = vmemmap_pte_entry,
+};
static int vmemmap_remap_range(unsigned long start, unsigned long end,
struct vmemmap_remap_walk *walk)
{
- unsigned long addr = start;
- unsigned long next;
- pgd_t *pgd;
-
- VM_BUG_ON(!PAGE_ALIGNED(start));
- VM_BUG_ON(!PAGE_ALIGNED(end));
+ int ret;
- pgd = pgd_offset_k(addr);
- do {
- int ret;
+ VM_BUG_ON(!PAGE_ALIGNED(start | end));
- next = pgd_addr_end(addr, end);
- ret = vmemmap_p4d_range(pgd, addr, next, walk);
- if (ret)
- return ret;
- } while (pgd++, addr = next, addr != end);
+ mmap_read_lock(&init_mm);
+ ret = walk_page_range_novma(&init_mm, start, end, &vmemmap_remap_ops,
+ NULL, walk);
+ mmap_read_unlock(&init_mm);
+ if (ret)
+ return ret;
if (walk->remap_pte && !(walk->flags & VMEMMAP_REMAP_NO_TLB_FLUSH))
flush_tlb_kernel_range(start, end);
@@ -328,9 +282,8 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
* Return: %0 on success, negative error code otherwise.
*/
static int vmemmap_remap_split(unsigned long start, unsigned long end,
- unsigned long reuse)
+ unsigned long reuse)
{
- int ret;
struct vmemmap_remap_walk walk = {
.remap_pte = NULL,
.flags = VMEMMAP_SPLIT_NO_TLB_FLUSH,
@@ -339,11 +292,7 @@ static int vmemmap_remap_split(unsigned long start, unsigned long end,
/* See the comment in the vmemmap_remap_free(). */
BUG_ON(start - reuse != PAGE_SIZE);
- mmap_read_lock(&init_mm);
- ret = vmemmap_remap_range(reuse, end, &walk);
- mmap_read_unlock(&init_mm);
-
- return ret;
+ return vmemmap_remap_range(reuse, end, &walk);
}
/**
@@ -406,7 +355,6 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end,
*/
BUG_ON(start - reuse != PAGE_SIZE);
- mmap_read_lock(&init_mm);
ret = vmemmap_remap_range(reuse, end, &walk);
if (ret && walk.nr_walked) {
end = reuse + walk.nr_walked * PAGE_SIZE;
@@ -425,7 +373,6 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end,
vmemmap_remap_range(reuse, end, &walk);
}
- mmap_read_unlock(&init_mm);
return ret;
}
@@ -482,11 +429,7 @@ static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
if (alloc_vmemmap_page_list(start, end, &vmemmap_pages))
return -ENOMEM;
- mmap_read_lock(&init_mm);
- vmemmap_remap_range(reuse, end, &walk);
- mmap_read_unlock(&init_mm);
-
- return 0;
+ return vmemmap_remap_range(reuse, end, &walk);
}
DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
@@ -495,14 +438,14 @@ EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
-static int __hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio, unsigned long flags)
+static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
+ struct folio *folio, unsigned long flags)
{
int ret;
- struct page *head = &folio->page;
- unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+ unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
unsigned long vmemmap_reuse;
- VM_WARN_ON_ONCE(!PageHuge(head));
+ VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
if (!folio_test_hugetlb_vmemmap_optimized(folio))
return 0;
@@ -565,7 +508,7 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
if (folio_test_hugetlb_vmemmap_optimized(folio)) {
ret = __hugetlb_vmemmap_restore_folio(h, folio,
- VMEMMAP_REMAP_NO_TLB_FLUSH);
+ VMEMMAP_REMAP_NO_TLB_FLUSH);
if (ret)
break;
restored++;
@@ -583,9 +526,9 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
}
/* Return true iff a HugeTLB whose vmemmap should and can be optimized. */
-static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
+static bool vmemmap_should_optimize_folio(const struct hstate *h, struct folio *folio)
{
- if (HPageVmemmapOptimized((struct page *)head))
+ if (folio_test_hugetlb_vmemmap_optimized(folio))
return false;
if (!READ_ONCE(vmemmap_optimize_enabled))
@@ -594,65 +537,20 @@ static bool vmemmap_should_optimize(const struct hstate *h, const struct page *h
if (!hugetlb_vmemmap_optimizable(h))
return false;
- if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
- pmd_t *pmdp, pmd;
- struct page *vmemmap_page;
- unsigned long vaddr = (unsigned long)head;
-
- /*
- * Only the vmemmap page's vmemmap page can be self-hosted.
- * Walking the page tables to find the backing page of the
- * vmemmap page.
- */
- pmdp = pmd_off_k(vaddr);
- /*
- * The READ_ONCE() is used to stabilize *pmdp in a register or
- * on the stack so that it will stop changing under the code.
- * The only concurrent operation where it can be changed is
- * split_vmemmap_huge_pmd() (*pmdp will be stable after this
- * operation).
- */
- pmd = READ_ONCE(*pmdp);
- if (pmd_leaf(pmd))
- vmemmap_page = pmd_page(pmd) + pte_index(vaddr);
- else
- vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr));
- /*
- * Due to HugeTLB alignment requirements and the vmemmap pages
- * being at the start of the hotplugged memory region in
- * memory_hotplug.memmap_on_memory case. Checking any vmemmap
- * page's vmemmap page if it is marked as VmemmapSelfHosted is
- * sufficient.
- *
- * [ hotplugged memory ]
- * [ section ][...][ section ]
- * [ vmemmap ][ usable memory ]
- * ^ | | |
- * +---+ | |
- * ^ | |
- * +-------+ |
- * ^ |
- * +-------------------------------------------+
- */
- if (PageVmemmapSelfHosted(vmemmap_page))
- return false;
- }
-
return true;
}
static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
- struct folio *folio,
- struct list_head *vmemmap_pages,
- unsigned long flags)
+ struct folio *folio,
+ struct list_head *vmemmap_pages,
+ unsigned long flags)
{
int ret = 0;
- struct page *head = &folio->page;
- unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+ unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
unsigned long vmemmap_reuse;
- VM_WARN_ON_ONCE(!PageHuge(head));
- if (!vmemmap_should_optimize(h, head))
+ VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
+ if (!vmemmap_should_optimize_folio(h, folio))
return ret;
static_branch_inc(&hugetlb_optimize_vmemmap_key);
@@ -680,7 +578,7 @@ static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
* the caller.
*/
ret = vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse,
- vmemmap_pages, flags);
+ vmemmap_pages, flags);
if (ret) {
static_branch_dec(&hugetlb_optimize_vmemmap_key);
folio_clear_hugetlb_vmemmap_optimized(folio);
@@ -707,12 +605,12 @@ void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
free_vmemmap_page_list(&vmemmap_pages);
}
-static int hugetlb_vmemmap_split(const struct hstate *h, struct page *head)
+static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *folio)
{
- unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+ unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
unsigned long vmemmap_reuse;
- if (!vmemmap_should_optimize(h, head))
+ if (!vmemmap_should_optimize_folio(h, folio))
return 0;
vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
@@ -732,7 +630,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
LIST_HEAD(vmemmap_pages);
list_for_each_entry(folio, folio_list, lru) {
- int ret = hugetlb_vmemmap_split(h, &folio->page);
+ int ret = hugetlb_vmemmap_split_folio(h, folio);
/*
* Splitting the PMD requires allocating a page, so let's fail
@@ -747,9 +645,10 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
flush_tlb_all();
list_for_each_entry(folio, folio_list, lru) {
- int ret = __hugetlb_vmemmap_optimize_folio(h, folio,
- &vmemmap_pages,
- VMEMMAP_REMAP_NO_TLB_FLUSH);
+ int ret;
+
+ ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
+ VMEMMAP_REMAP_NO_TLB_FLUSH);
/*
* Pages to be freed may have been accumulated. If we
@@ -763,9 +662,8 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
flush_tlb_all();
free_vmemmap_page_list(&vmemmap_pages);
INIT_LIST_HEAD(&vmemmap_pages);
- __hugetlb_vmemmap_optimize_folio(h, folio,
- &vmemmap_pages,
- VMEMMAP_REMAP_NO_TLB_FLUSH);
+ __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
+ VMEMMAP_REMAP_NO_TLB_FLUSH);
}
}
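
The mm/hugetlb_vmemmap.c rewrite above drops the open-coded pgd/p4d/pud/pmd loops in favour of the generic pagewalk machinery: a const struct mm_walk_ops with pmd_entry/pte_entry callbacks driven by walk_page_range_novma() over init_mm, with init_mm's mmap lock taken once in vmemmap_remap_range(). A minimal sketch of that pattern under the same assumptions; the callback and function names below are illustrative:

/* Illustrative sketch only, not part of the patch. */
#include <linux/pagewalk.h>

static int example_pte_entry(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	/* walk->private carries the caller's state, as vmemmap_remap_walk
	 * does in the hunks above. */
	return 0;
}

static const struct mm_walk_ops example_ops = {
	.pte_entry = example_pte_entry,
};

static int example_walk_kernel_range(unsigned long start, unsigned long end,
				     void *priv)
{
	int ret;

	/* Kernel page tables have no VMAs, hence the _novma variant; the
	 * walk still expects init_mm's mmap lock to be held. */
	mmap_read_lock(&init_mm);
	ret = walk_page_range_novma(&init_mm, start, end, &example_ops,
				    NULL, priv);
	mmap_read_unlock(&init_mm);
	return ret;
}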
diff --git a/mm/internal.h b/mm/internal.h
index b61034bd50f5..f309a010d50f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -54,12 +54,12 @@ void page_writeback_init(void);
/*
* If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
- * its nr_pages_mapped would be 0x400000: choose the COMPOUND_MAPPED bit
+ * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
* above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
* leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
*/
-#define COMPOUND_MAPPED 0x800000
-#define FOLIO_PAGES_MAPPED (COMPOUND_MAPPED - 1)
+#define ENTIRELY_MAPPED 0x800000
+#define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1)
/*
* Flags passed to __show_mem() and show_free_areas() to suppress output in
@@ -138,7 +138,7 @@ void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
loff_t end);
-long invalidate_inode_page(struct page *page);
+long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
pgoff_t start, pgoff_t end, unsigned long *nr_failed);
@@ -335,7 +335,7 @@ static inline bool page_is_buddy(struct page *page, struct page *buddy,
* satisfies the following equation:
* P = B & ~(1 << O)
*
- * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
+ * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
*/
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
@@ -616,7 +616,7 @@ folio_within_range(struct folio *folio, struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
pgoff_t pgoff, addr;
- unsigned long vma_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long vma_pglen = vma_pages(vma);
VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
if (start > end)
@@ -650,8 +650,8 @@ folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
* should be called with vma's mmap_lock held for read or write,
* under page table lock for the pte/pmd being added or removed.
*
- * mlock is usually called at the end of page_add_*_rmap(), munlock at
- * the end of page_remove_rmap(); but new anon folios are managed by
+ * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
+ * the end of folio_remove_rmap_*(); but new anon folios are managed by
* folio_add_lru_vma() calling mlock_new_folio().
*/
void mlock_folio(struct folio *folio);
@@ -1047,7 +1047,7 @@ enum {
* * Ordinary GUP: Using the PT lock
* * GUP-fast and fork(): mm->write_protect_seq
* * GUP-fast and KSM or temporary unmapping (swap, migration): see
- * page_try_share_anon_rmap()
+ * folio_try_share_anon_rmap_*()
*
* Must be called with the (sub)page that's actually referenced via the
* page table entry, which might not necessarily be the head page for a
@@ -1090,7 +1090,7 @@ static inline bool gup_must_unshare(struct vm_area_struct *vma,
return is_cow_mapping(vma->vm_flags);
}
- /* Paired with a memory barrier in page_try_share_anon_rmap(). */
+ /* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
smp_rmb();
@@ -1135,8 +1135,6 @@ static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
static inline void vma_iter_config(struct vma_iterator *vmi,
unsigned long index, unsigned long last)
{
- MAS_BUG_ON(&vmi->mas, vmi->mas.node != MAS_START &&
- (vmi->mas.index > index || vmi->mas.last < index));
__mas_set_range(&vmi->mas, index, last - 1);
}
@@ -1154,17 +1152,6 @@ static inline void vma_iter_clear(struct vma_iterator *vmi)
mas_store_prealloc(&vmi->mas, NULL);
}
-static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
- unsigned long start, unsigned long end, gfp_t gfp)
-{
- __mas_set_range(&vmi->mas, start, end - 1);
- mas_store_gfp(&vmi->mas, NULL, gfp);
- if (unlikely(mas_is_err(&vmi->mas)))
- return -ENOMEM;
-
- return 0;
-}
-
static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
return mas_walk(&vmi->mas);
@@ -1176,13 +1163,13 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
- if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
+ if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
vmi->mas.index > vma->vm_start)) {
pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
vmi->mas.index, vma->vm_start, vma->vm_start,
vma->vm_end, vmi->mas.index, vmi->mas.last);
}
- if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
+ if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
vmi->mas.last < vma->vm_start)) {
pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
@@ -1190,7 +1177,7 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
}
#endif
- if (vmi->mas.node != MAS_START &&
+ if (vmi->mas.status != ma_start &&
((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
vma_iter_invalidate(vmi);
@@ -1201,7 +1188,7 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
struct vm_area_struct *vma, gfp_t gfp)
{
- if (vmi->mas.node != MAS_START &&
+ if (vmi->mas.status != ma_start &&
((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
vma_iter_invalidate(vmi);
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 256930da578a..610efae91220 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -20,8 +20,10 @@
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
+#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
+#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -37,19 +39,35 @@ struct slab *kasan_addr_to_slab(const void *addr)
return NULL;
}
-depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
+depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags)
{
unsigned long entries[KASAN_STACK_DEPTH];
unsigned int nr_entries;
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
- return __stack_depot_save(entries, nr_entries, flags, can_alloc);
+ return stack_depot_save_flags(entries, nr_entries, flags, depot_flags);
}
-void kasan_set_track(struct kasan_track *track, gfp_t flags)
+void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack)
{
+#ifdef CONFIG_KASAN_EXTRA_INFO
+ u32 cpu = raw_smp_processor_id();
+ u64 ts_nsec = local_clock();
+
+ track->cpu = cpu;
+ track->timestamp = ts_nsec >> 3;
+#endif /* CONFIG_KASAN_EXTRA_INFO */
track->pid = current->pid;
- track->stack = kasan_save_stack(flags, true);
+ track->stack = stack;
+}
+
+void kasan_save_track(struct kasan_track *track, gfp_t flags)
+{
+ depot_stack_handle_t stack;
+
+ stack = kasan_save_stack(flags,
+ STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+ kasan_set_track(track, stack);
}
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
@@ -69,6 +87,9 @@ EXPORT_SYMBOL(kasan_disable_current);
void __kasan_unpoison_range(const void *address, size_t size)
{
+ if (is_kfence_address(address))
+ return;
+
kasan_unpoison(address, size, false);
}
@@ -133,12 +154,12 @@ void __kasan_poison_slab(struct slab *slab)
KASAN_SLAB_REDZONE, false);
}
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
{
kasan_unpoison(object, cache->object_size, false);
}
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
{
kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
KASAN_SLAB_REDZONE, false);
@@ -153,10 +174,6 @@ void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
* 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
* accessed after being freed. We preassign tags for objects in these
* caches as well.
- * 3. For SLAB allocator we can't preassign tags randomly since the freelist
- * is stored as an array of indexes instead of a linked list. Assign tags
- * based on objects indexes, so that objects that are next to each other
- * get different tags.
*/
static inline u8 assign_tag(struct kmem_cache *cache,
const void *object, bool init)
@@ -171,17 +188,12 @@ static inline u8 assign_tag(struct kmem_cache *cache,
if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
return init ? KASAN_TAG_KERNEL : kasan_random_tag();
- /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
-#ifdef CONFIG_SLAB
- /* For SLAB assign tags based on the object index in the freelist. */
- return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
-#else
/*
- * For SLUB assign a random tag during slab creation, otherwise reuse
+ * For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU,
+ * assign a random tag during slab creation, otherwise reuse
* the already assigned tag.
*/
return init ? kasan_random_tag() : get_tag(object);
-#endif
}
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
@@ -197,8 +209,8 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
return (void *)object;
}
-static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
- unsigned long ip, bool quarantine, bool init)
+static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
+ unsigned long ip, bool init)
{
void *tagged_object;
@@ -208,16 +220,12 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
tagged_object = object;
object = kasan_reset_tag(object);
- if (is_kfence_address(object))
- return false;
-
- if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
- object)) {
+ if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
return true;
}
- /* RCU slabs could be legally used after free within the RCU period */
+ /* RCU slabs could be legally used after free within the RCU period. */
if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
return false;
@@ -229,22 +237,45 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
KASAN_SLAB_FREE, init);
- if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
- return false;
-
if (kasan_stack_collection_enabled())
kasan_save_free_info(cache, tagged_object);
- return kasan_quarantine_put(cache, object);
+ return false;
}
bool __kasan_slab_free(struct kmem_cache *cache, void *object,
unsigned long ip, bool init)
{
- return ____kasan_slab_free(cache, object, ip, true, init);
+ if (is_kfence_address(object))
+ return false;
+
+ /*
+ * If the object is buggy, do not let slab put the object onto the
+ * freelist. The object will thus never be allocated again and its
+ * metadata will never get released.
+ */
+ if (poison_slab_object(cache, object, ip, init))
+ return true;
+
+ /*
+ * If the object is put into quarantine, do not let slab put the object
+ * onto the freelist for now. The object's metadata is kept until the
+ * object gets evicted from quarantine.
+ */
+ if (kasan_quarantine_put(cache, object))
+ return true;
+
+ /*
+ * If the object is not put into quarantine, it will likely be quickly
+ * reallocated. Thus, release its metadata now.
+ */
+ kasan_release_object_meta(cache, object);
+
+ /* Let slab put the object onto the freelist. */
+ return false;
}
-static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
+static inline bool check_page_allocation(void *ptr, unsigned long ip)
{
if (!kasan_arch_is_ready())
return false;
@@ -259,40 +290,28 @@ static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
return true;
}
- /*
- * The object will be poisoned by kasan_poison_pages() or
- * kasan_slab_free_mempool().
- */
-
return false;
}
void __kasan_kfree_large(void *ptr, unsigned long ip)
{
- ____kasan_kfree_large(ptr, ip);
+ check_page_allocation(ptr, ip);
+
+ /* The object will be poisoned by kasan_poison_pages(). */
}
-void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
+static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
+ gfp_t flags, bool init)
{
- struct folio *folio;
-
- folio = virt_to_folio(ptr);
-
/*
- * Even though this function is only called for kmem_cache_alloc and
- * kmalloc backed mempool allocations, those allocations can still be
- * !PageSlab() when the size provided to kmalloc is larger than
- * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+ * Unpoison the whole object. For kmalloc() allocations,
+ * poison_kmalloc_redzone() will do precise poisoning.
*/
- if (unlikely(!folio_test_slab(folio))) {
- if (____kasan_kfree_large(ptr, ip))
- return;
- kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
- } else {
- struct slab *slab = folio_slab(folio);
+ kasan_unpoison(object, cache->object_size, init);
- ____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
- }
+ /* Save alloc info (if possible) for non-kmalloc() allocations. */
+ if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
+ kasan_save_alloc_info(cache, object, flags);
}
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
@@ -317,39 +336,18 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
tag = assign_tag(cache, object, false);
tagged_object = set_tag(object, tag);
- /*
- * Unpoison the whole object.
- * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
- */
- kasan_unpoison(tagged_object, cache->object_size, init);
-
- /* Save alloc info (if possible) for non-kmalloc() allocations. */
- if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
- kasan_save_alloc_info(cache, tagged_object, flags);
+ /* Unpoison the object and save alloc info for non-kmalloc() allocations. */
+ unpoison_slab_object(cache, tagged_object, flags, init);
return tagged_object;
}
-static inline void *____kasan_kmalloc(struct kmem_cache *cache,
+static inline void poison_kmalloc_redzone(struct kmem_cache *cache,
const void *object, size_t size, gfp_t flags)
{
unsigned long redzone_start;
unsigned long redzone_end;
- if (gfpflags_allow_blocking(flags))
- kasan_quarantine_reduce();
-
- if (unlikely(object == NULL))
- return NULL;
-
- if (is_kfence_address(kasan_reset_tag(object)))
- return (void *)object;
-
- /*
- * The object has already been unpoisoned by kasan_slab_alloc() for
- * kmalloc() or by kasan_krealloc() for krealloc().
- */
-
/*
* The redzone has byte-level precision for the generic mode.
* Partially poison the last object granule to cover the unaligned
@@ -373,34 +371,34 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
kasan_save_alloc_info(cache, (void *)object, flags);
- /* Keep the tag that was set by kasan_slab_alloc(). */
- return (void *)object;
}
void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
size_t size, gfp_t flags)
{
- return ____kasan_kmalloc(cache, object, size, flags);
+ if (gfpflags_allow_blocking(flags))
+ kasan_quarantine_reduce();
+
+ if (unlikely(object == NULL))
+ return NULL;
+
+ if (is_kfence_address(object))
+ return (void *)object;
+
+ /* The object has already been unpoisoned by kasan_slab_alloc(). */
+ poison_kmalloc_redzone(cache, object, size, flags);
+
+ /* Keep the tag that was set by kasan_slab_alloc(). */
+ return (void *)object;
}
EXPORT_SYMBOL(__kasan_kmalloc);
-void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
+static inline void poison_kmalloc_large_redzone(const void *ptr, size_t size,
gfp_t flags)
{
unsigned long redzone_start;
unsigned long redzone_end;
- if (gfpflags_allow_blocking(flags))
- kasan_quarantine_reduce();
-
- if (unlikely(ptr == NULL))
- return NULL;
-
- /*
- * The object has already been unpoisoned by kasan_unpoison_pages() for
- * alloc_pages() or by kasan_krealloc() for krealloc().
- */
-
/*
* The redzone has byte-level precision for the generic mode.
* Partially poison the last object granule to cover the unaligned
@@ -410,12 +408,25 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
kasan_poison_last_granule(ptr, size);
/* Poison the aligned part of the redzone. */
- redzone_start = round_up((unsigned long)(ptr + size),
- KASAN_GRANULE_SIZE);
+ redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
kasan_poison((void *)redzone_start, redzone_end - redzone_start,
KASAN_PAGE_REDZONE, false);
+}
+
+void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
+ gfp_t flags)
+{
+ if (gfpflags_allow_blocking(flags))
+ kasan_quarantine_reduce();
+
+ if (unlikely(ptr == NULL))
+ return NULL;
+ /* The object has already been unpoisoned by kasan_unpoison_pages(). */
+ poison_kmalloc_large_redzone(ptr, size, flags);
+
+ /* Keep the tag that was set by alloc_pages(). */
return (void *)ptr;
}
@@ -423,9 +434,15 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
{
struct slab *slab;
+ if (gfpflags_allow_blocking(flags))
+ kasan_quarantine_reduce();
+
if (unlikely(object == ZERO_SIZE_PTR))
return (void *)object;
+ if (is_kfence_address(object))
+ return (void *)object;
+
/*
* Unpoison the object's data.
* Part of it might already have been unpoisoned, but it's unknown
@@ -437,9 +454,91 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
if (unlikely(!slab))
- return __kasan_kmalloc_large(object, size, flags);
+ poison_kmalloc_large_redzone(object, size, flags);
else
- return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
+ poison_kmalloc_redzone(slab->slab_cache, object, size, flags);
+
+ return (void *)object;
+}
+
+bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
+ unsigned long ip)
+{
+ unsigned long *ptr;
+
+ if (unlikely(PageHighMem(page)))
+ return true;
+
+ /* Bail out if allocation was excluded due to sampling. */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+ page_kasan_tag(page) == KASAN_TAG_KERNEL)
+ return true;
+
+ ptr = page_address(page);
+
+ if (check_page_allocation(ptr, ip))
+ return false;
+
+ kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);
+
+ return true;
+}
+
+void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
+ unsigned long ip)
+{
+ __kasan_unpoison_pages(page, order, false);
+}
+
+bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
+{
+ struct folio *folio = virt_to_folio(ptr);
+ struct slab *slab;
+
+ /*
+ * This function can be called for large kmalloc allocations that get
+ * their memory from page_alloc. Thus, the folio might not be a slab.
+ */
+ if (unlikely(!folio_test_slab(folio))) {
+ if (check_page_allocation(ptr, ip))
+ return false;
+ kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
+ return true;
+ }
+
+ if (is_kfence_address(ptr))
+ return false;
+
+ slab = folio_slab(folio);
+ return !poison_slab_object(slab->slab_cache, ptr, ip, false);
+}
+
+void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
+{
+ struct slab *slab;
+ gfp_t flags = 0; /* Might be executing under a lock. */
+
+ slab = virt_to_slab(ptr);
+
+ /*
+ * This function can be called for large kmalloc allocations that get
+ * their memory from page_alloc.
+ */
+ if (unlikely(!slab)) {
+ kasan_unpoison(ptr, size, false);
+ poison_kmalloc_large_redzone(ptr, size, flags);
+ return;
+ }
+
+ if (is_kfence_address(ptr))
+ return;
+
+ /* Unpoison the object and save alloc info for non-kmalloc() allocations. */
+ unpoison_slab_object(slab->slab_cache, ptr, size, flags);
+
+ /* Poison the redzone and save alloc info for kmalloc() allocations. */
+ if (is_kmalloc_cache(slab->slab_cache))
+ poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
}
bool __kasan_check_byte(const void *address, unsigned long ip)
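
The common.c hunk above splits the old __kasan_slab_free_mempool() into explicit mempool poison/unpoison pairs, so an element parked on a pool's free list is poisoned (with its free stack recorded) and is unpoisoned with a fresh alloc stack when handed back out. A hedged sketch of the intended pairing; the park/unpark wrappers below are hypothetical and only the __kasan_mempool_*() names come from the hunk:

/* Illustrative only: driving the new mempool hooks from a pool of slab
 * objects. 'ip' is the caller address used in KASAN reports (e.g. _RET_IP_). */
static bool pool_park_element(void *ptr, unsigned long ip)
{
	/* Returns false if KASAN detected a problem (e.g. an invalid free);
	 * such an element must not be put back on the pool. */
	return __kasan_mempool_poison_object(ptr, ip);
}

static void pool_unpark_element(void *ptr, size_t size, unsigned long ip)
{
	/* Unpoisons the object and records a new alloc stack; kmalloc-backed
	 * objects also get their redzone re-poisoned for 'size'. */
	__kasan_mempool_unpoison_object(ptr, size, ip);
}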
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 4d837ab83f08..24c13dfb1e94 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -25,6 +25,8 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -361,6 +363,8 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
{
unsigned int ok_size;
unsigned int optimal_size;
+ unsigned int rem_free_meta_size;
+ unsigned int orig_alloc_meta_offset;
if (!kasan_requires_meta())
return;
@@ -378,49 +382,77 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
ok_size = *size;
- /* Add alloc meta into redzone. */
+ /* Add alloc meta into the redzone. */
cache->kasan_info.alloc_meta_offset = *size;
*size += sizeof(struct kasan_alloc_meta);
- /*
- * If alloc meta doesn't fit, don't add it.
- * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
- * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
- * larger sizes.
- */
+ /* If alloc meta doesn't fit, don't add it. */
if (*size > KMALLOC_MAX_SIZE) {
cache->kasan_info.alloc_meta_offset = 0;
*size = ok_size;
/* Continue, since free meta might still fit. */
}
+ ok_size = *size;
+ orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset;
+
/*
- * Add free meta into redzone when it's not possible to store
+ * Store free meta in the redzone when it's not possible to store
* it in the object. This is the case when:
* 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
* be touched after it was freed, or
* 2. Object has a constructor, which means it's expected to
- * retain its content until the next allocation, or
- * 3. Object is too small.
- * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
+ * retain its content until the next allocation.
*/
- if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
- cache->object_size < sizeof(struct kasan_free_meta)) {
- ok_size = *size;
-
+ if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor) {
cache->kasan_info.free_meta_offset = *size;
*size += sizeof(struct kasan_free_meta);
+ goto free_meta_added;
+ }
- /* If free meta doesn't fit, don't add it. */
- if (*size > KMALLOC_MAX_SIZE) {
- cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
- *size = ok_size;
- }
+ /*
+ * Otherwise, if the object is large enough to contain free meta,
+ * store it within the object.
+ */
+ if (sizeof(struct kasan_free_meta) <= cache->object_size) {
+ /* cache->kasan_info.free_meta_offset = 0 is implied. */
+ goto free_meta_added;
+ }
+
+ /*
+ * For smaller objects, store the beginning of free meta within the
+ * object and the end in the redzone, and shift the location of
+ * alloc meta accordingly to free up space for free meta.
+ * This is only possible when slub_debug is disabled, as otherwise
+ * the end of free meta will overlap with slub_debug metadata.
+ */
+ if (!__slub_debug_enabled()) {
+ rem_free_meta_size = sizeof(struct kasan_free_meta) -
+ cache->object_size;
+ *size += rem_free_meta_size;
+ if (cache->kasan_info.alloc_meta_offset != 0)
+ cache->kasan_info.alloc_meta_offset += rem_free_meta_size;
+ goto free_meta_added;
+ }
+
+ /*
+ * If the object is small and slub_debug is enabled, store free meta
+ * in the redzone after alloc meta.
+ */
+ cache->kasan_info.free_meta_offset = *size;
+ *size += sizeof(struct kasan_free_meta);
+
+free_meta_added:
+ /* If free meta doesn't fit, don't add it. */
+ if (*size > KMALLOC_MAX_SIZE) {
+ cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
+ cache->kasan_info.alloc_meta_offset = orig_alloc_meta_offset;
+ *size = ok_size;
}
/* Calculate size with optimal redzone. */
optimal_size = cache->object_size + optimal_redzone(cache->object_size);
- /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
+ /* Limit it with KMALLOC_MAX_SIZE. */
if (optimal_size > KMALLOC_MAX_SIZE)
optimal_size = KMALLOC_MAX_SIZE;
/* Use optimal size if the size with added metas is not large enough. */
@@ -450,8 +482,63 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
struct kasan_alloc_meta *alloc_meta;
alloc_meta = kasan_get_alloc_meta(cache, object);
- if (alloc_meta)
+ if (alloc_meta) {
+ /* Zero out alloc meta to mark it as invalid. */
__memset(alloc_meta, 0, sizeof(*alloc_meta));
+
+ /*
+ * Temporarily disable KASAN bug reporting to allow instrumented
+ * raw_spin_lock_init to access aux_lock, which resides inside
+ * of a redzone.
+ */
+ kasan_disable_current();
+ raw_spin_lock_init(&alloc_meta->aux_lock);
+ kasan_enable_current();
+ }
+
+ /*
+ * Explicitly marking free meta as invalid is not required: the shadow
+ * value for the first 8 bytes of a newly allocated object is not
+ * KASAN_SLAB_FREE_META.
+ */
+}
+
+static void release_alloc_meta(struct kasan_alloc_meta *meta)
+{
+ /* Evict the stack traces from stack depot. */
+ stack_depot_put(meta->alloc_track.stack);
+ stack_depot_put(meta->aux_stack[0]);
+ stack_depot_put(meta->aux_stack[1]);
+
+ /* Zero out alloc meta to mark it as invalid. */
+ __memset(meta, 0, sizeof(*meta));
+}
+
+static void release_free_meta(const void *object, struct kasan_free_meta *meta)
+{
+ /* Check if free meta is valid. */
+ if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
+ return;
+
+ /* Evict the stack trace from the stack depot. */
+ stack_depot_put(meta->free_track.stack);
+
+ /* Mark free meta as invalid. */
+ *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
+}
+
+void kasan_release_object_meta(struct kmem_cache *cache, const void *object)
+{
+ struct kasan_alloc_meta *alloc_meta;
+ struct kasan_free_meta *free_meta;
+
+ alloc_meta = kasan_get_alloc_meta(cache, object);
+ if (alloc_meta)
+ release_alloc_meta(alloc_meta);
+
+ free_meta = kasan_get_free_meta(cache, object);
+ if (free_meta)
+ release_free_meta(object, free_meta);
}
size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
@@ -472,12 +559,14 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
sizeof(struct kasan_free_meta) : 0);
}
-static void __kasan_record_aux_stack(void *addr, bool can_alloc)
+static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
{
struct slab *slab = kasan_addr_to_slab(addr);
struct kmem_cache *cache;
struct kasan_alloc_meta *alloc_meta;
void *object;
+ depot_stack_handle_t new_handle, old_handle;
+ unsigned long flags;
if (is_kfence_address(addr) || !slab)
return;
@@ -488,18 +577,33 @@ static void __kasan_record_aux_stack(void *addr, bool can_alloc)
if (!alloc_meta)
return;
+ new_handle = kasan_save_stack(0, depot_flags);
+
+ /*
+ * Temporarily disable KASAN bug reporting to allow instrumented
+ * spinlock functions to access aux_lock, which resides inside of a
+ * redzone.
+ */
+ kasan_disable_current();
+ raw_spin_lock_irqsave(&alloc_meta->aux_lock, flags);
+ old_handle = alloc_meta->aux_stack[1];
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
- alloc_meta->aux_stack[0] = kasan_save_stack(0, can_alloc);
+ alloc_meta->aux_stack[0] = new_handle;
+ raw_spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
+ kasan_enable_current();
+
+ stack_depot_put(old_handle);
}
void kasan_record_aux_stack(void *addr)
{
- return __kasan_record_aux_stack(addr, true);
+ return __kasan_record_aux_stack(addr,
+ STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
}
void kasan_record_aux_stack_noalloc(void *addr)
{
- return __kasan_record_aux_stack(addr, false);
+ return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET);
}
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
@@ -507,8 +611,13 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
struct kasan_alloc_meta *alloc_meta;
alloc_meta = kasan_get_alloc_meta(cache, object);
- if (alloc_meta)
- kasan_set_track(&alloc_meta->alloc_track, flags);
+ if (!alloc_meta)
+ return;
+
+ /* Evict previous stack traces (might exist for krealloc or mempool). */
+ release_alloc_meta(alloc_meta);
+
+ kasan_save_track(&alloc_meta->alloc_track, flags);
}
void kasan_save_free_info(struct kmem_cache *cache, void *object)
@@ -519,7 +628,11 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object)
if (!free_meta)
return;
- kasan_set_track(&free_meta->free_track, 0);
- /* The object was freed and has free track set. */
- *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK;
+ /* Evict previous stack trace (might exist for mempool). */
+ release_free_meta(object, free_meta);
+
+ kasan_save_track(&free_meta->free_track, 0);
+
+ /* Mark free meta as valid. */
+ *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE_META;
}
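
The generic.c hunk above also moves KASAN onto reference-counted stack depot handles: stacks are saved with stack_depot_save_flags() passing STACK_DEPOT_FLAG_GET (plus STACK_DEPOT_FLAG_CAN_ALLOC where allocation is allowed) and are dropped with stack_depot_put() when the owning metadata is released or overwritten. A small sketch of that save/put discipline, assuming the same stackdepot API used in the hunks; the helper names are illustrative:

/* Illustrative sketch only, not part of the patch. */
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t save_example_stack(gfp_t flags)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	/* FLAG_GET takes a reference on the depot record; FLAG_CAN_ALLOC
	 * lets the depot allocate a new pool if needed. */
	return stack_depot_save_flags(entries, nr, flags,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}

static void release_example_stack(depot_stack_handle_t handle)
{
	/* Balances the FLAG_GET reference; a zero handle is a no-op. */
	stack_depot_put(handle);
}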
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 06141bbc1e51..2b994092a2d4 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -57,7 +57,12 @@ enum kasan_mode kasan_mode __ro_after_init;
EXPORT_SYMBOL_GPL(kasan_mode);
/* Whether to enable vmalloc tagging. */
+#ifdef CONFIG_KASAN_VMALLOC
DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc);
+#else
+DEFINE_STATIC_KEY_FALSE(kasan_flag_vmalloc);
+#endif
+EXPORT_SYMBOL_GPL(kasan_flag_vmalloc);
#define PAGE_ALLOC_SAMPLE_DEFAULT 1
#define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT 3
@@ -119,6 +124,9 @@ static int __init early_kasan_flag_vmalloc(char *arg)
if (!arg)
return -EINVAL;
+ if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
+ return 0;
+
if (!strcmp(arg, "off"))
kasan_arg_vmalloc = KASAN_ARG_VMALLOC_OFF;
else if (!strcmp(arg, "on"))
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 8b06bab5c406..d0f172f2b978 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -6,6 +6,7 @@
#include <linux/kasan.h>
#include <linux/kasan-tags.h>
#include <linux/kfence.h>
+#include <linux/spinlock.h>
#include <linux/stackdepot.h>
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -48,6 +49,7 @@ DECLARE_PER_CPU(long, kasan_page_alloc_skip);
static inline bool kasan_vmalloc_enabled(void)
{
+ /* Static branch is never enabled with CONFIG_KASAN_VMALLOC disabled. */
return static_branch_likely(&kasan_flag_vmalloc);
}
@@ -81,6 +83,11 @@ static inline bool kasan_sample_page_alloc(unsigned int order)
#else /* CONFIG_KASAN_HW_TAGS */
+static inline bool kasan_vmalloc_enabled(void)
+{
+ return IS_ENABLED(CONFIG_KASAN_VMALLOC);
+}
+
static inline bool kasan_async_fault_possible(void)
{
return false;
@@ -100,21 +107,21 @@ static inline bool kasan_sample_page_alloc(unsigned int order)
#ifdef CONFIG_KASAN_GENERIC
-/* Generic KASAN uses per-object metadata to store stack traces. */
+/*
+ * Generic KASAN uses per-object metadata to store alloc and free stack traces
+ * and the quarantine link.
+ */
static inline bool kasan_requires_meta(void)
{
- /*
- * Technically, Generic KASAN always collects stack traces right now.
- * However, let's use kasan_stack_collection_enabled() in case the
- * kasan.stacktrace command-line argument is changed to affect
- * Generic KASAN.
- */
- return kasan_stack_collection_enabled();
+ return true;
}
#else /* CONFIG_KASAN_GENERIC */
-/* Tag-based KASAN modes do not use per-object metadata. */
+/*
+ * Tag-based KASAN modes do not use per-object metadata: they use the stack
+ * ring to store alloc and free stack traces and do not use quarantine.
+ */
static inline bool kasan_requires_meta(void)
{
return false;
@@ -149,7 +156,7 @@ static inline bool kasan_requires_meta(void)
#ifdef CONFIG_KASAN_GENERIC
-#define KASAN_SLAB_FREETRACK 0xFA /* freed slab object with free track */
+#define KASAN_SLAB_FREE_META 0xFA /* freed slab object with free meta */
#define KASAN_GLOBAL_REDZONE 0xF9 /* redzone for global variable */
/* Stack redzone shadow values. Compiler ABI, do not change. */
@@ -187,6 +194,10 @@ static inline bool kasan_requires_meta(void)
struct kasan_track {
u32 pid;
depot_stack_handle_t stack;
+#ifdef CONFIG_KASAN_EXTRA_INFO
+ u64 cpu:20;
+ u64 timestamp:44;
+#endif /* CONFIG_KASAN_EXTRA_INFO */
};
enum kasan_report_type {
@@ -242,9 +253,25 @@ struct kasan_global {
#ifdef CONFIG_KASAN_GENERIC
+/*
+ * Alloc meta contains the allocation-related information about a slab object.
+ * Alloc meta is saved when an object is allocated and is kept until either the
+ * object returns to the slab freelist (leaves quarantine for quarantined
+ * objects or gets freed for the non-quarantined ones) or reallocated via
+ * krealloc or through a mempool.
+ * Alloc meta is stored inside of the object's redzone.
+ * Alloc meta is considered valid whenever it contains non-zero data.
+ */
struct kasan_alloc_meta {
struct kasan_track alloc_track;
/* Free track is stored in kasan_free_meta. */
+ /*
+ * aux_lock protects aux_stack from accesses from concurrent
+ * kasan_record_aux_stack calls. It is a raw spinlock to avoid sleeping
+ * on RT kernels, as kasan_record_aux_stack_noalloc can be called from
+ * non-sleepable contexts.
+ */
+ raw_spinlock_t aux_lock;
depot_stack_handle_t aux_stack[2];
};
@@ -260,8 +287,12 @@ struct qlist_node {
#define KASAN_NO_FREE_META INT_MAX
/*
- * Free meta is only used by Generic mode while the object is in quarantine.
- * After that, slab allocator stores the freelist pointer in the object.
+ * Free meta contains the freeing-related information about a slab object.
+ * Free meta is only kept for quarantined objects and for mempool objects until
+ * the object gets allocated again.
+ * Free meta is stored within the object's memory.
+ * Free meta is considered valid whenever the value of the shadow byte that
+ * corresponds to the first 8 bytes of the object is KASAN_SLAB_FREE_META.
*/
struct kasan_free_meta {
struct qlist_node quarantine_link;
@@ -275,8 +306,7 @@ struct kasan_free_meta {
struct kasan_stack_ring_entry {
void *ptr;
size_t size;
- u32 pid;
- depot_stack_handle_t stack;
+ struct kasan_track track;
bool is_free;
};
@@ -291,6 +321,12 @@ struct kasan_stack_ring {
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+static __always_inline bool addr_in_shadow(const void *addr)
+{
+ return addr >= (void *)KASAN_SHADOW_START &&
+ addr < (void *)KASAN_SHADOW_END;
+}
+
#ifndef kasan_shadow_to_mem
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
@@ -357,24 +393,24 @@ void kasan_report_invalid_free(void *object, unsigned long ip, enum kasan_report
struct slab *kasan_addr_to_slab(const void *addr);
#ifdef CONFIG_KASAN_GENERIC
-void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size);
-void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
const void *object);
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
const void *object);
+void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
+void kasan_release_object_meta(struct kmem_cache *cache, const void *object);
#else
-static inline void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size) { }
static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
+static inline void kasan_release_object_meta(struct kmem_cache *cache, const void *object) { }
#endif
-depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc);
-void kasan_set_track(struct kasan_track *track, gfp_t flags);
+depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags);
+void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack);
+void kasan_save_track(struct kasan_track *track, gfp_t flags);
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags);
void kasan_save_free_info(struct kmem_cache *cache, void *object);
-#if defined(CONFIG_KASAN_GENERIC) && \
- (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
+#ifdef CONFIG_KASAN_GENERIC
bool kasan_quarantine_put(struct kmem_cache *cache, void *object);
void kasan_quarantine_reduce(void);
void kasan_quarantine_remove_cache(struct kmem_cache *cache);
@@ -444,35 +480,23 @@ static inline u8 kasan_random_tag(void) { return 0; }
static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
- addr = kasan_reset_tag(addr);
-
- /* Skip KFENCE memory if called explicitly outside of sl*b. */
- if (is_kfence_address(addr))
- return;
-
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
if (WARN_ON(size & KASAN_GRANULE_MASK))
return;
- hw_set_mem_tag_range((void *)addr, size, value, init);
+ hw_set_mem_tag_range(kasan_reset_tag(addr), size, value, init);
}
static inline void kasan_unpoison(const void *addr, size_t size, bool init)
{
u8 tag = get_tag(addr);
- addr = kasan_reset_tag(addr);
-
- /* Skip KFENCE memory if called explicitly outside of sl*b. */
- if (is_kfence_address(addr))
- return;
-
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
size = round_up(size, KASAN_GRANULE_SIZE);
- hw_set_mem_tag_range((void *)addr, size, tag, init);
+ hw_set_mem_tag_range(kasan_reset_tag(addr), size, tag, init);
}
static inline bool kasan_byte_accessible(const void *addr)
@@ -491,8 +515,6 @@ static inline bool kasan_byte_accessible(const void *addr)
* @size - range size, must be aligned to KASAN_GRANULE_SIZE
* @value - value that's written to metadata for the range
* @init - whether to initialize the memory range (only for hardware tag-based)
- *
- * The size gets aligned to KASAN_GRANULE_SIZE before marking the range.
*/
void kasan_poison(const void *addr, size_t size, u8 value, bool init);
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 34515a106ca5..971cfff4ca0b 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
+#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
@@ -213,17 +214,32 @@ static void kmalloc_node_oob_right(struct kunit *test)
}
/*
- * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
- * fit into a slab cache and therefore is allocated via the page allocator
- * fallback. Since this kind of fallback is only implemented for SLUB, these
- * tests are limited to that allocator.
+ * Check that KASAN detects an out-of-bounds access for a big object allocated
+ * via kmalloc(), but not big enough to trigger the page_alloc fallback.
*/
-static void kmalloc_pagealloc_oob_right(struct kunit *test)
+static void kmalloc_big_oob_right(struct kunit *test)
{
char *ptr;
- size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
+ size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
+
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ OPTIMIZER_HIDE_VAR(ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
+ kfree(ptr);
+}
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
+/*
+ * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
+ * that does not fit into the largest slab cache and therefore is allocated via
+ * the page_alloc fallback.
+ */
+
+static void kmalloc_large_oob_right(struct kunit *test)
+{
+ char *ptr;
+ size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -234,13 +250,11 @@ static void kmalloc_pagealloc_oob_right(struct kunit *test)
kfree(ptr);
}
-static void kmalloc_pagealloc_uaf(struct kunit *test)
+static void kmalloc_large_uaf(struct kunit *test)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
@@ -248,20 +262,18 @@ static void kmalloc_pagealloc_uaf(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
-static void kmalloc_pagealloc_invalid_free(struct kunit *test)
+static void kmalloc_large_invalid_free(struct kunit *test)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}
-static void pagealloc_oob_right(struct kunit *test)
+static void page_alloc_oob_right(struct kunit *test)
{
char *ptr;
struct page *pages;
@@ -283,7 +295,7 @@ static void pagealloc_oob_right(struct kunit *test)
free_pages((unsigned long)ptr, order);
}
-static void pagealloc_uaf(struct kunit *test)
+static void page_alloc_uaf(struct kunit *test)
{
char *ptr;
struct page *pages;
@@ -297,23 +309,6 @@ static void pagealloc_uaf(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
-static void kmalloc_large_oob_right(struct kunit *test)
-{
- char *ptr;
- size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
-
- /*
- * Allocate a chunk that is large enough, but still fits into a slab
- * and does not trigger the page allocator fallback in SLUB.
- */
- ptr = kmalloc(size, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
- OPTIMIZER_HIDE_VAR(ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
- kfree(ptr);
-}
-
static void krealloc_more_oob_helper(struct kunit *test,
size_t size1, size_t size2)
{
@@ -403,20 +398,14 @@ static void krealloc_less_oob(struct kunit *test)
krealloc_less_oob_helper(test, 235, 201);
}
-static void krealloc_pagealloc_more_oob(struct kunit *test)
+static void krealloc_large_more_oob(struct kunit *test)
{
- /* page_alloc fallback in only implemented for SLUB. */
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
KMALLOC_MAX_CACHE_SIZE + 235);
}
-static void krealloc_pagealloc_less_oob(struct kunit *test)
+static void krealloc_large_less_oob(struct kunit *test)
{
- /* page_alloc fallback in only implemented for SLUB. */
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
KMALLOC_MAX_CACHE_SIZE + 201);
}
@@ -708,6 +697,126 @@ static void kmalloc_uaf3(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
}
+static void kmalloc_double_kzfree(struct kunit *test)
+{
+ char *ptr;
+ size_t size = 16;
+
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ kfree_sensitive(ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
+}
+
+/* Check that ksize() does NOT unpoison whole object. */
+static void ksize_unpoisons_memory(struct kunit *test)
+{
+ char *ptr;
+ size_t size = 128 - KASAN_GRANULE_SIZE - 5;
+ size_t real_size;
+
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ real_size = ksize(ptr);
+ KUNIT_EXPECT_GT(test, real_size, size);
+
+ OPTIMIZER_HIDE_VAR(ptr);
+
+ /* These accesses shouldn't trigger a KASAN report. */
+ ptr[0] = 'x';
+ ptr[size - 1] = 'x';
+
+ /* These must trigger a KASAN report. */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
+
+ kfree(ptr);
+}
+
+/*
+ * Check that a use-after-free is detected by ksize() and via normal accesses
+ * after it.
+ */
+static void ksize_uaf(struct kunit *test)
+{
+ char *ptr;
+ int size = 128 - KASAN_GRANULE_SIZE;
+
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ kfree(ptr);
+
+ OPTIMIZER_HIDE_VAR(ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
+}
+
+/*
+ * The two tests below check that Generic KASAN prints auxiliary stack traces
+ * for RCU callbacks and workqueues. The reports need to be inspected manually.
+ *
+ * These tests are still enabled for other KASAN modes to make sure that all
+ * modes report bad accesses in tested scenarios.
+ */
+
+static struct kasan_rcu_info {
+ int i;
+ struct rcu_head rcu;
+} *global_rcu_ptr;
+
+static void rcu_uaf_reclaim(struct rcu_head *rp)
+{
+ struct kasan_rcu_info *fp =
+ container_of(rp, struct kasan_rcu_info, rcu);
+
+ kfree(fp);
+ ((volatile struct kasan_rcu_info *)fp)->i;
+}
+
+static void rcu_uaf(struct kunit *test)
+{
+ struct kasan_rcu_info *ptr;
+
+ ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ global_rcu_ptr = rcu_dereference_protected(
+ (struct kasan_rcu_info __rcu *)ptr, NULL);
+
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
+ rcu_barrier());
+}
+
+static void workqueue_uaf_work(struct work_struct *work)
+{
+ kfree(work);
+}
+
+static void workqueue_uaf(struct kunit *test)
+{
+ struct workqueue_struct *workqueue;
+ struct work_struct *work;
+
+ workqueue = create_workqueue("kasan_workqueue_test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);
+
+ work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);
+
+ INIT_WORK(work, workqueue_uaf_work);
+ queue_work(workqueue, work);
+ destroy_workqueue(workqueue);
+
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ ((volatile struct work_struct *)work)->data);
+}
+
static void kfree_via_page(struct kunit *test)
{
char *ptr;
@@ -758,6 +867,69 @@ static void kmem_cache_oob(struct kunit *test)
kmem_cache_destroy(cache);
}
+static void kmem_cache_double_free(struct kunit *test)
+{
+ char *p;
+ size_t size = 200;
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+ p = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!p) {
+ kunit_err(test, "Allocation failed: %s\n", __func__);
+ kmem_cache_destroy(cache);
+ return;
+ }
+
+ kmem_cache_free(cache, p);
+ KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
+ kmem_cache_destroy(cache);
+}
+
+static void kmem_cache_invalid_free(struct kunit *test)
+{
+ char *p;
+ size_t size = 200;
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
+ NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+ p = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!p) {
+ kunit_err(test, "Allocation failed: %s\n", __func__);
+ kmem_cache_destroy(cache);
+ return;
+ }
+
+ /* Trigger invalid free, the object doesn't get freed. */
+ KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
+
+ /*
+ * Properly free the object to prevent the "Objects remaining in
+ * test_cache on __kmem_cache_shutdown" BUG failure.
+ */
+ kmem_cache_free(cache, p);
+
+ kmem_cache_destroy(cache);
+}
+
+static void empty_cache_ctor(void *object) { }
+
+static void kmem_cache_double_destroy(struct kunit *test)
+{
+ struct kmem_cache *cache;
+
+ /* Provide a constructor to prevent cache merging. */
+ cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+ kmem_cache_destroy(cache);
+ KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
+}
+
static void kmem_cache_accounted(struct kunit *test)
{
int i;
@@ -810,6 +982,303 @@ static void kmem_cache_bulk(struct kunit *test)
kmem_cache_destroy(cache);
}
+static void *mempool_prepare_kmalloc(struct kunit *test, mempool_t *pool, size_t size)
+{
+ int pool_size = 4;
+ int ret;
+ void *elem;
+
+ memset(pool, 0, sizeof(*pool));
+ ret = mempool_init_kmalloc_pool(pool, pool_size, size);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /*
+ * Allocate one element to prevent mempool from freeing elements to the
+ * underlying allocator and instead make it add them to the element
+ * list when the tests trigger double-free and invalid-free bugs.
+ * This allows testing KASAN annotations in add_element().
+ */
+ elem = mempool_alloc_preallocated(pool);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
+
+ return elem;
+}
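(For readers unfamiliar with the mempool API that the helper above drives, here is a minimal stand-alone sketch of a kmalloc-backed pool, assuming a kernel-module context; demo_pool and demo_init() are illustrative names and not part of the patch.)

#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t demo_pool;

static int demo_init(void)
{
	void *elem;
	int ret;

	/* Four preallocated 128-byte elements, backed by kmalloc(). */
	ret = mempool_init_kmalloc_pool(&demo_pool, 4, 128);
	if (ret)
		return ret;

	elem = mempool_alloc(&demo_pool, GFP_KERNEL);
	if (!elem) {
		mempool_exit(&demo_pool);
		return -ENOMEM;
	}

	/*
	 * When the pool is below its minimum, freeing refills the element
	 * list (add_element()) instead of returning memory to the underlying
	 * allocator, which is the path the tests above poke at.
	 */
	mempool_free(elem, &demo_pool);

	mempool_exit(&demo_pool);
	return 0;
}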
+
+static struct kmem_cache *mempool_prepare_slab(struct kunit *test, mempool_t *pool, size_t size)
+{
+ struct kmem_cache *cache;
+ int pool_size = 4;
+ int ret;
+
+ cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+ memset(pool, 0, sizeof(*pool));
+ ret = mempool_init_slab_pool(pool, pool_size, cache);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /*
+ * Do not allocate one preallocated element, as we skip the double-free
+ * and invalid-free tests for slab mempool for simplicity.
+ */
+
+ return cache;
+}
+
+static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order)
+{
+ int pool_size = 4;
+ int ret;
+ void *elem;
+
+ memset(pool, 0, sizeof(*pool));
+ ret = mempool_init_page_pool(pool, pool_size, order);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ elem = mempool_alloc_preallocated(pool);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
+
+ return elem;
+}
+
+static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t size)
+{
+ char *elem;
+
+ elem = mempool_alloc_preallocated(pool);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
+
+ OPTIMIZER_HIDE_VAR(elem);
+
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ ((volatile char *)&elem[size])[0]);
+ else
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ ((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);
+
+ mempool_free(elem, pool);
+}
+
+static void mempool_kmalloc_oob_right(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = 128 - KASAN_GRANULE_SIZE - 5;
+ void *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_oob_right_helper(test, &pool, size);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_kmalloc_large_oob_right(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
+ void *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_oob_right_helper(test, &pool, size);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_slab_oob_right(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = 123;
+ struct kmem_cache *cache;
+
+ cache = mempool_prepare_slab(test, &pool, size);
+
+ mempool_oob_right_helper(test, &pool, size);
+
+ mempool_exit(&pool);
+ kmem_cache_destroy(cache);
+}
+
+/*
+ * Skip the out-of-bounds test for page mempool. With Generic KASAN, page
+ * allocations have no redzones, and thus the out-of-bounds detection is not
+ * guaranteed; see https://bugzilla.kernel.org/show_bug.cgi?id=210503. With
+ * the tag-based KASAN modes, the neighboring allocation might have the same
+ * tag; see https://bugzilla.kernel.org/show_bug.cgi?id=203505.
+ */
+
+static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
+{
+ char *elem, *ptr;
+
+ elem = mempool_alloc_preallocated(pool);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
+
+ mempool_free(elem, pool);
+
+ ptr = page ? page_address((struct page *)elem) : elem;
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
+}
+
+static void mempool_kmalloc_uaf(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = 128;
+ void *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_uaf_helper(test, &pool, false);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_kmalloc_large_uaf(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
+ void *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_uaf_helper(test, &pool, false);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_slab_uaf(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = 123;
+ struct kmem_cache *cache;
+
+ cache = mempool_prepare_slab(test, &pool, size);
+
+ mempool_uaf_helper(test, &pool, false);
+
+ mempool_exit(&pool);
+ kmem_cache_destroy(cache);
+}
+
+static void mempool_page_alloc_uaf(struct kunit *test)
+{
+ mempool_t pool;
+ int order = 2;
+ void *extra_elem;
+
+ extra_elem = mempool_prepare_page(test, &pool, order);
+
+ mempool_uaf_helper(test, &pool, true);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_double_free_helper(struct kunit *test, mempool_t *pool)
+{
+ char *elem;
+
+ elem = mempool_alloc_preallocated(pool);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
+
+ mempool_free(elem, pool);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem, pool));
+}
+
+static void mempool_kmalloc_double_free(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = 128;
+ char *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_double_free_helper(test, &pool);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_kmalloc_large_double_free(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
+ char *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_double_free_helper(test, &pool);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_page_alloc_double_free(struct kunit *test)
+{
+ mempool_t pool;
+ int order = 2;
+ char *extra_elem;
+
+ extra_elem = mempool_prepare_page(test, &pool, order);
+
+ mempool_double_free_helper(test, &pool);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_kmalloc_invalid_free_helper(struct kunit *test, mempool_t *pool)
+{
+ char *elem;
+
+ elem = mempool_alloc_preallocated(pool);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem + 1, pool));
+
+ mempool_free(elem, pool);
+}
+
+static void mempool_kmalloc_invalid_free(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = 128;
+ char *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_kmalloc_invalid_free_helper(test, &pool);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_kmalloc_large_invalid_free(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
+ char *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_kmalloc_invalid_free_helper(test, &pool);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+/*
+ * Skip the invalid-free test for page mempool. The invalid-free detection only
+ * works for compound pages and mempool preallocates all page elements without
+ * the __GFP_COMP flag.
+ */
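(A rough, assumption-level illustration of that last point, not code from the patch: presumably only a compound allocation lets an interior pointer be traced back to the head page, which is what invalid-free detection relies on.)

#include <linux/gfp.h>
#include <linux/mm.h>

static void demo_compound_vs_plain(void)
{
	/* Order-2 compound allocation: tail pages point back to the head. */
	struct page *comp = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);
	/* Plain high-order allocation, as mempool preallocates its pages. */
	struct page *plain = alloc_pages(GFP_KERNEL, 2);

	if (!comp || !plain)
		goto out;

	WARN_ON(!PageCompound(comp));	/* head/tail linkage exists */
	WARN_ON(PageCompound(plain));	/* sub-pages look independent */

out:
	if (comp)
		__free_pages(comp, 2);
	if (plain)
		__free_pages(plain, 2);
}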
+
static char global_array[10];
static void kasan_global_oob_right(struct kunit *test)
@@ -849,53 +1318,6 @@ static void kasan_global_oob_left(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
-/* Check that ksize() does NOT unpoison whole object. */
-static void ksize_unpoisons_memory(struct kunit *test)
-{
- char *ptr;
- size_t size = 128 - KASAN_GRANULE_SIZE - 5;
- size_t real_size;
-
- ptr = kmalloc(size, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
- real_size = ksize(ptr);
- KUNIT_EXPECT_GT(test, real_size, size);
-
- OPTIMIZER_HIDE_VAR(ptr);
-
- /* These accesses shouldn't trigger a KASAN report. */
- ptr[0] = 'x';
- ptr[size - 1] = 'x';
-
- /* These must trigger a KASAN report. */
- if (IS_ENABLED(CONFIG_KASAN_GENERIC))
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
-
- kfree(ptr);
-}
-
-/*
- * Check that a use-after-free is detected by ksize() and via normal accesses
- * after it.
- */
-static void ksize_uaf(struct kunit *test)
-{
- char *ptr;
- int size = 128 - KASAN_GRANULE_SIZE;
-
- ptr = kmalloc(size, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- kfree(ptr);
-
- OPTIMIZER_HIDE_VAR(ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
-}
-
static void kasan_stack_oob(struct kunit *test)
{
char stack_array[10];
@@ -938,69 +1360,6 @@ static void kasan_alloca_oob_right(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
-static void kmem_cache_double_free(struct kunit *test)
-{
- char *p;
- size_t size = 200;
- struct kmem_cache *cache;
-
- cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
-
- p = kmem_cache_alloc(cache, GFP_KERNEL);
- if (!p) {
- kunit_err(test, "Allocation failed: %s\n", __func__);
- kmem_cache_destroy(cache);
- return;
- }
-
- kmem_cache_free(cache, p);
- KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
- kmem_cache_destroy(cache);
-}
-
-static void kmem_cache_invalid_free(struct kunit *test)
-{
- char *p;
- size_t size = 200;
- struct kmem_cache *cache;
-
- cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
- NULL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
-
- p = kmem_cache_alloc(cache, GFP_KERNEL);
- if (!p) {
- kunit_err(test, "Allocation failed: %s\n", __func__);
- kmem_cache_destroy(cache);
- return;
- }
-
- /* Trigger invalid free, the object doesn't get freed. */
- KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
-
- /*
- * Properly free the object to prevent the "Objects remaining in
- * test_cache on __kmem_cache_shutdown" BUG failure.
- */
- kmem_cache_free(cache, p);
-
- kmem_cache_destroy(cache);
-}
-
-static void empty_cache_ctor(void *object) { }
-
-static void kmem_cache_double_destroy(struct kunit *test)
-{
- struct kmem_cache *cache;
-
- /* Provide a constructor to prevent cache merging. */
- cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
- kmem_cache_destroy(cache);
- KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
-}
-
static void kasan_memchr(struct kunit *test)
{
char *ptr;
@@ -1162,79 +1521,6 @@ static void kasan_bitops_tags(struct kunit *test)
kfree(bits);
}
-static void kmalloc_double_kzfree(struct kunit *test)
-{
- char *ptr;
- size_t size = 16;
-
- ptr = kmalloc(size, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
- kfree_sensitive(ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
-}
-
-/*
- * The two tests below check that Generic KASAN prints auxiliary stack traces
- * for RCU callbacks and workqueues. The reports need to be inspected manually.
- *
- * These tests are still enabled for other KASAN modes to make sure that all
- * modes report bad accesses in tested scenarios.
- */
-
-static struct kasan_rcu_info {
- int i;
- struct rcu_head rcu;
-} *global_rcu_ptr;
-
-static void rcu_uaf_reclaim(struct rcu_head *rp)
-{
- struct kasan_rcu_info *fp =
- container_of(rp, struct kasan_rcu_info, rcu);
-
- kfree(fp);
- ((volatile struct kasan_rcu_info *)fp)->i;
-}
-
-static void rcu_uaf(struct kunit *test)
-{
- struct kasan_rcu_info *ptr;
-
- ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
- global_rcu_ptr = rcu_dereference_protected(
- (struct kasan_rcu_info __rcu *)ptr, NULL);
-
- KUNIT_EXPECT_KASAN_FAIL(test,
- call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
- rcu_barrier());
-}
-
-static void workqueue_uaf_work(struct work_struct *work)
-{
- kfree(work);
-}
-
-static void workqueue_uaf(struct kunit *test)
-{
- struct workqueue_struct *workqueue;
- struct work_struct *work;
-
- workqueue = create_workqueue("kasan_workqueue_test");
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);
-
- work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);
-
- INIT_WORK(work, workqueue_uaf_work);
- queue_work(workqueue, work);
- destroy_workqueue(workqueue);
-
- KUNIT_EXPECT_KASAN_FAIL(test,
- ((volatile struct work_struct *)work)->data);
-}
-
static void vmalloc_helpers_tags(struct kunit *test)
{
void *ptr;
@@ -1244,6 +1530,9 @@ static void vmalloc_helpers_tags(struct kunit *test)
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
+ if (!kasan_vmalloc_enabled())
+ kunit_skip(test, "Test requires kasan.vmalloc=on");
+
ptr = vmalloc(PAGE_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -1278,6 +1567,9 @@ static void vmalloc_oob(struct kunit *test)
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
+ if (!kasan_vmalloc_enabled())
+ kunit_skip(test, "Test requires kasan.vmalloc=on");
+
v_ptr = vmalloc(size);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
@@ -1331,6 +1623,9 @@ static void vmap_tags(struct kunit *test)
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
+ if (!kasan_vmalloc_enabled())
+ kunit_skip(test, "Test requires kasan.vmalloc=on");
+
p_page = alloc_pages(GFP_KERNEL, 1);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
p_ptr = page_address(p_page);
@@ -1449,7 +1744,7 @@ static void match_all_not_assigned(struct kunit *test)
free_pages((unsigned long)ptr, order);
}
- if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
+ if (!kasan_vmalloc_enabled())
return;
for (i = 0; i < 256; i++) {
@@ -1502,6 +1797,14 @@ static void match_all_mem_tag(struct kunit *test)
/* For each possible tag value not matching the pointer tag. */
for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
+ /*
+ * For Software Tag-Based KASAN, skip the majority of tag
+ * values to avoid the test printing too many reports.
+ */
+ if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
+ tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
+ continue;
+
if (tag == get_tag(ptr))
continue;
@@ -1521,16 +1824,16 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_oob_right),
KUNIT_CASE(kmalloc_oob_left),
KUNIT_CASE(kmalloc_node_oob_right),
- KUNIT_CASE(kmalloc_pagealloc_oob_right),
- KUNIT_CASE(kmalloc_pagealloc_uaf),
- KUNIT_CASE(kmalloc_pagealloc_invalid_free),
- KUNIT_CASE(pagealloc_oob_right),
- KUNIT_CASE(pagealloc_uaf),
+ KUNIT_CASE(kmalloc_big_oob_right),
KUNIT_CASE(kmalloc_large_oob_right),
+ KUNIT_CASE(kmalloc_large_uaf),
+ KUNIT_CASE(kmalloc_large_invalid_free),
+ KUNIT_CASE(page_alloc_oob_right),
+ KUNIT_CASE(page_alloc_uaf),
KUNIT_CASE(krealloc_more_oob),
KUNIT_CASE(krealloc_less_oob),
- KUNIT_CASE(krealloc_pagealloc_more_oob),
- KUNIT_CASE(krealloc_pagealloc_less_oob),
+ KUNIT_CASE(krealloc_large_more_oob),
+ KUNIT_CASE(krealloc_large_less_oob),
KUNIT_CASE(krealloc_uaf),
KUNIT_CASE(kmalloc_oob_16),
KUNIT_CASE(kmalloc_uaf_16),
@@ -1545,29 +1848,41 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_uaf_memset),
KUNIT_CASE(kmalloc_uaf2),
KUNIT_CASE(kmalloc_uaf3),
+ KUNIT_CASE(kmalloc_double_kzfree),
+ KUNIT_CASE(ksize_unpoisons_memory),
+ KUNIT_CASE(ksize_uaf),
+ KUNIT_CASE(rcu_uaf),
+ KUNIT_CASE(workqueue_uaf),
KUNIT_CASE(kfree_via_page),
KUNIT_CASE(kfree_via_phys),
KUNIT_CASE(kmem_cache_oob),
+ KUNIT_CASE(kmem_cache_double_free),
+ KUNIT_CASE(kmem_cache_invalid_free),
+ KUNIT_CASE(kmem_cache_double_destroy),
KUNIT_CASE(kmem_cache_accounted),
KUNIT_CASE(kmem_cache_bulk),
+ KUNIT_CASE(mempool_kmalloc_oob_right),
+ KUNIT_CASE(mempool_kmalloc_large_oob_right),
+ KUNIT_CASE(mempool_slab_oob_right),
+ KUNIT_CASE(mempool_kmalloc_uaf),
+ KUNIT_CASE(mempool_kmalloc_large_uaf),
+ KUNIT_CASE(mempool_slab_uaf),
+ KUNIT_CASE(mempool_page_alloc_uaf),
+ KUNIT_CASE(mempool_kmalloc_double_free),
+ KUNIT_CASE(mempool_kmalloc_large_double_free),
+ KUNIT_CASE(mempool_page_alloc_double_free),
+ KUNIT_CASE(mempool_kmalloc_invalid_free),
+ KUNIT_CASE(mempool_kmalloc_large_invalid_free),
KUNIT_CASE(kasan_global_oob_right),
KUNIT_CASE(kasan_global_oob_left),
KUNIT_CASE(kasan_stack_oob),
KUNIT_CASE(kasan_alloca_oob_left),
KUNIT_CASE(kasan_alloca_oob_right),
- KUNIT_CASE(ksize_unpoisons_memory),
- KUNIT_CASE(ksize_uaf),
- KUNIT_CASE(kmem_cache_double_free),
- KUNIT_CASE(kmem_cache_invalid_free),
- KUNIT_CASE(kmem_cache_double_destroy),
KUNIT_CASE(kasan_memchr),
KUNIT_CASE(kasan_memcmp),
KUNIT_CASE(kasan_strings),
KUNIT_CASE(kasan_bitops_generic),
KUNIT_CASE(kasan_bitops_tags),
- KUNIT_CASE(kmalloc_double_kzfree),
- KUNIT_CASE(rcu_uaf),
- KUNIT_CASE(workqueue_uaf),
KUNIT_CASE(vmalloc_helpers_tags),
KUNIT_CASE(vmalloc_oob),
KUNIT_CASE(vmap_tags),
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index ca4529156735..3ba02efb952a 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -143,11 +143,9 @@ static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
void *object = qlink_to_object(qlink, cache);
- struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
- unsigned long flags;
+ struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object);
- if (IS_ENABLED(CONFIG_SLAB))
- local_irq_save(flags);
+ kasan_release_object_meta(cache, object);
/*
* If init_on_free is enabled and KASAN's free metadata is stored in
@@ -157,18 +155,9 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
*/
if (slab_want_init_on_free(cache) &&
cache->kasan_info.free_meta_offset == 0)
- memzero_explicit(meta, sizeof(*meta));
-
- /*
- * As the object now gets freed from the quarantine, assume that its
- * free track is no longer valid.
- */
- *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
+ memzero_explicit(free_meta, sizeof(*free_meta));
___cache_free(cache, object, _THIS_IP_);
-
- if (IS_ENABLED(CONFIG_SLAB))
- local_irq_restore(flags);
}
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index e77facb62900..7afa4feb03e1 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -23,6 +23,7 @@
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/vmalloc.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/sched/task_stack.h>
@@ -262,7 +263,19 @@ static void print_error_description(struct kasan_report_info *info)
static void print_track(struct kasan_track *track, const char *prefix)
{
+#ifdef CONFIG_KASAN_EXTRA_INFO
+ u64 ts_nsec = track->timestamp;
+ unsigned long rem_usec;
+
+ ts_nsec <<= 3;
+ rem_usec = do_div(ts_nsec, NSEC_PER_SEC) / 1000;
+
+ pr_err("%s by task %u on cpu %d at %lu.%06lus:\n",
+ prefix, track->pid, track->cpu,
+ (unsigned long)ts_nsec, rem_usec);
+#else
pr_err("%s by task %u:\n", prefix, track->pid);
+#endif /* CONFIG_KASAN_EXTRA_INFO */
if (track->stack)
stack_depot_print(track->stack);
else
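(A worked example of the CONFIG_KASAN_EXTRA_INFO arithmetic above; the stored value is illustrative and assumes the track timestamp is packed as nanoseconds shifted right by 3 bits.)

#include <linux/math64.h>
#include <linux/printk.h>
#include <linux/time64.h>
#include <linux/types.h>

static void demo_decode_timestamp(void)
{
	u64 ts_nsec = 162500000ULL;	/* stands for 1,300,000,000 ns >> 3 */
	unsigned long rem_usec;

	ts_nsec <<= 3;					/* back to 1,300,000,000 ns */
	rem_usec = do_div(ts_nsec, NSEC_PER_SEC) / 1000;
	/* ts_nsec is now 1 (seconds), the remainder 300,000,000 ns -> 300,000 us. */
	pr_info("at %lu.%06lus\n", (unsigned long)ts_nsec, rem_usec);
	/* Prints "at 1.300000s". */
}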
@@ -623,37 +636,43 @@ void kasan_report_async(void)
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
- * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
- * canonical half of the address space) cause out-of-bounds shadow memory reads
- * before the actual access. For addresses in the low canonical half of the
- * address space, as well as most non-canonical addresses, that out-of-bounds
- * shadow memory access lands in the non-canonical part of the address space.
- * Help the user figure out what the original bogus pointer was.
+ * With compiler-based KASAN modes, accesses to bogus pointers (outside of the
+ * mapped kernel address space regions) cause faults when KASAN tries to check
+ * the shadow memory before the actual memory access. This results in cryptic
+ * GPF reports, which are hard for users to interpret. This hook helps users to
+ * figure out what the original bogus pointer was.
*/
void kasan_non_canonical_hook(unsigned long addr)
{
unsigned long orig_addr;
const char *bug_type;
+ /*
+ * All addresses that came as a result of the memory-to-shadow mapping
+ * (even for bogus pointers) must be >= KASAN_SHADOW_OFFSET.
+ */
if (addr < KASAN_SHADOW_OFFSET)
return;
- orig_addr = (addr - KASAN_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT;
+ orig_addr = (unsigned long)kasan_shadow_to_mem((void *)addr);
+
/*
* For faults near the shadow address for NULL, we can be fairly certain
* that this is a KASAN shadow memory access.
- * For faults that correspond to shadow for low canonical addresses, we
- * can still be pretty sure - that shadow region is a fairly narrow
- * chunk of the non-canonical address space.
- * But faults that look like shadow for non-canonical addresses are a
- * really large chunk of the address space. In that case, we still
- * print the decoded address, but make it clear that this is not
- * necessarily what's actually going on.
+ * For faults that correspond to the shadow for low or high canonical
+ * addresses, we can still be pretty sure: these shadow regions are a
+ * fairly narrow chunk of the address space.
+ * But the shadow for non-canonical addresses is a really large chunk
+ * of the address space. For this case, we still print the decoded
+ * address, but make it clear that this is not necessarily what's
+ * actually going on.
*/
if (orig_addr < PAGE_SIZE)
bug_type = "null-ptr-deref";
else if (orig_addr < TASK_SIZE)
bug_type = "probably user-memory-access";
+ else if (addr_in_shadow((void *)addr))
+ bug_type = "probably wild-memory-access";
else
bug_type = "maybe wild-memory-access";
pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c
index 99cbcd73cff7..f5b8e37b3805 100644
--- a/mm/kasan/report_generic.c
+++ b/mm/kasan/report_generic.c
@@ -110,7 +110,7 @@ static const char *get_shadow_bug_type(struct kasan_report_info *info)
bug_type = "use-after-free";
break;
case KASAN_SLAB_FREE:
- case KASAN_SLAB_FREETRACK:
+ case KASAN_SLAB_FREE_META:
bug_type = "slab-use-after-free";
break;
case KASAN_ALLOCA_LEFT:
@@ -173,8 +173,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info)
memcpy(&info->alloc_track, &alloc_meta->alloc_track,
sizeof(info->alloc_track));
- if (*(u8 *)kasan_mem_to_shadow(info->object) == KASAN_SLAB_FREETRACK) {
- /* Free meta must be present with KASAN_SLAB_FREETRACK. */
+ if (*(u8 *)kasan_mem_to_shadow(info->object) == KASAN_SLAB_FREE_META) {
+ /* Free meta must be present with KASAN_SLAB_FREE_META. */
free_meta = kasan_get_free_meta(info->cache, info->object);
memcpy(&info->free_track, &free_meta->free_track,
sizeof(info->free_track));
diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
index 8b8bfdb3cfdb..d15f8f580e2c 100644
--- a/mm/kasan/report_tags.c
+++ b/mm/kasan/report_tags.c
@@ -7,6 +7,7 @@
#include <linux/atomic.h>
#include "kasan.h"
+#include "../slab.h"
extern struct kasan_stack_ring stack_ring;
@@ -31,10 +32,6 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info)
unsigned long flags;
u64 pos;
struct kasan_stack_ring_entry *entry;
- void *ptr;
- u32 pid;
- depot_stack_handle_t stack;
- bool is_free;
bool alloc_found = false, free_found = false;
if ((!info->cache || !info->object) && !info->bug_type) {
@@ -61,18 +58,12 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info)
entry = &stack_ring.entries[i % stack_ring.size];
- /* Paired with smp_store_release() in save_stack_info(). */
- ptr = (void *)smp_load_acquire(&entry->ptr);
-
- if (kasan_reset_tag(ptr) != info->object ||
- get_tag(ptr) != get_tag(info->access_addr))
+ if (kasan_reset_tag(entry->ptr) != info->object ||
+ get_tag(entry->ptr) != get_tag(info->access_addr) ||
+ info->cache->object_size != entry->size)
continue;
- pid = READ_ONCE(entry->pid);
- stack = READ_ONCE(entry->stack);
- is_free = READ_ONCE(entry->is_free);
-
- if (is_free) {
+ if (entry->is_free) {
/*
* Second free of the same object.
* Give up on trying to find the alloc entry.
@@ -80,8 +71,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info)
if (free_found)
break;
- info->free_track.pid = pid;
- info->free_track.stack = stack;
+ memcpy(&info->free_track, &entry->track,
+ sizeof(info->free_track));
free_found = true;
/*
@@ -95,8 +86,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info)
if (alloc_found)
break;
- info->alloc_track.pid = pid;
- info->alloc_track.stack = stack;
+ memcpy(&info->alloc_track, &entry->track,
+ sizeof(info->alloc_track));
alloc_found = true;
/*
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index d687f09a7ae3..9ef84f31833f 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -130,15 +130,11 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
/*
* Perform shadow offset calculation based on untagged address, as
- * some of the callers (e.g. kasan_poison_object_data) pass tagged
+ * some of the callers (e.g. kasan_poison_new_object) pass tagged
* addresses to this function.
*/
addr = kasan_reset_tag(addr);
- /* Skip KFENCE memory if called explicitly outside of sl*b. */
- if (is_kfence_address(addr))
- return;
-
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
if (WARN_ON(size & KASAN_GRANULE_MASK))
@@ -149,7 +145,7 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
__memset(shadow_start, value, shadow_end - shadow_start);
}
-EXPORT_SYMBOL(kasan_poison);
+EXPORT_SYMBOL_GPL(kasan_poison);
#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *addr, size_t size)
@@ -170,19 +166,11 @@ void kasan_unpoison(const void *addr, size_t size, bool init)
/*
* Perform shadow offset calculation based on untagged address, as
- * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+ * some of the callers (e.g. kasan_unpoison_new_object) pass tagged
* addresses to this function.
*/
addr = kasan_reset_tag(addr);
- /*
- * Skip KFENCE memory if called explicitly outside of sl*b. Also note
- * that calls to ksize(), where size is not a multiple of machine-word
- * size, would otherwise poison the invalid portion of the word.
- */
- if (is_kfence_address(addr))
- return;
-
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index 7dcfe341d48e..d65d48b85f90 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -13,6 +13,8 @@
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
+#include <linux/sched/clock.h>
+#include <linux/stackdepot.h>
#include <linux/static_key.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -96,12 +98,13 @@ static void save_stack_info(struct kmem_cache *cache, void *object,
gfp_t gfp_flags, bool is_free)
{
unsigned long flags;
- depot_stack_handle_t stack;
+ depot_stack_handle_t stack, old_stack;
u64 pos;
struct kasan_stack_ring_entry *entry;
void *old_ptr;
- stack = kasan_save_stack(gfp_flags, true);
+ stack = kasan_save_stack(gfp_flags,
+ STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
/*
* Prevent save_stack_info() from modifying stack ring
@@ -120,17 +123,18 @@ next:
if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR))
goto next; /* Busy slot. */
- WRITE_ONCE(entry->size, cache->object_size);
- WRITE_ONCE(entry->pid, current->pid);
- WRITE_ONCE(entry->stack, stack);
- WRITE_ONCE(entry->is_free, is_free);
+ old_stack = entry->track.stack;
- /*
- * Paired with smp_load_acquire() in kasan_complete_mode_report_info().
- */
- smp_store_release(&entry->ptr, (s64)object);
+ entry->size = cache->object_size;
+ kasan_set_track(&entry->track, stack);
+ entry->is_free = is_free;
+
+ entry->ptr = object;
read_unlock_irqrestore(&stack_ring.lock, flags);
+
+ if (old_stack)
+ stack_depot_put(old_stack);
}
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 3872528d0963..8350f5c06f2e 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -463,11 +463,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
/* Set required slab fields. */
slab = virt_to_slab((void *)meta->addr);
slab->slab_cache = cache;
-#if defined(CONFIG_SLUB)
slab->objects = 1;
-#elif defined(CONFIG_SLAB)
- slab->s_mem = addr;
-#endif
/* Memory initialization. */
set_canary(meta);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 064654717843..3defe6713ef1 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -446,7 +446,8 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
hugepage_flags_enabled()) {
- if (hugepage_vma_check(vma, vm_flags, false, false, true))
+ if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
+ PMD_ORDER))
__khugepaged_enter(vma->vm_mm);
}
}
@@ -493,11 +494,6 @@ static void release_pte_folio(struct folio *folio)
folio_putback_lru(folio);
}
-static void release_pte_page(struct page *page)
-{
- release_pte_folio(page_folio(page));
-}
-
static void release_pte_pages(pte_t *pte, pte_t *_pte,
struct list_head *compound_pagelist)
{
@@ -686,6 +682,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
spinlock_t *ptl,
struct list_head *compound_pagelist)
{
+ struct folio *src_folio;
struct page *src_page;
struct page *tmp;
pte_t *_pte;
@@ -707,16 +704,17 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
}
} else {
src_page = pte_page(pteval);
- if (!PageCompound(src_page))
- release_pte_page(src_page);
+ src_folio = page_folio(src_page);
+ if (!folio_test_large(src_folio))
+ release_pte_folio(src_folio);
/*
* ptl mostly unnecessary, but preempt has to
* be disabled to update the per-cpu stats
- * inside page_remove_rmap().
+ * inside folio_remove_rmap_pte().
*/
spin_lock(ptl);
ptep_clear(vma->vm_mm, address, _pte);
- page_remove_rmap(src_page, vma, false);
+ folio_remove_rmap_pte(src_folio, src_page, vma);
spin_unlock(ptl);
free_page_and_swap_cache(src_page);
}
@@ -922,16 +920,16 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
if (!vma)
return SCAN_VMA_NULL;
- if (!transhuge_vma_suitable(vma, address))
+ if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
return SCAN_ADDRESS_RANGE;
- if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
- cc->is_khugepaged))
+ if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
+ cc->is_khugepaged, PMD_ORDER))
return SCAN_VMA_CHECK;
/*
* Anon VMA expected, the address may be unmapped then
 * remapped to file after khugepaged reacquired the mmap_lock.

*
- * hugepage_vma_check may return true for qualified file
+ * thp_vma_allowable_order may return true for qualified file
* vmas.
*/
if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
@@ -1089,6 +1087,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, _pmd;
pte_t *pte;
pgtable_t pgtable;
+ struct folio *folio;
struct page *hpage;
spinlock_t *pmd_ptl, *pte_ptl;
int result = SCAN_FAIL;
@@ -1139,6 +1138,9 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
* Prevent all access to pagetables with the exception of
* gup_fast later handled by the ptep_clear_flush and the VM
* handled by the anon_vma lock + PG_lock.
+ *
+	 * UFFDIO_MOVE is also prevented from racing thanks to the
+ * mmap_lock.
*/
mmap_write_lock(mm);
result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
@@ -1208,13 +1210,13 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
if (unlikely(result != SCAN_SUCCEED))
goto out_up_write;
+ folio = page_folio(hpage);
/*
- * spin_lock() below is not the equivalent of smp_wmb(), but
- * the smp_wmb() inside __SetPageUptodate() can be reused to
- * avoid the copy_huge_page writes to become visible after
- * the set_pmd_at() write.
+ * The smp_wmb() inside __folio_mark_uptodate() ensures the
+ * copy_huge_page writes become visible before the set_pmd_at()
+ * write.
*/
- __SetPageUptodate(hpage);
+ __folio_mark_uptodate(folio);
pgtable = pmd_pgtable(_pmd);
_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
@@ -1222,8 +1224,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
spin_lock(pmd_ptl);
BUG_ON(!pmd_none(*pmd));
- page_add_new_anon_rmap(hpage, vma, address);
- lru_cache_add_inactive_or_unevictable(hpage, vma);
+ folio_add_new_anon_rmap(folio, vma, address);
+ folio_add_lru_vma(folio, vma);
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, address, pmd, _pmd);
update_mmu_cache_pmd(vma, address, pmd);
@@ -1503,7 +1505,8 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
* and map it by a PMD, regardless of sysfs THP settings. As such, let's
* analogously elide sysfs THP settings here.
*/
- if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
+ if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
+ PMD_ORDER))
return SCAN_VMA_CHECK;
/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -1619,7 +1622,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
* PTE dirty? Shmem page is already dirty; file is read-only.
*/
ptep_clear(mm, addr, pte);
- page_remove_rmap(page, vma, false);
+ folio_remove_rmap_pte(folio, page, vma);
nr_ptes++;
}
@@ -2119,23 +2122,23 @@ immap_locked:
xas_lock_irq(&xas);
}
- nr = thp_nr_pages(hpage);
+ folio = page_folio(hpage);
+ nr = folio_nr_pages(folio);
if (is_shmem)
- __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
+ __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
else
- __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
+ __lruvec_stat_mod_folio(folio, NR_FILE_THPS, nr);
if (nr_none) {
- __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
+ __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_none);
/* nr_none is always 0 for non-shmem. */
- __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
+ __lruvec_stat_mod_folio(folio, NR_SHMEM, nr_none);
}
/*
* Mark hpage as uptodate before inserting it into the page cache so
 * that it isn't mistaken for a fallocated but unwritten page.
*/
- folio = page_folio(hpage);
folio_mark_uptodate(folio);
folio_ref_add(folio, HPAGE_PMD_NR - 1);
@@ -2145,7 +2148,7 @@ immap_locked:
/* Join all the small entries into a single multi-index entry. */
xas_set_order(&xas, start, HPAGE_PMD_ORDER);
- xas_store(&xas, hpage);
+ xas_store(&xas, folio);
WARN_ON_ONCE(xas_error(&xas));
xas_unlock_irq(&xas);
@@ -2156,7 +2159,7 @@ immap_locked:
retract_page_tables(mapping, start);
if (cc && !cc->is_khugepaged)
result = SCAN_PTE_MAPPED_HUGEPAGE;
- unlock_page(hpage);
+ folio_unlock(folio);
/*
* The collapse has succeeded, so free the old pages.
@@ -2368,7 +2371,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
progress++;
break;
}
- if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
+ if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
+ true, PMD_ORDER)) {
skip:
progress++;
continue;
@@ -2492,7 +2496,7 @@ static void khugepaged_do_scan(struct collapse_control *cc)
while (true) {
cond_resched();
- if (unlikely(kthread_should_stop() || try_to_freeze()))
+ if (unlikely(kthread_should_stop()))
break;
spin_lock(&khugepaged_mm_lock);
@@ -2705,7 +2709,8 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
*prev = vma;
- if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
+ if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
+ PMD_ORDER))
return -EINVAL;
cc = kmalloc(sizeof(*cc), GFP_KERNEL);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 5501363d6b31..6a540c2b27c5 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -14,17 +14,15 @@
* The following locks and mutexes are used by kmemleak:
*
* - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
- * del_state modifications and accesses to the object_tree_root (or
- * object_phys_tree_root). The object_list is the main list holding the
- * metadata (struct kmemleak_object) for the allocated memory blocks.
- * The object_tree_root and object_phys_tree_root are red
- * black trees used to look-up metadata based on a pointer to the
- * corresponding memory block. The object_phys_tree_root is for objects
- * allocated with physical address. The kmemleak_object structures are
- * added to the object_list and object_tree_root (or object_phys_tree_root)
- * in the create_object() function called from the kmemleak_alloc() (or
- * kmemleak_alloc_phys()) callback and removed in delete_object() called from
- * the kmemleak_free() callback
+ * del_state modifications and accesses to the object trees
+ * (object_tree_root, object_phys_tree_root, object_percpu_tree_root). The
+ * object_list is the main list holding the metadata (struct
+ * kmemleak_object) for the allocated memory blocks. The object trees are
+ * red black trees used to look-up metadata based on a pointer to the
+ * corresponding memory block. The kmemleak_object structures are added to
+ * the object_list and the object tree root in the create_object() function
+ * called from the kmemleak_alloc{,_phys,_percpu}() callback and removed in
+ * delete_object() called from the kmemleak_free{,_phys,_percpu}() callback
* - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
* Accesses to the metadata (e.g. count) are protected by this lock. Note
* that some members of this structure may be protected by other means
@@ -178,6 +176,8 @@ struct kmemleak_object {
#define OBJECT_FULL_SCAN (1 << 3)
/* flag set for object allocated with physical address */
#define OBJECT_PHYS (1 << 4)
+/* flag set for per-CPU pointers */
+#define OBJECT_PERCPU (1 << 5)
/* set when __remove_object() called */
#define DELSTATE_REMOVED (1 << 0)
@@ -206,6 +206,8 @@ static LIST_HEAD(mem_pool_free_list);
static struct rb_root object_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PHYS flag) boundaries */
static struct rb_root object_phys_tree_root = RB_ROOT;
+/* search tree for object (with OBJECT_PERCPU flag) boundaries */
+static struct rb_root object_percpu_tree_root = RB_ROOT;
/* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);
@@ -298,7 +300,7 @@ static void hex_dump_object(struct seq_file *seq,
const u8 *ptr = (const u8 *)object->pointer;
size_t len;
- if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
+ if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
return;
/* limit the number of lines to HEX_MAX_LINES */
@@ -355,16 +357,14 @@ static void print_unreferenced(struct seq_file *seq,
int i;
unsigned long *entries;
unsigned int nr_entries;
- unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
nr_entries = stack_depot_fetch(object->trace_handle, &entries);
warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
object->pointer, object->size);
- warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
- object->comm, object->pid, object->jiffies,
- msecs_age / 1000, msecs_age % 1000);
+ warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
+ object->comm, object->pid, object->jiffies);
hex_dump_object(seq, object);
- warn_or_seq_printf(seq, " backtrace:\n");
+ warn_or_seq_printf(seq, " backtrace (crc %x):\n", object->checksum);
for (i = 0; i < nr_entries; i++) {
void *ptr = (void *)entries[i];
@@ -392,6 +392,15 @@ static void dump_object_info(struct kmemleak_object *object)
stack_depot_print(object->trace_handle);
}
+static struct rb_root *object_tree(unsigned long objflags)
+{
+ if (objflags & OBJECT_PHYS)
+ return &object_phys_tree_root;
+ if (objflags & OBJECT_PERCPU)
+ return &object_percpu_tree_root;
+ return &object_tree_root;
+}
+
/*
* Look-up a memory block metadata (kmemleak_object) in the object search
* tree based on a pointer value. If alias is 0, only values pointing to the
@@ -399,10 +408,9 @@ static void dump_object_info(struct kmemleak_object *object)
* when calling this function.
*/
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
- bool is_phys)
+ unsigned int objflags)
{
- struct rb_node *rb = is_phys ? object_phys_tree_root.rb_node :
- object_tree_root.rb_node;
+ struct rb_node *rb = object_tree(objflags)->rb_node;
unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
while (rb) {
@@ -431,7 +439,7 @@ static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
/* Look-up a kmemleak object which allocated with virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
- return __lookup_object(ptr, alias, false);
+ return __lookup_object(ptr, alias, 0);
}
/*
@@ -544,14 +552,14 @@ static void put_object(struct kmemleak_object *object)
* Look up an object in the object search tree and increase its use_count.
*/
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
- bool is_phys)
+ unsigned int objflags)
{
unsigned long flags;
struct kmemleak_object *object;
rcu_read_lock();
raw_spin_lock_irqsave(&kmemleak_lock, flags);
- object = __lookup_object(ptr, alias, is_phys);
+ object = __lookup_object(ptr, alias, objflags);
raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
/* check whether the object is still available */
@@ -565,19 +573,16 @@ static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alia
/* Look up and get an object which allocated with virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
- return __find_and_get_object(ptr, alias, false);
+ return __find_and_get_object(ptr, alias, 0);
}
/*
- * Remove an object from the object_tree_root (or object_phys_tree_root)
- * and object_list. Must be called with the kmemleak_lock held _if_ kmemleak
- * is still enabled.
+ * Remove an object from its object tree and object_list. Must be called with
+ * the kmemleak_lock held _if_ kmemleak is still enabled.
*/
static void __remove_object(struct kmemleak_object *object)
{
- rb_erase(&object->rb_node, object->flags & OBJECT_PHYS ?
- &object_phys_tree_root :
- &object_tree_root);
+ rb_erase(&object->rb_node, object_tree(object->flags));
if (!(object->del_state & DELSTATE_NO_DELETE))
list_del_rcu(&object->object_list);
object->del_state |= DELSTATE_REMOVED;
@@ -585,11 +590,11 @@ static void __remove_object(struct kmemleak_object *object)
static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
int alias,
- bool is_phys)
+ unsigned int objflags)
{
struct kmemleak_object *object;
- object = __lookup_object(ptr, alias, is_phys);
+ object = __lookup_object(ptr, alias, objflags);
if (object)
__remove_object(object);
@@ -597,19 +602,18 @@ static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
}
/*
- * Look up an object in the object search tree and remove it from both
- * object_tree_root (or object_phys_tree_root) and object_list. The
- * returned object's use_count should be at least 1, as initially set
- * by create_object().
+ * Look up an object in the object search tree and remove it from both object
+ * tree root and object_list. The returned object's use_count should be at
+ * least 1, as initially set by create_object().
*/
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
- bool is_phys)
+ unsigned int objflags)
{
unsigned long flags;
struct kmemleak_object *object;
raw_spin_lock_irqsave(&kmemleak_lock, flags);
- object = __find_and_remove_object(ptr, alias, is_phys);
+ object = __find_and_remove_object(ptr, alias, objflags);
raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
return object;
@@ -680,7 +684,7 @@ static struct kmemleak_object *__alloc_object(gfp_t gfp)
}
static int __link_object(struct kmemleak_object *object, unsigned long ptr,
- size_t size, int min_count, bool is_phys)
+ size_t size, int min_count, unsigned int objflags)
{
struct kmemleak_object *parent;
@@ -688,7 +692,7 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
unsigned long untagged_ptr;
unsigned long untagged_objp;
- object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
+ object->flags = OBJECT_ALLOCATED | objflags;
object->pointer = ptr;
object->size = kfence_ksize((void *)ptr) ?: size;
object->min_count = min_count;
@@ -699,12 +703,11 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
* Only update min_addr and max_addr with object
* storing virtual address.
*/
- if (!is_phys) {
+ if (!(objflags & (OBJECT_PHYS | OBJECT_PERCPU))) {
min_addr = min(min_addr, untagged_ptr);
max_addr = max(max_addr, untagged_ptr + size);
}
- link = is_phys ? &object_phys_tree_root.rb_node :
- &object_tree_root.rb_node;
+ link = &object_tree(objflags)->rb_node;
rb_parent = NULL;
while (*link) {
rb_parent = *link;
@@ -726,8 +729,7 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
}
}
rb_link_node(&object->rb_node, rb_parent, link);
- rb_insert_color(&object->rb_node, is_phys ? &object_phys_tree_root :
- &object_tree_root);
+ rb_insert_color(&object->rb_node, object_tree(objflags));
list_add_tail_rcu(&object->object_list, &object_list);
return 0;
@@ -735,11 +737,10 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
/*
* Create the metadata (struct kmemleak_object) corresponding to an allocated
- * memory block and add it to the object_list and object_tree_root (or
- * object_phys_tree_root).
+ * memory block and add it to the object_list and object tree.
*/
static void __create_object(unsigned long ptr, size_t size,
- int min_count, gfp_t gfp, bool is_phys)
+ int min_count, gfp_t gfp, unsigned int objflags)
{
struct kmemleak_object *object;
unsigned long flags;
@@ -750,7 +751,7 @@ static void __create_object(unsigned long ptr, size_t size,
return;
raw_spin_lock_irqsave(&kmemleak_lock, flags);
- ret = __link_object(object, ptr, size, min_count, is_phys);
+ ret = __link_object(object, ptr, size, min_count, objflags);
raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
if (ret)
mem_pool_free(object);
@@ -760,14 +761,21 @@ static void __create_object(unsigned long ptr, size_t size,
static void create_object(unsigned long ptr, size_t size,
int min_count, gfp_t gfp)
{
- __create_object(ptr, size, min_count, gfp, false);
+ __create_object(ptr, size, min_count, gfp, 0);
}
/* Create kmemleak object which allocated with physical address. */
static void create_object_phys(unsigned long ptr, size_t size,
int min_count, gfp_t gfp)
{
- __create_object(ptr, size, min_count, gfp, true);
+ __create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
+}
+
+/* Create kmemleak object corresponding to a per-CPU allocation. */
+static void create_object_percpu(unsigned long ptr, size_t size,
+ int min_count, gfp_t gfp)
+{
+ __create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
}
/*
@@ -794,11 +802,11 @@ static void __delete_object(struct kmemleak_object *object)
* Look up the metadata (struct kmemleak_object) corresponding to ptr and
* delete it.
*/
-static void delete_object_full(unsigned long ptr)
+static void delete_object_full(unsigned long ptr, unsigned int objflags)
{
struct kmemleak_object *object;
- object = find_and_remove_object(ptr, 0, false);
+ object = find_and_remove_object(ptr, 0, objflags);
if (!object) {
#ifdef DEBUG
kmemleak_warn("Freeing unknown object at 0x%08lx\n",
@@ -814,7 +822,8 @@ static void delete_object_full(unsigned long ptr)
* delete it. If the memory block is partially freed, the function may create
* additional metadata for the remaining parts of the block.
*/
-static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
+static void delete_object_part(unsigned long ptr, size_t size,
+ unsigned int objflags)
{
struct kmemleak_object *object, *object_l, *object_r;
unsigned long start, end, flags;
@@ -828,7 +837,7 @@ static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
goto out;
raw_spin_lock_irqsave(&kmemleak_lock, flags);
- object = __find_and_remove_object(ptr, 1, is_phys);
+ object = __find_and_remove_object(ptr, 1, objflags);
if (!object) {
#ifdef DEBUG
kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
@@ -846,11 +855,11 @@ static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
end = object->pointer + object->size;
if ((ptr > start) &&
!__link_object(object_l, start, ptr - start,
- object->min_count, is_phys))
+ object->min_count, objflags))
object_l = NULL;
if ((ptr + size < end) &&
!__link_object(object_r, ptr + size, end - ptr - size,
- object->min_count, is_phys))
+ object->min_count, objflags))
object_r = NULL;
unlock:
@@ -881,11 +890,11 @@ static void paint_it(struct kmemleak_object *object, int color)
raw_spin_unlock_irqrestore(&object->lock, flags);
}
-static void paint_ptr(unsigned long ptr, int color, bool is_phys)
+static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
{
struct kmemleak_object *object;
- object = __find_and_get_object(ptr, 0, is_phys);
+ object = __find_and_get_object(ptr, 0, objflags);
if (!object) {
kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
ptr,
@@ -903,16 +912,16 @@ static void paint_ptr(unsigned long ptr, int color, bool is_phys)
*/
static void make_gray_object(unsigned long ptr)
{
- paint_ptr(ptr, KMEMLEAK_GREY, false);
+ paint_ptr(ptr, KMEMLEAK_GREY, 0);
}
/*
* Mark the object as black-colored so that it is ignored from scans and
* reporting.
*/
-static void make_black_object(unsigned long ptr, bool is_phys)
+static void make_black_object(unsigned long ptr, unsigned int objflags)
{
- paint_ptr(ptr, KMEMLEAK_BLACK, is_phys);
+ paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
}
/*
@@ -1048,8 +1057,6 @@ EXPORT_SYMBOL_GPL(kmemleak_alloc);
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
gfp_t gfp)
{
- unsigned int cpu;
-
pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);
/*
@@ -1057,9 +1064,7 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
* (min_count is set to 0).
*/
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
- for_each_possible_cpu(cpu)
- create_object((unsigned long)per_cpu_ptr(ptr, cpu),
- size, 0, gfp);
+ create_object_percpu((unsigned long)ptr, size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
@@ -1100,7 +1105,7 @@ void __ref kmemleak_free(const void *ptr)
pr_debug("%s(0x%px)\n", __func__, ptr);
if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
- delete_object_full((unsigned long)ptr);
+ delete_object_full((unsigned long)ptr, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
@@ -1118,7 +1123,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
pr_debug("%s(0x%px)\n", __func__, ptr);
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
- delete_object_part((unsigned long)ptr, size, false);
+ delete_object_part((unsigned long)ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);
@@ -1131,14 +1136,10 @@ EXPORT_SYMBOL_GPL(kmemleak_free_part);
*/
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
- unsigned int cpu;
-
pr_debug("%s(0x%px)\n", __func__, ptr);
if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
- for_each_possible_cpu(cpu)
- delete_object_full((unsigned long)per_cpu_ptr(ptr,
- cpu));
+ delete_object_full((unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
@@ -1208,7 +1209,7 @@ void __ref kmemleak_ignore(const void *ptr)
pr_debug("%s(0x%px)\n", __func__, ptr);
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
- make_black_object((unsigned long)ptr, false);
+ make_black_object((unsigned long)ptr, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
@@ -1282,7 +1283,7 @@ void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
pr_debug("%s(0x%px)\n", __func__, &phys);
if (kmemleak_enabled)
- delete_object_part((unsigned long)phys, size, true);
+ delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);
@@ -1296,7 +1297,7 @@ void __ref kmemleak_ignore_phys(phys_addr_t phys)
pr_debug("%s(0x%px)\n", __func__, &phys);
if (kmemleak_enabled)
- make_black_object((unsigned long)phys, true);
+ make_black_object((unsigned long)phys, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);
@@ -1307,7 +1308,7 @@ static bool update_checksum(struct kmemleak_object *object)
{
u32 old_csum = object->checksum;
- if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
+ if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
return false;
kasan_disable_current();
@@ -1463,7 +1464,6 @@ static void scan_object(struct kmemleak_object *object)
{
struct kmemleak_scan_area *area;
unsigned long flags;
- void *obj_ptr;
/*
* Once the object->lock is acquired, the corresponding memory block
@@ -1476,14 +1476,27 @@ static void scan_object(struct kmemleak_object *object)
/* already freed object */
goto out;
- obj_ptr = object->flags & OBJECT_PHYS ?
- __va((phys_addr_t)object->pointer) :
- (void *)object->pointer;
+ if (object->flags & OBJECT_PERCPU) {
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
+ void *end = start + object->size;
- if (hlist_empty(&object->area_list) ||
+ scan_block(start, end, object);
+
+ raw_spin_unlock_irqrestore(&object->lock, flags);
+ cond_resched();
+ raw_spin_lock_irqsave(&object->lock, flags);
+ if (!(object->flags & OBJECT_ALLOCATED))
+ break;
+ }
+ } else if (hlist_empty(&object->area_list) ||
object->flags & OBJECT_FULL_SCAN) {
- void *start = obj_ptr;
- void *end = obj_ptr + object->size;
+ void *start = object->flags & OBJECT_PHYS ?
+ __va((phys_addr_t)object->pointer) :
+ (void *)object->pointer;
+ void *end = start + object->size;
void *next;
do {
@@ -1498,11 +1511,12 @@ static void scan_object(struct kmemleak_object *object)
cond_resched();
raw_spin_lock_irqsave(&object->lock, flags);
} while (object->flags & OBJECT_ALLOCATED);
- } else
+ } else {
hlist_for_each_entry(area, &object->area_list, node)
scan_block((void *)area->start,
(void *)(area->start + area->size),
object);
+ }
out:
raw_spin_unlock_irqrestore(&object->lock, flags);
}
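
The per-CPU handling above changes what the kmemleak hooks track rather than how callers use them. As a point of reference, here is a minimal sketch (not part of the patch, and not buildable on its own) of the allocation pattern affected: the per-CPU allocator invokes kmemleak_alloc_percpu()/kmemleak_free_percpu() on behalf of callers, so after this change one OBJECT_PERCPU object covers the whole allocation instead of one object per possible CPU. The structure and function names below are illustrative only.

```c
/*
 * Hypothetical driver code, for illustration. Only the behaviour of the
 * kmemleak hooks (called internally by alloc_percpu()/free_percpu())
 * changes with this patch; the caller looks the same before and after.
 */
#include <linux/types.h>
#include <linux/percpu.h>

struct foo_stats {			/* illustrative structure */
	u64 rx_packets;
	u64 tx_packets;
};

static struct foo_stats __percpu *foo_alloc_stats(void)
{
	/* previously: one kmemleak object per possible CPU;
	 * now: a single OBJECT_PERCPU object scanned per-CPU at scan time */
	return alloc_percpu(struct foo_stats);
}

static void foo_free_stats(struct foo_stats __percpu *stats)
{
	/* frees the single OBJECT_PERCPU tracking object as well */
	free_percpu(stats);
}
```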
diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index c19f47af0424..cf2d70e9c9a5 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -76,7 +76,7 @@ depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
/* Don't sleep. */
flags &= ~(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM);
- handle = __stack_depot_save(entries, nr_entries, flags, true);
+ handle = stack_depot_save(entries, nr_entries, flags);
return stack_depot_set_extra_bits(handle, extra);
}
@@ -185,11 +185,10 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
/*
* @entries is a local var in non-instrumented code, so KMSAN does not
* know it is initialized. Explicitly unpoison it to avoid false
- * positives when __stack_depot_save() passes it to instrumented code.
+ * positives when stack_depot_save() passes it to instrumented code.
*/
kmsan_internal_unpoison_memory(entries, sizeof(entries), false);
- handle = __stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH,
- true);
+ handle = stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH);
return stack_depot_set_extra_bits(handle, extra_bits);
}
diff --git a/mm/kmsan/init.c b/mm/kmsan/init.c
index ffedf4dbc49d..3ac3b8921d36 100644
--- a/mm/kmsan/init.c
+++ b/mm/kmsan/init.c
@@ -96,7 +96,7 @@ void __init kmsan_init_shadow(void)
struct metadata_page_pair {
struct page *shadow, *origin;
};
-static struct metadata_page_pair held_back[MAX_ORDER + 1] __initdata;
+static struct metadata_page_pair held_back[NR_PAGE_ORDERS] __initdata;
/*
* Eager metadata allocation. When the memblock allocator is freeing pages to
@@ -141,7 +141,7 @@ struct smallstack {
static struct smallstack collect = {
.index = 0,
- .order = MAX_ORDER,
+ .order = MAX_PAGE_ORDER,
};
static void smallstack_push(struct smallstack *stack, struct page *pages)
@@ -211,8 +211,8 @@ static void kmsan_memblock_discard(void)
* order=N-1,
* - repeat.
*/
- collect.order = MAX_ORDER;
- for (int i = MAX_ORDER; i >= 0; i--) {
+ collect.order = MAX_PAGE_ORDER;
+ for (int i = MAX_PAGE_ORDER; i >= 0; i--) {
if (held_back[i].shadow)
smallstack_push(&collect, held_back[i].shadow);
if (held_back[i].origin)
diff --git a/mm/ksm.c b/mm/ksm.c
index 6a831009b4cb..8c001819cf10 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
+#include <linux/sched/cputime.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
@@ -248,6 +249,9 @@ static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;
+/* Default number of pages to scan per batch */
+#define DEFAULT_PAGES_TO_SCAN 100
+
/* The number of pages scanned */
static unsigned long ksm_pages_scanned;
@@ -276,7 +280,7 @@ static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;
static int ksm_max_page_sharing = 256;
/* Number of pages ksmd should scan in one batch */
-static unsigned int ksm_thread_pages_to_scan = 100;
+static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;
@@ -297,6 +301,172 @@ unsigned long ksm_zero_pages;
/* The number of pages that have been skipped due to "smart scanning" */
static unsigned long ksm_pages_skipped;
+/* Don't scan more than max pages per batch. */
+static unsigned long ksm_advisor_max_pages_to_scan = 30000;
+
+/* Min CPU for scanning pages per scan */
+#define KSM_ADVISOR_MIN_CPU 10
+
+/* Max CPU for scanning pages per scan */
+static unsigned int ksm_advisor_max_cpu = 70;
+
+/* Target scan time in seconds to analyze all KSM candidate pages. */
+static unsigned long ksm_advisor_target_scan_time = 200;
+
+/* Exponentially weighted moving average. */
+#define EWMA_WEIGHT 30
+
+/**
+ * struct advisor_ctx - metadata for KSM advisor
+ * @start_scan: start time of the current scan
+ * @scan_time: scan time of previous scan
+ * @change: change in percent to pages_to_scan parameter
+ * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
+ */
+struct advisor_ctx {
+ ktime_t start_scan;
+ unsigned long scan_time;
+ unsigned long change;
+ unsigned long long cpu_time;
+};
+static struct advisor_ctx advisor_ctx;
+
+/* Define the different advisor types */
+enum ksm_advisor_type {
+ KSM_ADVISOR_NONE,
+ KSM_ADVISOR_SCAN_TIME,
+};
+static enum ksm_advisor_type ksm_advisor;
+
+#ifdef CONFIG_SYSFS
+/*
+ * Only called through the sysfs control interface:
+ */
+
+/* At least scan this many pages per batch. */
+static unsigned long ksm_advisor_min_pages_to_scan = 500;
+
+static void set_advisor_defaults(void)
+{
+ if (ksm_advisor == KSM_ADVISOR_NONE) {
+ ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
+ } else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) {
+ advisor_ctx = (const struct advisor_ctx){ 0 };
+ ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan;
+ }
+}
+#endif /* CONFIG_SYSFS */
+
+static inline void advisor_start_scan(void)
+{
+ if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+ advisor_ctx.start_scan = ktime_get();
+}
+
+/*
+ * Use previous scan time if available, otherwise use current scan time as an
+ * approximation for the previous scan time.
+ */
+static inline unsigned long prev_scan_time(struct advisor_ctx *ctx,
+ unsigned long scan_time)
+{
+ return ctx->scan_time ? ctx->scan_time : scan_time;
+}
+
+/* Calculate exponential weighted moving average */
+static unsigned long ewma(unsigned long prev, unsigned long curr)
+{
+ return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
+}
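
As a sanity check on the weighting, a standalone userspace sketch (not part of the patch) of the same EWMA with the patch's EWMA_WEIGHT of 30:

```c
#include <stdio.h>

#define EWMA_WEIGHT 30	/* same weight as the patch */

/* identical formula to the kernel helper above */
static unsigned long ewma(unsigned long prev, unsigned long curr)
{
	return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
}

int main(void)
{
	/* previous change factor 100%, new measurement 200% -> smoothed 130% */
	printf("%lu\n", ewma(100, 200));	/* prints 130 */
	return 0;
}
```

A weight of 30 means a single outlier measurement only moves the smoothed change factor by roughly a third of the difference, which is what keeps pages_to_scan from oscillating.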
+
+/*
+ * The scan time advisor is based on the current scan rate and the target
+ * scan rate.
+ *
+ * new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time)
+ *
+ * To avoid perturbations it calculates a change factor of previous changes.
+ * A new change factor is calculated for each iteration and it uses an
+ * exponentially weighted moving average. The new pages_to_scan value is
+ * multiplied with that change factor:
+ *
+ * new_pages_to_scan *= change factor
+ *
+ * The new_pages_to_scan value is limited by the cpu min and max values. It
+ * calculates the cpu percent for the last scan and calculates the new
+ * estimated cpu percent cost for the next scan. That value is capped by the
+ * cpu min and max setting.
+ *
+ * In addition the new pages_to_scan value is capped by the max and min
+ * limits.
+ */
+static void scan_time_advisor(void)
+{
+ unsigned int cpu_percent;
+ unsigned long cpu_time;
+ unsigned long cpu_time_diff;
+ unsigned long cpu_time_diff_ms;
+ unsigned long pages;
+ unsigned long per_page_cost;
+ unsigned long factor;
+ unsigned long change;
+ unsigned long last_scan_time;
+ unsigned long scan_time;
+
+ /* Convert scan time to seconds */
+ scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan),
+ MSEC_PER_SEC);
+ scan_time = scan_time ? scan_time : 1;
+
+ /* Calculate CPU consumption of ksmd background thread */
+ cpu_time = task_sched_runtime(current);
+ cpu_time_diff = cpu_time - advisor_ctx.cpu_time;
+ cpu_time_diff_ms = cpu_time_diff / 1000 / 1000;
+
+ cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000);
+ cpu_percent = cpu_percent ? cpu_percent : 1;
+ last_scan_time = prev_scan_time(&advisor_ctx, scan_time);
+
+ /* Calculate scan time as percentage of target scan time */
+ factor = ksm_advisor_target_scan_time * 100 / scan_time;
+ factor = factor ? factor : 1;
+
+ /*
+	 * Calculate scan time as percentage of last scan time and use an
+	 * exponentially weighted moving average to smooth it
+ */
+ change = scan_time * 100 / last_scan_time;
+ change = change ? change : 1;
+ change = ewma(advisor_ctx.change, change);
+
+ /* Calculate new scan rate based on target scan rate. */
+ pages = ksm_thread_pages_to_scan * 100 / factor;
+ /* Update pages_to_scan by weighted change percentage. */
+ pages = pages * change / 100;
+
+ /* Cap new pages_to_scan value */
+ per_page_cost = ksm_thread_pages_to_scan / cpu_percent;
+ per_page_cost = per_page_cost ? per_page_cost : 1;
+
+ pages = min(pages, per_page_cost * ksm_advisor_max_cpu);
+ pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU);
+ pages = min(pages, ksm_advisor_max_pages_to_scan);
+
+ /* Update advisor context */
+ advisor_ctx.change = change;
+ advisor_ctx.scan_time = scan_time;
+ advisor_ctx.cpu_time = cpu_time;
+
+ ksm_thread_pages_to_scan = pages;
+ trace_ksm_advisor(scan_time, pages, cpu_percent);
+}
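
To make the advisor arithmetic concrete, here is a standalone userspace sketch of one iteration with made-up inputs; only the scaling formulas mirror the patch, and the CPU-based capping is left out.

```c
#include <stdio.h>

int main(void)
{
	/* Illustrative inputs, not taken from a real scan */
	unsigned long pages_to_scan = 500;	/* current batch size */
	unsigned long scan_time = 400;		/* last full pass took 400 s */
	unsigned long target_scan_time = 200;	/* ksm_advisor_target_scan_time */
	unsigned long change = 100;		/* EWMA-smoothed change factor, % */

	/* factor = target as a percentage of the measured scan time */
	unsigned long factor = target_scan_time * 100 / scan_time;	/* 50 */

	/* scale pages_to_scan toward the target scan time ... */
	unsigned long pages = pages_to_scan * 100 / factor;		/* 1000 */
	/* ... then apply the smoothed change factor */
	pages = pages * change / 100;					/* 1000 */

	printf("new pages_to_scan = %lu\n", pages);
	return 0;
}
```

A pass that took twice the target time doubles the batch size, so the next full pass should finish closer to ksm_advisor_target_scan_time; the min/max CPU and min/max pages caps in scan_time_advisor() then bound that adjustment.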
+
+static void advisor_stop_scan(void)
+{
+ if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+ scan_time_advisor();
+}
+
#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
@@ -1099,9 +1269,9 @@ error:
static u32 calc_checksum(struct page *page)
{
u32 checksum;
- void *addr = kmap_atomic(page);
+ void *addr = kmap_local_page(page);
checksum = xxhash(addr, PAGE_SIZE, 0);
- kunmap_atomic(addr);
+ kunmap_local(addr);
return checksum;
}
@@ -1161,8 +1331,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
goto out_unlock;
}
- /* See page_try_share_anon_rmap(): clear PTE first. */
- if (anon_exclusive && page_try_share_anon_rmap(page)) {
+ /* See folio_try_share_anon_rmap_pte(): clear PTE first. */
+ if (anon_exclusive &&
+ folio_try_share_anon_rmap_pte(page_folio(page), page)) {
set_pte_at(mm, pvmw.address, pvmw.pte, entry);
goto out_unlock;
}
@@ -1199,6 +1370,7 @@ out:
static int replace_page(struct vm_area_struct *vma, struct page *page,
struct page *kpage, pte_t orig_pte)
{
+ struct folio *kfolio = page_folio(kpage);
struct mm_struct *mm = vma->vm_mm;
struct folio *folio;
pmd_t *pmd;
@@ -1238,15 +1410,16 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
goto out_mn;
}
VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
- VM_BUG_ON_PAGE(PageAnon(kpage) && PageAnonExclusive(kpage), kpage);
+ VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage),
+ kfolio);
/*
* No need to check ksm_use_zero_pages here: we can only have a
* zero_page here if ksm_use_zero_pages was enabled already.
*/
if (!is_zero_pfn(page_to_pfn(kpage))) {
- get_page(kpage);
- page_add_anon_rmap(kpage, vma, addr, RMAP_NONE);
+ folio_get(kfolio);
+ folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
newpte = mk_pte(kpage, vma->vm_page_prot);
} else {
/*
@@ -1277,7 +1450,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
set_pte_at_notify(mm, addr, ptep, newpte);
folio = page_folio(page);
- page_remove_rmap(page, vma, false);
+ folio_remove_rmap_pte(folio, page, vma);
if (!folio_mapped(folio))
folio_free_swap(folio);
folio_put(folio);
@@ -2401,6 +2574,7 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
mm_slot = ksm_scan.mm_slot;
if (mm_slot == &ksm_mm_head) {
+ advisor_start_scan();
trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
/*
@@ -2558,6 +2732,8 @@ no_vmas:
if (mm_slot != &ksm_mm_head)
goto next_mm;
+ advisor_stop_scan();
+
trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items);
ksm_scan.seqnr++;
return NULL;
@@ -2604,11 +2780,9 @@ static int ksm_scan_thread(void *nothing)
ksm_do_scan(ksm_thread_pages_to_scan);
mutex_unlock(&ksm_thread_mutex);
- try_to_freeze();
-
if (ksmd_should_run()) {
sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
- wait_event_interruptible_timeout(ksm_iter_wait,
+ wait_event_freezable_timeout(ksm_iter_wait,
sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
msecs_to_jiffies(sleep_ms));
} else {
@@ -2875,49 +3049,53 @@ void __ksm_exit(struct mm_struct *mm)
trace_ksm_exit(mm);
}
-struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr)
{
- struct folio *folio = page_folio(page);
+ struct page *page = folio_page(folio, 0);
struct anon_vma *anon_vma = folio_anon_vma(folio);
- struct page *new_page;
+ struct folio *new_folio;
- if (PageKsm(page)) {
- if (page_stable_node(page) &&
+ if (folio_test_large(folio))
+ return folio;
+
+ if (folio_test_ksm(folio)) {
+ if (folio_stable_node(folio) &&
!(ksm_run & KSM_RUN_UNMERGE))
- return page; /* no need to copy it */
+ return folio; /* no need to copy it */
} else if (!anon_vma) {
- return page; /* no need to copy it */
- } else if (page->index == linear_page_index(vma, address) &&
+ return folio; /* no need to copy it */
+ } else if (folio->index == linear_page_index(vma, addr) &&
anon_vma->root == vma->anon_vma->root) {
- return page; /* still no need to copy it */
+ return folio; /* still no need to copy it */
}
if (PageHWPoison(page))
return ERR_PTR(-EHWPOISON);
- if (!PageUptodate(page))
- return page; /* let do_swap_page report the error */
-
- new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
- if (new_page &&
- mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) {
- put_page(new_page);
- new_page = NULL;
- }
- if (new_page) {
- if (copy_mc_user_highpage(new_page, page, address, vma)) {
- put_page(new_page);
- memory_failure_queue(page_to_pfn(page), 0);
+ if (!folio_test_uptodate(folio))
+ return folio; /* let do_swap_page report the error */
+
+ new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+ if (new_folio &&
+ mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
+ folio_put(new_folio);
+ new_folio = NULL;
+ }
+ if (new_folio) {
+ if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
+ addr, vma)) {
+ folio_put(new_folio);
+ memory_failure_queue(folio_pfn(folio), 0);
return ERR_PTR(-EHWPOISON);
}
- SetPageDirty(new_page);
- __SetPageUptodate(new_page);
- __SetPageLocked(new_page);
+ folio_set_dirty(new_folio);
+ __folio_mark_uptodate(new_folio);
+ __folio_set_locked(new_folio);
#ifdef CONFIG_SWAP
count_vm_event(KSM_SWPIN_COPY);
#endif
}
- return new_page;
+ return new_folio;
}
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
@@ -3244,6 +3422,9 @@ static ssize_t pages_to_scan_store(struct kobject *kobj,
unsigned int nr_pages;
int err;
+ if (ksm_advisor != KSM_ADVISOR_NONE)
+ return -EINVAL;
+
err = kstrtouint(buf, 10, &nr_pages);
if (err)
return -EINVAL;
@@ -3563,6 +3744,130 @@ static ssize_t smart_scan_store(struct kobject *kobj,
}
KSM_ATTR(smart_scan);
+static ssize_t advisor_mode_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ const char *output;
+
+ if (ksm_advisor == KSM_ADVISOR_NONE)
+ output = "[none] scan-time";
+ else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+ output = "none [scan-time]";
+
+ return sysfs_emit(buf, "%s\n", output);
+}
+
+static ssize_t advisor_mode_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf,
+ size_t count)
+{
+ enum ksm_advisor_type curr_advisor = ksm_advisor;
+
+ if (sysfs_streq("scan-time", buf))
+ ksm_advisor = KSM_ADVISOR_SCAN_TIME;
+ else if (sysfs_streq("none", buf))
+ ksm_advisor = KSM_ADVISOR_NONE;
+ else
+ return -EINVAL;
+
+ /* Set advisor default values */
+ if (curr_advisor != ksm_advisor)
+ set_advisor_defaults();
+
+ return count;
+}
+KSM_ATTR(advisor_mode);
+
+static ssize_t advisor_max_cpu_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu);
+}
+
+static ssize_t advisor_max_cpu_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long value;
+
+ err = kstrtoul(buf, 10, &value);
+ if (err)
+ return -EINVAL;
+
+ ksm_advisor_max_cpu = value;
+ return count;
+}
+KSM_ATTR(advisor_max_cpu);
+
+static ssize_t advisor_min_pages_to_scan_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan);
+}
+
+static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long value;
+
+ err = kstrtoul(buf, 10, &value);
+ if (err)
+ return -EINVAL;
+
+ ksm_advisor_min_pages_to_scan = value;
+ return count;
+}
+KSM_ATTR(advisor_min_pages_to_scan);
+
+static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan);
+}
+
+static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long value;
+
+ err = kstrtoul(buf, 10, &value);
+ if (err)
+ return -EINVAL;
+
+ ksm_advisor_max_pages_to_scan = value;
+ return count;
+}
+KSM_ATTR(advisor_max_pages_to_scan);
+
+static ssize_t advisor_target_scan_time_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time);
+}
+
+static ssize_t advisor_target_scan_time_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long value;
+
+ err = kstrtoul(buf, 10, &value);
+ if (err)
+ return -EINVAL;
+ if (value < 1)
+ return -EINVAL;
+
+ ksm_advisor_target_scan_time = value;
+ return count;
+}
+KSM_ATTR(advisor_target_scan_time);
+
static struct attribute *ksm_attrs[] = {
&sleep_millisecs_attr.attr,
&pages_to_scan_attr.attr,
@@ -3585,6 +3890,11 @@ static struct attribute *ksm_attrs[] = {
&use_zero_pages_attr.attr,
&general_profit_attr.attr,
&smart_scan_attr.attr,
+ &advisor_mode_attr.attr,
+ &advisor_max_cpu_attr.attr,
+ &advisor_min_pages_to_scan_attr.attr,
+ &advisor_max_pages_to_scan_attr.attr,
+ &advisor_target_scan_time_attr.attr,
NULL,
};
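
The new advisor knobs are driven through the KSM sysfs directory like the existing attributes. A minimal userspace sketch of enabling the scan-time advisor (the paths assume the usual /sys/kernel/mm/ksm location; error handling is trimmed and the chosen values are examples):

```c
#include <stdio.h>

/* Sketch only: write a string to a sysfs attribute. */
static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	write_str("/sys/kernel/mm/ksm/advisor_mode", "scan-time");
	write_str("/sys/kernel/mm/ksm/advisor_max_cpu", "50");
	write_str("/sys/kernel/mm/ksm/advisor_target_scan_time", "180");
	return 0;
}
```

Note that once an advisor is active, direct writes to pages_to_scan return -EINVAL, as the pages_to_scan_store() hunk above shows.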
diff --git a/mm/list_lru.c b/mm/list_lru.c
index a05e5bef3b40..35b0147542a9 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -59,28 +59,6 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
}
return &lru->node[nid].lru;
}
-
-static inline struct list_lru_one *
-list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
- struct mem_cgroup **memcg_ptr)
-{
- struct list_lru_node *nlru = &lru->node[nid];
- struct list_lru_one *l = &nlru->lru;
- struct mem_cgroup *memcg = NULL;
-
- if (!list_lru_memcg_aware(lru))
- goto out;
-
- memcg = mem_cgroup_from_slab_obj(ptr);
- if (!memcg)
- goto out;
-
- l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
-out:
- if (memcg_ptr)
- *memcg_ptr = memcg;
- return l;
-}
#else
static void list_lru_register(struct list_lru *lru)
{
@@ -105,32 +83,21 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
return &lru->node[nid].lru;
}
-
-static inline struct list_lru_one *
-list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
- struct mem_cgroup **memcg_ptr)
-{
- if (memcg_ptr)
- *memcg_ptr = NULL;
- return &lru->node[nid].lru;
-}
#endif /* CONFIG_MEMCG_KMEM */
-bool list_lru_add(struct list_lru *lru, struct list_head *item)
+bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg)
{
- int nid = page_to_nid(virt_to_page(item));
struct list_lru_node *nlru = &lru->node[nid];
- struct mem_cgroup *memcg;
struct list_lru_one *l;
spin_lock(&nlru->lock);
if (list_empty(item)) {
- l = list_lru_from_kmem(lru, nid, item, &memcg);
+ l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
list_add_tail(item, &l->list);
/* Set shrinker bit if the first element was added */
if (!l->nr_items++)
- set_shrinker_bit(memcg, nid,
- lru_shrinker_id(lru));
+ set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
nlru->nr_items++;
spin_unlock(&nlru->lock);
return true;
@@ -140,15 +107,25 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
}
EXPORT_SYMBOL_GPL(list_lru_add);
-bool list_lru_del(struct list_lru *lru, struct list_head *item)
+bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
{
int nid = page_to_nid(virt_to_page(item));
+ struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
+ mem_cgroup_from_slab_obj(item) : NULL;
+
+ return list_lru_add(lru, item, nid, memcg);
+}
+EXPORT_SYMBOL_GPL(list_lru_add_obj);
+
+bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg)
+{
struct list_lru_node *nlru = &lru->node[nid];
struct list_lru_one *l;
spin_lock(&nlru->lock);
if (!list_empty(item)) {
- l = list_lru_from_kmem(lru, nid, item, NULL);
+ l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
list_del_init(item);
l->nr_items--;
nlru->nr_items--;
@@ -160,6 +137,16 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
}
EXPORT_SYMBOL_GPL(list_lru_del);
+bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
+{
+ int nid = page_to_nid(virt_to_page(item));
+ struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
+ mem_cgroup_from_slab_obj(item) : NULL;
+
+ return list_lru_del(lru, item, nid, memcg);
+}
+EXPORT_SYMBOL_GPL(list_lru_del_obj);
+
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
list_del_init(item);
@@ -175,6 +162,20 @@ void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
+void list_lru_putback(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg)
+{
+ struct list_lru_one *list =
+ list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
+
+ if (list_empty(item)) {
+ list_add_tail(item, &list->list);
+ if (!list->nr_items++)
+ set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
+ }
+}
+EXPORT_SYMBOL_GPL(list_lru_putback);
+
unsigned long list_lru_count_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg)
{
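
Before the next file, a hedged sketch of how callers adapt to the reworked list_lru API above: list_lru_add() and list_lru_del() now take the node id and memcg explicitly, while the new *_obj() helpers keep the old derive-from-the-object behaviour. The cache structure and function names below are hypothetical; only the list_lru calls reflect this patch.

```c
#include <linux/list.h>
#include <linux/list_lru.h>

struct foo_object {			/* hypothetical cache object */
	struct list_head lru;
	/* ... */
};

/* Simple callers keep the old one-argument flavour via the *_obj helpers */
static void foo_cache_insert(struct list_lru *lru, struct foo_object *obj)
{
	list_lru_add_obj(lru, &obj->lru);	/* nid/memcg derived from obj */
}

/*
 * Callers that already know the node and memcg (for example because the
 * object is not slab-backed) pass them explicitly and skip the lookup.
 */
static void foo_cache_insert_known(struct list_lru *lru, struct foo_object *obj,
				   int nid, struct mem_cgroup *memcg)
{
	list_lru_add(lru, &obj->lru, nid, memcg);
}
```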
diff --git a/mm/madvise.c b/mm/madvise.c
index 6214a1ab5654..912155a94ed5 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -180,7 +180,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
for (addr = start; addr < end; addr += PAGE_SIZE) {
pte_t pte;
swp_entry_t entry;
- struct page *page;
+ struct folio *folio;
if (!ptep++) {
ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ -198,10 +198,10 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
pte_unmap_unlock(ptep, ptl);
ptep = NULL;
- page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
+ folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
vma, addr, &splug);
- if (page)
- put_page(page);
+ if (folio)
+ folio_put(folio);
}
if (ptep)
@@ -223,17 +223,17 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
{
XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
pgoff_t end_index = linear_page_index(vma, end) - 1;
- struct page *page;
+ struct folio *folio;
struct swap_iocb *splug = NULL;
rcu_read_lock();
- xas_for_each(&xas, page, end_index) {
+ xas_for_each(&xas, folio, end_index) {
unsigned long addr;
swp_entry_t entry;
- if (!xa_is_value(page))
+ if (!xa_is_value(folio))
continue;
- entry = radix_to_swp_entry(page);
+ entry = radix_to_swp_entry(folio);
/* There might be swapin error entries in shmem mapping. */
if (non_swap_entry(entry))
continue;
@@ -243,10 +243,10 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
xas_pause(&xas);
rcu_read_unlock();
- page = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
+ folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
vma, addr, &splug);
- if (page)
- put_page(page);
+ if (folio)
+ folio_put(folio);
rcu_read_lock();
}
diff --git a/mm/memblock.c b/mm/memblock.c
index 5a88d6d24d79..8c194d8afeec 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -735,6 +735,40 @@ int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
}
/**
+ * memblock_validate_numa_coverage - check if amount of memory with
+ * no node ID assigned is less than a threshold
+ * @threshold_bytes: maximal amount of memory (in bytes) that is allowed to
+ * have no node ID assigned.
+ *
+ * A buggy firmware may report memory that does not belong to any node.
+ * Check if amount of such memory is below @threshold_bytes.
+ *
+ * Return: true on success, false on failure.
+ */
+bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_bytes)
+{
+ unsigned long nr_pages = 0;
+ unsigned long start_pfn, end_pfn, mem_size_mb;
+ int nid, i;
+
+	/* count pages that have no node ID assigned */
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+ if (nid == NUMA_NO_NODE)
+ nr_pages += end_pfn - start_pfn;
+ }
+
+ if ((nr_pages << PAGE_SHIFT) >= threshold_bytes) {
+ mem_size_mb = memblock_phys_mem_size() >> 20;
+ pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
+ (nr_pages << PAGE_SHIFT) >> 20, mem_size_mb);
+ return false;
+ }
+
+ return true;
+}
+
+
+/**
* memblock_isolate_range - isolate given range into disjoint memblocks
* @type: memblock type to isolate range for
* @base: base of range to isolate
@@ -2079,12 +2113,13 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
* Free the pages in the largest chunks alignment allows.
*
* __ffs() behaviour is undefined for 0. start == 0 is
- * MAX_ORDER-aligned, set order to MAX_ORDER for the case.
+ * MAX_PAGE_ORDER-aligned, set order to MAX_PAGE_ORDER for
+ * the case.
*/
if (start)
- order = min_t(int, MAX_ORDER, __ffs(start));
+ order = min_t(int, MAX_PAGE_ORDER, __ffs(start));
else
- order = MAX_ORDER;
+ order = MAX_PAGE_ORDER;
while (start + (1UL << order) > end)
order--;
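
A sketch of how early NUMA setup code might use the new helper; the caller, the SZ_1M threshold, and the error handling are illustrative, not taken from this patch.

```c
#include <linux/memblock.h>
#include <linux/sizes.h>

/*
 * Illustrative caller: bail out of NUMA registration when more than 1 MiB
 * of memory has no node assigned (threshold chosen only for the example).
 */
static int __init example_numa_register(void)
{
	if (!memblock_validate_numa_coverage(SZ_1M))
		return -EINVAL;

	/* ... continue registering nodes ... */
	return 0;
}
```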
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b226090fd906..e4c8735e7c85 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -64,6 +64,7 @@
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include <linux/sched/isolation.h>
+#include <linux/kmemleak.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
@@ -573,116 +574,6 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
return mz;
}
-/*
- * memcg and lruvec stats flushing
- *
- * Many codepaths leading to stats update or read are performance sensitive and
- * adding stats flushing in such codepaths is not desirable. So, to optimize the
- * flushing the kernel does:
- *
- * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
- * rstat update tree grow unbounded.
- *
- * 2) Flush the stats synchronously on reader side only when there are more than
- * (MEMCG_CHARGE_BATCH * nr_cpus) update events. Though this optimization
- * will let stats be out of sync by atmost (MEMCG_CHARGE_BATCH * nr_cpus) but
- * only for 2 seconds due to (1).
- */
-static void flush_memcg_stats_dwork(struct work_struct *w);
-static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
-static DEFINE_PER_CPU(unsigned int, stats_updates);
-static atomic_t stats_flush_ongoing = ATOMIC_INIT(0);
-static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
-static u64 flush_next_time;
-
-#define FLUSH_TIME (2UL*HZ)
-
-/*
- * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
- * not rely on this as part of an acquired spinlock_t lock. These functions are
- * never used in hardirq context on PREEMPT_RT and therefore disabling preemtion
- * is sufficient.
- */
-static void memcg_stats_lock(void)
-{
- preempt_disable_nested();
- VM_WARN_ON_IRQS_ENABLED();
-}
-
-static void __memcg_stats_lock(void)
-{
- preempt_disable_nested();
-}
-
-static void memcg_stats_unlock(void)
-{
- preempt_enable_nested();
-}
-
-static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
-{
- unsigned int x;
-
- if (!val)
- return;
-
- cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
-
- x = __this_cpu_add_return(stats_updates, abs(val));
- if (x > MEMCG_CHARGE_BATCH) {
- /*
- * If stats_flush_threshold exceeds the threshold
- * (>num_online_cpus()), cgroup stats update will be triggered
- * in __mem_cgroup_flush_stats(). Increasing this var further
- * is redundant and simply adds overhead in atomic update.
- */
- if (atomic_read(&stats_flush_threshold) <= num_online_cpus())
- atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
- __this_cpu_write(stats_updates, 0);
- }
-}
-
-static void do_flush_stats(void)
-{
- /*
- * We always flush the entire tree, so concurrent flushers can just
- * skip. This avoids a thundering herd problem on the rstat global lock
- * from memcg flushers (e.g. reclaim, refault, etc).
- */
- if (atomic_read(&stats_flush_ongoing) ||
- atomic_xchg(&stats_flush_ongoing, 1))
- return;
-
- WRITE_ONCE(flush_next_time, jiffies_64 + 2*FLUSH_TIME);
-
- cgroup_rstat_flush(root_mem_cgroup->css.cgroup);
-
- atomic_set(&stats_flush_threshold, 0);
- atomic_set(&stats_flush_ongoing, 0);
-}
-
-void mem_cgroup_flush_stats(void)
-{
- if (atomic_read(&stats_flush_threshold) > num_online_cpus())
- do_flush_stats();
-}
-
-void mem_cgroup_flush_stats_ratelimited(void)
-{
- if (time_after64(jiffies_64, READ_ONCE(flush_next_time)))
- mem_cgroup_flush_stats();
-}
-
-static void flush_memcg_stats_dwork(struct work_struct *w)
-{
- /*
- * Always flush here so that flushing in latency-sensitive paths is
- * as cheap as possible.
- */
- do_flush_stats();
- queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
-}
-
/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
PGPGIN,
@@ -703,6 +594,7 @@ static const unsigned int memcg_vm_event_stat[] = {
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
ZSWPIN,
ZSWPOUT,
+ ZSWPWB,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
THP_FAULT_ALLOC,
@@ -740,6 +632,9 @@ struct memcg_vmstats_percpu {
/* Cgroup1: threshold notifications & softlimit tree updates */
unsigned long nr_page_events;
unsigned long targets[MEM_CGROUP_NTARGETS];
+
+ /* Stats updates since the last flush */
+ unsigned int stats_updates;
};
struct memcg_vmstats {
@@ -754,8 +649,134 @@ struct memcg_vmstats {
/* Pending child counts during tree propagation */
long state_pending[MEMCG_NR_STAT];
unsigned long events_pending[NR_MEMCG_EVENTS];
+
+ /* Stats updates since the last flush */
+ atomic64_t stats_updates;
};
+/*
+ * memcg and lruvec stats flushing
+ *
+ * Many codepaths leading to stats update or read are performance sensitive and
+ * adding stats flushing in such codepaths is not desirable. So, to optimize the
+ * flushing the kernel does:
+ *
+ * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
+ * rstat update tree grow unbounded.
+ *
+ * 2) Flush the stats synchronously on reader side only when there are more than
+ * (MEMCG_CHARGE_BATCH * nr_cpus) update events. Though this optimization
+ * will let stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) but
+ * only for 2 seconds due to (1).
+ */
+static void flush_memcg_stats_dwork(struct work_struct *w);
+static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
+static u64 flush_last_time;
+
+#define FLUSH_TIME (2UL*HZ)
+
+/*
+ * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
+ * not rely on this as part of an acquired spinlock_t lock. These functions are
+ * never used in hardirq context on PREEMPT_RT and therefore disabling preemption
+ * is sufficient.
+ */
+static void memcg_stats_lock(void)
+{
+ preempt_disable_nested();
+ VM_WARN_ON_IRQS_ENABLED();
+}
+
+static void __memcg_stats_lock(void)
+{
+ preempt_disable_nested();
+}
+
+static void memcg_stats_unlock(void)
+{
+ preempt_enable_nested();
+}
+
+
+static bool memcg_should_flush_stats(struct mem_cgroup *memcg)
+{
+ return atomic64_read(&memcg->vmstats->stats_updates) >
+ MEMCG_CHARGE_BATCH * num_online_cpus();
+}
+
+static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
+{
+ int cpu = smp_processor_id();
+ unsigned int x;
+
+ if (!val)
+ return;
+
+ cgroup_rstat_updated(memcg->css.cgroup, cpu);
+
+ for (; memcg; memcg = parent_mem_cgroup(memcg)) {
+ x = __this_cpu_add_return(memcg->vmstats_percpu->stats_updates,
+ abs(val));
+
+ if (x < MEMCG_CHARGE_BATCH)
+ continue;
+
+ /*
+ * If @memcg is already flush-able, increasing stats_updates is
+ * redundant. Avoid the overhead of the atomic update.
+ */
+ if (!memcg_should_flush_stats(memcg))
+ atomic64_add(x, &memcg->vmstats->stats_updates);
+ __this_cpu_write(memcg->vmstats_percpu->stats_updates, 0);
+ }
+}
+
+static void do_flush_stats(struct mem_cgroup *memcg)
+{
+ if (mem_cgroup_is_root(memcg))
+ WRITE_ONCE(flush_last_time, jiffies_64);
+
+ cgroup_rstat_flush(memcg->css.cgroup);
+}
+
+/*
+ * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
+ * @memcg: root of the subtree to flush
+ *
+ * Flushing is serialized by the underlying global rstat lock. There is also a
+ * minimum amount of work to be done even if there are no stat updates to flush.
+ * Hence, we only flush the stats if the updates delta exceeds a threshold. This
+ * avoids unnecessary work and contention on the underlying lock.
+ */
+void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
+{
+ if (mem_cgroup_disabled())
+ return;
+
+ if (!memcg)
+ memcg = root_mem_cgroup;
+
+ if (memcg_should_flush_stats(memcg))
+ do_flush_stats(memcg);
+}
+
+void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
+{
+ /* Only flush if the periodic flusher is one full cycle late */
+ if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
+ mem_cgroup_flush_stats(memcg);
+}
+
+static void flush_memcg_stats_dwork(struct work_struct *w)
+{
+ /*
+ * Deliberately ignore memcg_should_flush_stats() here so that flushing
+ * in latency-sensitive paths is as cheap as possible.
+ */
+ do_flush_stats(root_mem_cgroup);
+ queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
+}
+
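
To make the new per-memcg flush heuristic concrete, a standalone userspace sketch of the per-CPU batching and the reader-side threshold. MEMCG_CHARGE_BATCH is assumed to be 64 (its usual value in memcontrol.h) and the CPU count is made up; the real code additionally skips the atomic add once the subtree is already flushable.

```c
#include <stdio.h>

/* Assumed constant: MEMCG_CHARGE_BATCH is 64 in current kernels. */
#define CHARGE_BATCH 64

int main(void)
{
	unsigned int percpu_pending = 0;	/* stats_updates on this CPU */
	unsigned long long subtree_total = 0;	/* vmstats->stats_updates */
	unsigned int online_cpus = 8;
	int i;

	/* 1000 single-page stat updates on one CPU */
	for (i = 0; i < 1000; i++) {
		percpu_pending += 1;		/* __this_cpu_add_return() */
		if (percpu_pending < CHARGE_BATCH)
			continue;
		subtree_total += percpu_pending;	/* atomic64_add() */
		percpu_pending = 0;
	}

	printf("subtree counter: %llu, flush threshold: %u\n",
	       subtree_total, CHARGE_BATCH * online_cpus);
	/* prints 960 and 512: a reader would flush, since 960 > 512 */
	return 0;
}
```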
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
long x = READ_ONCE(memcg->vmstats->state[idx]);
@@ -870,16 +891,15 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
__mod_memcg_lruvec_state(lruvec, idx, val);
}
-void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
+void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
int val)
{
- struct page *head = compound_head(page); /* rmap on tail pages */
struct mem_cgroup *memcg;
- pg_data_t *pgdat = page_pgdat(page);
+ pg_data_t *pgdat = folio_pgdat(folio);
struct lruvec *lruvec;
rcu_read_lock();
- memcg = page_memcg(head);
+ memcg = folio_memcg(folio);
/* Untracked pages have no memcg, no lruvec. Update only the node */
if (!memcg) {
rcu_read_unlock();
@@ -891,7 +911,7 @@ void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
__mod_lruvec_state(lruvec, idx, val);
rcu_read_unlock();
}
-EXPORT_SYMBOL(__mod_lruvec_page_state);
+EXPORT_SYMBOL(__lruvec_stat_mod_folio);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
@@ -1627,7 +1647,7 @@ static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
*
* Current memory state:
*/
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats(memcg);
for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
u64 size;
@@ -4177,7 +4197,7 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
int nid;
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats(memcg);
for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
seq_printf(m, "%s=%lu", stat->name,
@@ -4258,7 +4278,7 @@ static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats(memcg);
for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
unsigned long nr;
@@ -4379,7 +4399,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
* only one element of the array here.
*/
for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
- eventfd_signal(t->entries[i].eventfd, 1);
+ eventfd_signal(t->entries[i].eventfd);
/* i = current_threshold + 1 */
i++;
@@ -4391,7 +4411,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
* only one element of the array here.
*/
for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
- eventfd_signal(t->entries[i].eventfd, 1);
+ eventfd_signal(t->entries[i].eventfd);
/* Update current_threshold */
t->current_threshold = i - 1;
@@ -4431,7 +4451,7 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
spin_lock(&memcg_oom_lock);
list_for_each_entry(ev, &memcg->oom_notify, list)
- eventfd_signal(ev->eventfd, 1);
+ eventfd_signal(ev->eventfd);
spin_unlock(&memcg_oom_lock);
return 0;
@@ -4650,7 +4670,7 @@ static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
/* already in OOM ? */
if (memcg->under_oom)
- eventfd_signal(eventfd, 1);
+ eventfd_signal(eventfd);
spin_unlock(&memcg_oom_lock);
return 0;
@@ -4754,7 +4774,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
struct mem_cgroup *parent;
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats(memcg);
*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
@@ -4942,7 +4962,7 @@ static void memcg_event_remove(struct work_struct *work)
event->unregister_event(memcg, event->eventfd);
/* Notify userspace the event is going away. */
- eventfd_signal(event->eventfd, 1);
+ eventfd_signal(event->eventfd);
eventfd_ctx_put(event->eventfd);
kfree(event);
@@ -5150,7 +5170,7 @@ out_kfree:
return ret;
}
-#if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
static int mem_cgroup_slab_show(struct seq_file *m, void *p)
{
/*
@@ -5259,8 +5279,7 @@ static struct cftype mem_cgroup_legacy_files[] = {
.write = mem_cgroup_reset,
.read_u64 = mem_cgroup_read_u64,
},
-#if defined(CONFIG_MEMCG_KMEM) && \
- (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
{
.name = "kmem.slabinfo",
.seq_show = mem_cgroup_slab_show,
@@ -5518,6 +5537,8 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
memcg->zswap_max = PAGE_COUNTER_MAX;
+ WRITE_ONCE(memcg->zswap_writeback,
+ !parent || READ_ONCE(parent->zswap_writeback));
#endif
page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
if (parent) {
@@ -5614,6 +5635,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
page_counter_set_min(&memcg->memory, 0);
page_counter_set_low(&memcg->memory, 0);
+ zswap_memcg_offline_cleanup(memcg);
+
memcg_offline_kmem(memcg);
reparent_shrinker_deferred(memcg);
wb_memcg_offline(memcg);
@@ -5784,6 +5807,10 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
}
}
}
+ statc->stats_updates = 0;
+ /* We are in a per-cpu loop here, only do the atomic write once */
+ if (atomic64_read(&memcg->vmstats->stats_updates))
+ atomic64_set(&memcg->vmstats->stats_updates, 0);
}
#ifdef CONFIG_MMU
@@ -6783,6 +6810,10 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
return nbytes;
}
+/*
+ * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
+ * if any new events become available.
+ */
static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
{
seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
@@ -6839,7 +6870,7 @@ static int memory_numa_stat_show(struct seq_file *m, void *v)
int i;
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats(memcg);
for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
int nid;
@@ -7543,6 +7574,17 @@ void mem_cgroup_migrate(struct folio *old, struct folio *new)
/* Transfer the charge and the css ref */
commit_charge(new, memcg);
+ /*
+	 * If the old folio is a large folio and is in the split queue, it needs
+	 * to be removed from the split queue now, otherwise destroy_large_folio()
+	 * could look up an incorrect split queue after the memcg of the old folio
+	 * is cleared.
+ *
+ * In addition, the old folio is about to be freed after migration, so
+ * removing from the split queue a bit earlier seems reasonable.
+ */
+ if (folio_test_large(old) && folio_test_large_rmappable(old))
+ folio_undo_large_rmappable(old);
old->memcg_data = 0;
}
@@ -8070,7 +8112,11 @@ bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
break;
}
- cgroup_rstat_flush(memcg->css.cgroup);
+ /*
+ * mem_cgroup_flush_stats() ignores small changes. Use
+ * do_flush_stats() directly to get accurate stats for charging.
+ */
+ do_flush_stats(memcg);
pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
if (pages < max)
continue;
@@ -8132,11 +8178,19 @@ void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
rcu_read_unlock();
}
+bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
+{
+ /* if zswap is disabled, do not block pages going to the swapping device */
+ return !is_zswap_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback);
+}
+
static u64 zswap_current_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
- cgroup_rstat_flush(css->cgroup);
- return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+
+ mem_cgroup_flush_stats(memcg);
+ return memcg_page_state(memcg, MEMCG_ZSWAP_B);
}
static int zswap_max_show(struct seq_file *m, void *v)
@@ -8162,6 +8216,31 @@ static ssize_t zswap_max_write(struct kernfs_open_file *of,
return nbytes;
}
+static int zswap_writeback_show(struct seq_file *m, void *v)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
+
+ seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
+ return 0;
+}
+
+static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ int zswap_writeback;
+ ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
+
+ if (parse_ret)
+ return parse_ret;
+
+ if (zswap_writeback != 0 && zswap_writeback != 1)
+ return -EINVAL;
+
+ WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
+ return nbytes;
+}
+
static struct cftype zswap_files[] = {
{
.name = "zswap.current",
@@ -8174,6 +8253,11 @@ static struct cftype zswap_files[] = {
.seq_show = zswap_max_show,
.write = zswap_max_write,
},
+ {
+ .name = "zswap.writeback",
+ .seq_show = zswap_writeback_show,
+ .write = zswap_writeback_write,
+ },
{ } /* terminate */
};
#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 455093f73a70..a0d9b4ac7d54 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -901,39 +901,38 @@ static const char * const action_page_types[] = {
* The page count will stop it from being freed by unpoison.
* Stress tests should be aware of this memory leak problem.
*/
-static int delete_from_lru_cache(struct page *p)
+static int delete_from_lru_cache(struct folio *folio)
{
- if (isolate_lru_page(p)) {
+ if (folio_isolate_lru(folio)) {
/*
* Clear sensible page flags, so that the buddy system won't
- * complain when the page is unpoison-and-freed.
+ * complain when the folio is unpoison-and-freed.
*/
- ClearPageActive(p);
- ClearPageUnevictable(p);
+ folio_clear_active(folio);
+ folio_clear_unevictable(folio);
/*
* Poisoned page might never drop its ref count to 0 so we have
* to uncharge it manually from its memcg.
*/
- mem_cgroup_uncharge(page_folio(p));
+ mem_cgroup_uncharge(folio);
/*
- * drop the page count elevated by isolate_lru_page()
+ * drop the refcount elevated by folio_isolate_lru()
*/
- put_page(p);
+ folio_put(folio);
return 0;
}
return -EIO;
}
-static int truncate_error_page(struct page *p, unsigned long pfn,
+static int truncate_error_folio(struct folio *folio, unsigned long pfn,
struct address_space *mapping)
{
int ret = MF_FAILED;
- if (mapping->a_ops->error_remove_page) {
- struct folio *folio = page_folio(p);
- int err = mapping->a_ops->error_remove_page(mapping, p);
+ if (mapping->a_ops->error_remove_folio) {
+ int err = mapping->a_ops->error_remove_folio(mapping, folio);
if (err != 0)
pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
@@ -946,7 +945,7 @@ static int truncate_error_page(struct page *p, unsigned long pfn,
* If the file system doesn't support it just invalidate
* This fails on dirty or anything with private pages
*/
- if (invalidate_inode_page(p))
+ if (mapping_evict_folio(mapping, folio))
ret = MF_RECOVERED;
else
pr_info("%#lx: Failed to invalidate\n", pfn);
@@ -1013,17 +1012,18 @@ static int me_unknown(struct page_state *ps, struct page *p)
*/
static int me_pagecache_clean(struct page_state *ps, struct page *p)
{
+ struct folio *folio = page_folio(p);
int ret;
struct address_space *mapping;
bool extra_pins;
- delete_from_lru_cache(p);
+ delete_from_lru_cache(folio);
/*
- * For anonymous pages we're done the only reference left
+ * For anonymous folios the only reference left
* should be the one m_f() holds.
*/
- if (PageAnon(p)) {
+ if (folio_test_anon(folio)) {
ret = MF_RECOVERED;
goto out;
}
@@ -1035,11 +1035,9 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
* has a reference, because it could be file system metadata
* and that's not safe to truncate.
*/
- mapping = page_mapping(p);
+ mapping = folio_mapping(folio);
if (!mapping) {
- /*
- * Page has been teared down in the meanwhile
- */
+ /* Folio has been torn down in the meantime */
ret = MF_FAILED;
goto out;
}
@@ -1055,12 +1053,12 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
*
* Open: to take i_rwsem or not for this? Right now we don't.
*/
- ret = truncate_error_page(p, page_to_pfn(p), mapping);
+ ret = truncate_error_folio(folio, page_to_pfn(p), mapping);
if (has_extra_refcount(ps, p, extra_pins))
ret = MF_FAILED;
out:
- unlock_page(p);
+ folio_unlock(folio);
return ret;
}
@@ -1138,15 +1136,16 @@ static int me_pagecache_dirty(struct page_state *ps, struct page *p)
*/
static int me_swapcache_dirty(struct page_state *ps, struct page *p)
{
+ struct folio *folio = page_folio(p);
int ret;
bool extra_pins = false;
- ClearPageDirty(p);
+ folio_clear_dirty(folio);
/* Trigger EIO in shmem: */
- ClearPageUptodate(p);
+ folio_clear_uptodate(folio);
- ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
- unlock_page(p);
+ ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_DELAYED;
+ folio_unlock(folio);
if (ret == MF_DELAYED)
extra_pins = true;
@@ -1164,7 +1163,7 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p)
delete_from_swap_cache(folio);
- ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
+ ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
folio_unlock(folio);
if (has_extra_refcount(ps, p, false))
@@ -1181,25 +1180,25 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p)
*/
static int me_huge_page(struct page_state *ps, struct page *p)
{
+ struct folio *folio = page_folio(p);
int res;
- struct page *hpage = compound_head(p);
struct address_space *mapping;
bool extra_pins = false;
- mapping = page_mapping(hpage);
+ mapping = folio_mapping(folio);
if (mapping) {
- res = truncate_error_page(hpage, page_to_pfn(p), mapping);
+ res = truncate_error_folio(folio, page_to_pfn(p), mapping);
/* The page is kept in page cache. */
extra_pins = true;
- unlock_page(hpage);
+ folio_unlock(folio);
} else {
- unlock_page(hpage);
+ folio_unlock(folio);
/*
* migration entry prevents later access on error hugepage,
* so we can free and dissolve it into buddy to save healthy
* subpages.
*/
- put_page(hpage);
+ folio_put(folio);
if (__page_handle_poison(p) >= 0) {
page_ref_inc(p);
res = MF_RECOVERED;
@@ -2316,8 +2315,8 @@ try_again:
* We use page flags to determine what action should be taken, but
* the flags can be modified by the error containment action. One
* example is an mlocked page, where PG_mlocked is cleared by
- * page_remove_rmap() in try_to_unmap_one(). So to determine page status
- * correctly, we save a copy of the page flags at this time.
+ * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
+ * status correctly, we save a copy of the page flags at this time.
*/
page_flags = p->flags;
@@ -2601,37 +2600,37 @@ unlock_mutex:
}
EXPORT_SYMBOL(unpoison_memory);
-static bool isolate_page(struct page *page, struct list_head *pagelist)
+static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
{
bool isolated = false;
- if (PageHuge(page)) {
- isolated = isolate_hugetlb(page_folio(page), pagelist);
+ if (folio_test_hugetlb(folio)) {
+ isolated = isolate_hugetlb(folio, pagelist);
} else {
- bool lru = !__PageMovable(page);
+ bool lru = !__folio_test_movable(folio);
if (lru)
- isolated = isolate_lru_page(page);
+ isolated = folio_isolate_lru(folio);
else
- isolated = isolate_movable_page(page,
+ isolated = isolate_movable_page(&folio->page,
ISOLATE_UNEVICTABLE);
if (isolated) {
- list_add(&page->lru, pagelist);
+ list_add(&folio->lru, pagelist);
if (lru)
- inc_node_page_state(page, NR_ISOLATED_ANON +
- page_is_file_lru(page));
+ node_stat_add_folio(folio, NR_ISOLATED_ANON +
+ folio_is_file_lru(folio));
}
}
/*
- * If we succeed to isolate the page, we grabbed another refcount on
- * the page, so we can safely drop the one we got from get_any_page().
- * If we failed to isolate the page, it means that we cannot go further
+ * If we succeed to isolate the folio, we grabbed another refcount on
+ * the folio, so we can safely drop the one we got from get_any_page().
+ * If we failed to isolate the folio, it means that we cannot go further
* and we will return an error, so drop the reference we got from
* get_any_page() as well.
*/
- put_page(page);
+ folio_put(folio);
return isolated;
}
@@ -2644,40 +2643,40 @@ static int soft_offline_in_use_page(struct page *page)
{
long ret = 0;
unsigned long pfn = page_to_pfn(page);
- struct page *hpage = compound_head(page);
+ struct folio *folio = page_folio(page);
char const *msg_page[] = {"page", "hugepage"};
- bool huge = PageHuge(page);
+ bool huge = folio_test_hugetlb(folio);
LIST_HEAD(pagelist);
struct migration_target_control mtc = {
.nid = NUMA_NO_NODE,
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
};
- if (!huge && PageTransHuge(hpage)) {
+ if (!huge && folio_test_large(folio)) {
if (try_to_split_thp_page(page)) {
pr_info("soft offline: %#lx: thp split failed\n", pfn);
return -EBUSY;
}
- hpage = page;
+ folio = page_folio(page);
}
- lock_page(page);
+ folio_lock(folio);
if (!huge)
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
if (PageHWPoison(page)) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
pr_info("soft offline: %#lx page already poisoned\n", pfn);
return 0;
}
- if (!huge && PageLRU(page) && !PageSwapCache(page))
+ if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio))
/*
* Try to invalidate first. This should work for
* non dirty unmapped page cache pages.
*/
- ret = invalidate_inode_page(page);
- unlock_page(page);
+ ret = mapping_evict_folio(folio_mapping(folio), folio);
+ folio_unlock(folio);
if (ret) {
pr_info("soft_offline: %#lx: invalidated\n", pfn);
@@ -2685,7 +2684,7 @@ static int soft_offline_in_use_page(struct page *page)
return 0;
}
- if (isolate_page(hpage, &pagelist)) {
+ if (mf_isolate_folio(folio, &pagelist)) {
ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
if (!ret) {
diff --git a/mm/memory.c b/mm/memory.c
index 5c757fba8858..7e1f4849463a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -123,9 +123,7 @@ static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
/*
* A number of key systems in x86 including ioremap() rely on the assumption
* that high_memory defines the upper bound on direct map memory, then end
- * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
- * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
- * and ZONE_HIGHMEM.
+ * of ZONE_NORMAL.
*/
void *high_memory;
EXPORT_SYMBOL(high_memory);
@@ -374,6 +372,8 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
* be 0. This will underflow and is okay.
*/
next = mas_find(mas, ceiling - 1);
+ if (unlikely(xa_is_zero(next)))
+ next = NULL;
/*
* Hide vma from rmap and truncate_pagecache before freeing
@@ -395,6 +395,8 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
&& !is_vm_hugetlb_page(next)) {
vma = next;
next = mas_find(mas, ceiling - 1);
+ if (unlikely(xa_is_zero(next)))
+ next = NULL;
if (mm_wr_locked)
vma_start_write(vma);
unlink_anon_vmas(vma);
@@ -706,6 +708,7 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
struct page *page, unsigned long address,
pte_t *ptep)
{
+ struct folio *folio = page_folio(page);
pte_t orig_pte;
pte_t pte;
swp_entry_t entry;
@@ -721,14 +724,15 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
else if (is_writable_device_exclusive_entry(entry))
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
- VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page)));
+ VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) &&
+ PageAnonExclusive(page)), folio);
/*
* No need to take a page reference as one was already
* created when the swap entry was made.
*/
- if (PageAnon(page))
- page_add_anon_rmap(page, vma, address, RMAP_NONE);
+ if (folio_test_anon(folio))
+ folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
else
/*
* Currently device exclusive access only supports anonymous
@@ -779,6 +783,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
unsigned long vm_flags = dst_vma->vm_flags;
pte_t orig_pte = ptep_get(src_pte);
pte_t pte = orig_pte;
+ struct folio *folio;
struct page *page;
swp_entry_t entry = pte_to_swp_entry(orig_pte);
@@ -823,6 +828,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
}
} else if (is_device_private_entry(entry)) {
page = pfn_swap_entry_to_page(entry);
+ folio = page_folio(page);
/*
* Update rss count even for unaddressable pages, as
@@ -833,10 +839,10 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* for unaddressable pages, at some point. But for now
* keep things as they are.
*/
- get_page(page);
+ folio_get(folio);
rss[mm_counter(page)]++;
/* Cannot fail as these pages cannot get pinned. */
- BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));
+ folio_try_dup_anon_rmap_pte(folio, page, src_vma);
/*
* We do not preserve soft-dirty information, because so
@@ -950,7 +956,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
* future.
*/
folio_get(folio);
- if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
+ if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
/* Page may be pinned, we have to copy. */
folio_put(folio);
return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
@@ -959,7 +965,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
rss[MM_ANONPAGES]++;
} else if (page) {
folio_get(folio);
- page_dup_file_rmap(page, false);
+ folio_dup_file_rmap_pte(folio, page);
rss[mm_counter_file(page)]++;
}
@@ -988,12 +994,17 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
return 0;
}
-static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
- struct vm_area_struct *vma, unsigned long addr)
+static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
+ struct vm_area_struct *vma, unsigned long addr, bool need_zero)
{
struct folio *new_folio;
- new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+ if (need_zero)
+ new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
+ else
+ new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+ addr, false);
+
if (!new_folio)
return NULL;
@@ -1125,7 +1136,7 @@ again:
} else if (ret == -EBUSY) {
goto out;
} else if (ret == -EAGAIN) {
- prealloc = page_copy_prealloc(src_mm, src_vma, addr);
+ prealloc = folio_prealloc(src_mm, src_vma, addr, false);
if (!prealloc)
return -ENOMEM;
} else if (ret) {
@@ -1423,6 +1434,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
arch_enter_lazy_mmu_mode();
do {
pte_t ptent = ptep_get(pte);
+ struct folio *folio;
struct page *page;
if (pte_none(ptent))
@@ -1448,21 +1460,22 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
continue;
}
+ folio = page_folio(page);
delay_rmap = 0;
- if (!PageAnon(page)) {
+ if (!folio_test_anon(folio)) {
if (pte_dirty(ptent)) {
- set_page_dirty(page);
+ folio_set_dirty(folio);
if (tlb_delay_rmap(tlb)) {
delay_rmap = 1;
force_flush = 1;
}
}
if (pte_young(ptent) && likely(vma_has_recency(vma)))
- mark_page_accessed(page);
+ folio_mark_accessed(folio);
}
rss[mm_counter(page)]--;
if (!delay_rmap) {
- page_remove_rmap(page, vma, false);
+ folio_remove_rmap_pte(folio, page, vma);
if (unlikely(page_mapcount(page) < 0))
print_bad_pte(vma, addr, ptent, page);
}
@@ -1478,6 +1491,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
if (is_device_private_entry(entry) ||
is_device_exclusive_entry(entry)) {
page = pfn_swap_entry_to_page(entry);
+ folio = page_folio(page);
if (unlikely(!should_zap_page(details, page)))
continue;
/*
@@ -1489,8 +1503,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
WARN_ON_ONCE(!vma_is_anonymous(vma));
rss[mm_counter(page)]--;
if (is_device_private_entry(entry))
- page_remove_rmap(page, vma, false);
- put_page(page);
+ folio_remove_rmap_pte(folio, page, vma);
+ folio_put(folio);
} else if (!non_swap_entry(entry)) {
/* Genuine swap entry, hence a private anon page */
if (!should_zap_cows(details))
@@ -1744,7 +1758,8 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
unmap_single_vma(tlb, vma, start, end, &details,
mm_wr_locked);
hugetlb_zap_end(vma, &details);
- } while ((vma = mas_find(mas, tree_end - 1)) != NULL);
+ vma = mas_find(mas, tree_end - 1);
+ } while (vma && likely(!xa_is_zero(vma)));
mmu_notifier_invalidate_range_end(&range);
}
@@ -1837,21 +1852,26 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
static int validate_page_before_insert(struct page *page)
{
- if (PageAnon(page) || PageSlab(page) || page_has_type(page))
+ struct folio *folio = page_folio(page);
+
+ if (folio_test_anon(folio) || folio_test_slab(folio) ||
+ page_has_type(page))
return -EINVAL;
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
return 0;
}
static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
unsigned long addr, struct page *page, pgprot_t prot)
{
+ struct folio *folio = page_folio(page);
+
if (!pte_none(ptep_get(pte)))
return -EBUSY;
/* Ok, finally just insert the thing.. */
- get_page(page);
+ folio_get(folio);
inc_mm_counter(vma->vm_mm, mm_counter_file(page));
- page_add_file_rmap(page, vma, false);
+ folio_add_file_rmap_pte(folio, page, vma);
set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
return 0;
}
@@ -2836,7 +2856,8 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src,
* just copying from the original user address. If that
* fails, we just zero-fill it. Live with it.
*/
- kaddr = kmap_atomic(dst);
+ kaddr = kmap_local_page(dst);
+ pagefault_disable();
uaddr = (void __user *)(addr & PAGE_MASK);
/*
@@ -2904,7 +2925,8 @@ warn:
pte_unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
- kunmap_atomic(kaddr);
+ pagefault_enable();
+ kunmap_local(kaddr);
flush_dcache_page(dst);
return ret;
@@ -3102,6 +3124,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
int page_copied = 0;
struct mmu_notifier_range range;
vm_fault_t ret;
+ bool pfn_is_zero;
delayacct_wpcopy_start();
@@ -3111,16 +3134,13 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
if (unlikely(ret))
goto out;
- if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
- new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
- if (!new_folio)
- goto oom;
- } else {
+ pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte));
+ new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero);
+ if (!new_folio)
+ goto oom;
+
+ if (!pfn_is_zero) {
int err;
- new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
- vmf->address, false);
- if (!new_folio)
- goto oom;
err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
if (err) {
@@ -3141,10 +3161,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
kmsan_copy_page_meta(&new_folio->page, vmf->page);
}
- if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL))
- goto oom_free_new;
- folio_throttle_swaprate(new_folio, GFP_KERNEL);
-
__folio_mark_uptodate(new_folio);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
@@ -3207,10 +3223,10 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
* threads.
*
* The critical issue is to order this
- * page_remove_rmap with the ptp_clear_flush above.
- * Those stores are ordered by (if nothing else,)
+ * folio_remove_rmap_pte() with the ptp_clear_flush
+ * above. Those stores are ordered by (if nothing else,)
* the barrier present in the atomic_add_negative
- * in page_remove_rmap.
+ * in folio_remove_rmap_pte().
*
* Then the TLB flush in ptep_clear_flush ensures that
* no process can access the old page before the
@@ -3219,7 +3235,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
* mapcount is visible. So transitively, TLBs to
* old page will be flushed before it can be reused.
*/
- page_remove_rmap(vmf->page, vma, false);
+ folio_remove_rmap_pte(old_folio, vmf->page, vma);
}
/* Free the old page.. */
@@ -3243,8 +3259,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
delayacct_wpcopy_end();
return 0;
-oom_free_new:
- folio_put(new_folio);
oom:
ret = VM_FAULT_OOM;
out:
@@ -3624,8 +3638,8 @@ EXPORT_SYMBOL_GPL(unmap_mapping_pages);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows)
{
- pgoff_t hba = holebegin >> PAGE_SHIFT;
- pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
+ pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* Check for overflow. */
if (sizeof(holelen) > sizeof(hlen)) {
@@ -3875,9 +3889,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
folio_add_lru(folio);
- /* To provide entry to swap_readpage() */
+ /* To provide entry to swap_read_folio() */
folio->swap = entry;
- swap_readpage(page, true, NULL);
+ swap_read_folio(folio, true, NULL);
folio->private = NULL;
}
} else {
@@ -3935,15 +3949,18 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* page->index of !PageKSM() pages would be nonlinear inside the
* anon VMA -- PageKSM() is lost on actual swapout.
*/
- page = ksm_might_need_to_copy(page, vma, vmf->address);
- if (unlikely(!page)) {
+ folio = ksm_might_need_to_copy(folio, vma, vmf->address);
+ if (unlikely(!folio)) {
ret = VM_FAULT_OOM;
+ folio = swapcache;
goto out_page;
- } else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+ } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
ret = VM_FAULT_HWPOISON;
+ folio = swapcache;
goto out_page;
}
- folio = page_folio(page);
+ if (folio != swapcache)
+ page = folio_page(folio, 0);
/*
* If we want to map a page that's in the swapcache writable, we
@@ -4061,10 +4078,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
/* ksm created a completely new copy */
if (unlikely(folio != swapcache && swapcache)) {
- page_add_new_anon_rmap(page, vma, vmf->address);
+ folio_add_new_anon_rmap(folio, vma, vmf->address);
folio_add_lru_vma(folio, vma);
} else {
- page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
+ folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
+ rmap_flags);
}
VM_BUG_ON(!folio_test_anon(folio) ||
@@ -4118,6 +4136,84 @@ out_release:
return ret;
}
+static bool pte_range_none(pte_t *pte, int nr_pages)
+{
+ int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (!pte_none(ptep_get_lockless(pte + i)))
+ return false;
+ }
+
+ return true;
+}
+
+static struct folio *alloc_anon_folio(struct vm_fault *vmf)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct vm_area_struct *vma = vmf->vma;
+ unsigned long orders;
+ struct folio *folio;
+ unsigned long addr;
+ pte_t *pte;
+ gfp_t gfp;
+ int order;
+
+ /*
+ * If uffd is active for the vma we need per-page fault fidelity to
+ * maintain the uffd semantics.
+ */
+ if (unlikely(userfaultfd_armed(vma)))
+ goto fallback;
+
+ /*
+ * Get a list of all the (large) orders below PMD_ORDER that are enabled
+ * for this vma. Then filter out the orders that can't be allocated over
+ * the faulting address and still be fully contained in the vma.
+ */
+ orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
+ BIT(PMD_ORDER) - 1);
+ orders = thp_vma_suitable_orders(vma, vmf->address, orders);
+
+ if (!orders)
+ goto fallback;
+
+ pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
+ if (!pte)
+ return ERR_PTR(-EAGAIN);
+
+ /*
+ * Find the highest order where the aligned range is completely
+ * pte_none(). Note that all remaining orders will be completely
+ * pte_none().
+ */
+ order = highest_order(orders);
+ while (orders) {
+ addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
+ if (pte_range_none(pte + pte_index(addr), 1 << order))
+ break;
+ order = next_order(&orders, order);
+ }
+
+ pte_unmap(pte);
+
+ /* Try allocating the highest of the remaining orders. */
+ gfp = vma_thp_gfp_mask(vma);
+ while (orders) {
+ addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
+ folio = vma_alloc_folio(gfp, order, vma, addr, true);
+ if (folio) {
+ clear_huge_page(&folio->page, vmf->address, 1 << order);
+ return folio;
+ }
+ order = next_order(&orders, order);
+ }
+
+fallback:
+#endif
+ return vma_alloc_zeroed_movable_folio(vmf->vma, vmf->address);
+}
+
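The alloc_anon_folio() block added above scans the enabled THP orders from highest to lowest and keeps the first order whose naturally aligned PTE range around the faulting address is entirely pte_none(). Below is a minimal user-space sketch of that selection loop; the array, helper names and order limit are illustrative stand-ins rather than kernel API, and the VMA-boundary filtering done by thp_vma_suitable_orders() is left out.

/* Illustrative only: pick the largest order whose naturally aligned
 * window around 'index' is entirely empty, mirroring the
 * pte_range_none() scan above. */
#include <stdbool.h>
#include <stdio.h>

#define PTRS_PER_PTE 512

static bool range_none(const bool *occupied, unsigned int start, unsigned int nr)
{
	for (unsigned int i = 0; i < nr; i++)
		if (occupied[start + i])
			return false;
	return true;
}

static int pick_order(const bool *occupied, unsigned int index, int max_order)
{
	for (int order = max_order; order >= 0; order--) {
		unsigned int nr = 1u << order;
		unsigned int start = index & ~(nr - 1);	/* align down to this order */

		if (range_none(occupied, start, nr))
			return order;
	}
	return -1;	/* not even the single entry is free */
}

int main(void)
{
	bool occupied[PTRS_PER_PTE] = { false };

	occupied[100] = true;	/* a neighbouring mapping already present */
	printf("fault at 70  -> order %d\n", pick_order(occupied, 70, 4));
	printf("fault at 101 -> order %d\n", pick_order(occupied, 101, 4));
	return 0;
}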
/*
* We enter with non-exclusive mmap_lock (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
@@ -4127,9 +4223,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
{
bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
struct vm_area_struct *vma = vmf->vma;
+ unsigned long addr = vmf->address;
struct folio *folio;
vm_fault_t ret = 0;
+ int nr_pages = 1;
pte_t entry;
+ int i;
/* File mapping without ->vm_ops ? */
if (vma->vm_flags & VM_SHARED)
@@ -4169,10 +4268,16 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
/* Allocate our own private page. */
if (unlikely(anon_vma_prepare(vma)))
goto oom;
- folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+ /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
+ folio = alloc_anon_folio(vmf);
+ if (IS_ERR(folio))
+ return 0;
if (!folio)
goto oom;
+ nr_pages = folio_nr_pages(folio);
+ addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
+
if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
goto oom_free_page;
folio_throttle_swaprate(folio, GFP_KERNEL);
@@ -4189,12 +4294,15 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
if (vma->vm_flags & VM_WRITE)
entry = pte_mkwrite(pte_mkdirty(entry), vma);
- vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
- &vmf->ptl);
+ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
if (!vmf->pte)
goto release;
- if (vmf_pte_changed(vmf)) {
- update_mmu_tlb(vma, vmf->address, vmf->pte);
+ if (nr_pages == 1 && vmf_pte_changed(vmf)) {
+ update_mmu_tlb(vma, addr, vmf->pte);
+ goto release;
+ } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
+ for (i = 0; i < nr_pages; i++)
+ update_mmu_tlb(vma, addr + PAGE_SIZE * i, vmf->pte + i);
goto release;
}
@@ -4209,16 +4317,17 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
return handle_userfault(vmf, VM_UFFD_MISSING);
}
- inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
- folio_add_new_anon_rmap(folio, vma, vmf->address);
+ folio_ref_add(folio, nr_pages - 1);
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
+ folio_add_new_anon_rmap(folio, vma, addr);
folio_add_lru_vma(folio, vma);
setpte:
if (uffd_wp)
entry = pte_mkuffd_wp(entry);
- set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
+ set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);
/* No need to invalidate - it was non-present before */
- update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+ update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages);
unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -4240,6 +4349,7 @@ oom:
static vm_fault_t __do_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
vm_fault_t ret;
/*
@@ -4268,27 +4378,26 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
VM_FAULT_DONE_COW)))
return ret;
+ folio = page_folio(vmf->page);
if (unlikely(PageHWPoison(vmf->page))) {
- struct page *page = vmf->page;
vm_fault_t poisonret = VM_FAULT_HWPOISON;
if (ret & VM_FAULT_LOCKED) {
- if (page_mapped(page))
- unmap_mapping_pages(page_mapping(page),
- page->index, 1, false);
- /* Retry if a clean page was removed from the cache. */
- if (invalidate_inode_page(page))
+ if (page_mapped(vmf->page))
+ unmap_mapping_folio(folio);
+ /* Retry if a clean folio was removed from the cache. */
+ if (mapping_evict_folio(folio->mapping, folio))
poisonret = VM_FAULT_NOPAGE;
- unlock_page(page);
+ folio_unlock(folio);
}
- put_page(page);
+ folio_put(folio);
vmf->page = NULL;
return poisonret;
}
if (unlikely(!(ret & VM_FAULT_LOCKED)))
- lock_page(vmf->page);
+ folio_lock(folio);
else
- VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
+ VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
return ret;
}
@@ -4309,17 +4418,17 @@ static void deposit_prealloc_pte(struct vm_fault *vmf)
vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
{
+ struct folio *folio = page_folio(page);
struct vm_area_struct *vma = vmf->vma;
bool write = vmf->flags & FAULT_FLAG_WRITE;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
pmd_t entry;
vm_fault_t ret = VM_FAULT_FALLBACK;
- if (!transhuge_vma_suitable(vma, haddr))
+ if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
return ret;
- page = compound_head(page);
- if (compound_order(page) != HPAGE_PMD_ORDER)
+ if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER)
return ret;
/*
@@ -4328,7 +4437,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
* check. This kind of THP just can be PTE mapped. Access to
* the corrupted subpage should trigger SIGBUS as expected.
*/
- if (unlikely(PageHasHWPoisoned(page)))
+ if (unlikely(folio_test_has_hwpoisoned(folio)))
return ret;
/*
@@ -4352,7 +4461,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
- page_add_file_rmap(page, vma, true);
+ folio_add_file_rmap_pmd(folio, page, vma);
/*
* deposit and withdraw with pmd lock held
@@ -4415,7 +4524,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
folio_add_lru_vma(folio, vma);
} else {
add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
- folio_add_file_rmap_range(folio, page, nr, vma, false);
+ folio_add_file_rmap_ptes(folio, page, nr, vma);
}
set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
@@ -4641,6 +4750,7 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
static vm_fault_t do_cow_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
vm_fault_t ret;
ret = vmf_can_call_fault(vmf);
@@ -4649,16 +4759,11 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
if (ret)
return ret;
- vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
- if (!vmf->cow_page)
+ folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
+ if (!folio)
return VM_FAULT_OOM;
- if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm,
- GFP_KERNEL)) {
- put_page(vmf->cow_page);
- return VM_FAULT_OOM;
- }
- folio_throttle_swaprate(page_folio(vmf->cow_page), GFP_KERNEL);
+ vmf->cow_page = &folio->page;
ret = __do_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
@@ -4667,7 +4772,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
return ret;
copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
- __SetPageUptodate(vmf->cow_page);
+ __folio_mark_uptodate(folio);
ret |= finish_fault(vmf);
unlock_page(vmf->page);
@@ -4676,7 +4781,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
goto uncharge_out;
return ret;
uncharge_out:
- put_page(vmf->cow_page);
+ folio_put(folio);
return ret;
}
@@ -5113,7 +5218,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
return VM_FAULT_OOM;
retry_pud:
if (pud_none(*vmf.pud) &&
- hugepage_vma_check(vma, vm_flags, false, true, true)) {
+ thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
ret = create_huge_pud(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -5147,7 +5252,7 @@ retry_pud:
goto retry_pud;
if (pmd_none(*vmf.pmd) &&
- hugepage_vma_check(vma, vm_flags, false, true, true)) {
+ thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -5850,7 +5955,7 @@ static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
if (bytes > PAGE_SIZE-offset)
bytes = PAGE_SIZE-offset;
- maddr = kmap(page);
+ maddr = kmap_local_page(page);
if (write) {
copy_to_user_page(vma, page, addr,
maddr + offset, buf, bytes);
@@ -5859,8 +5964,7 @@ static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
copy_from_user_page(vma, page, addr,
buf, maddr + offset, bytes);
}
- kunmap(page);
- put_page(page);
+ unmap_and_put_page(page, maddr);
}
len -= bytes;
buf += bytes;
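The do_swap_page() hunks above rely on ksm_might_need_to_copy() now taking and returning a folio that can be the original, a fresh copy, NULL on allocation failure, or ERR_PTR(-EHWPOISON). The user-space sketch below models only that three-way return convention: the ERR_PTR()/IS_ERR() helpers mirror the well-known kernel macros, while maybe_copy() and its arguments are made up for illustration.

#include <errno.h>
#include <stdio.h>

#ifndef EHWPOISON
#define EHWPOISON 133	/* assumed Linux value if libc does not define it */
#endif

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for ksm_might_need_to_copy(). */
static void *maybe_copy(void *orig, int poisoned, int oom)
{
	if (poisoned)
		return ERR_PTR(-EHWPOISON);
	if (oom)
		return NULL;
	return orig;	/* or a freshly allocated copy */
}

int main(void)
{
	int object = 42;
	void *res = maybe_copy(&object, 0, 0);

	if (!res)
		puts("fail the fault with VM_FAULT_OOM");
	else if (IS_ERR(res))
		printf("fail the fault, error %ld\n", PTR_ERR(res));
	else
		puts("continue with the (possibly copied) object");
	return 0;
}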
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 7a5fc89a8652..b3c0ff52bb72 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -645,7 +645,7 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
unsigned long pfn;
/*
- * Online the pages in MAX_ORDER aligned chunks. The callback might
+ * Online the pages in MAX_PAGE_ORDER aligned chunks. The callback might
* decide to not expose all pages to the buddy (e.g., expose them
* later). We account all pages as being online and belonging to this
* zone ("present").
@@ -660,12 +660,13 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
* Free to online pages in the largest chunks alignment allows.
*
* __ffs() behaviour is undefined for 0. start == 0 is
- * MAX_ORDER-aligned, Set order to MAX_ORDER for the case.
+ * MAX_PAGE_ORDER-aligned; set order to MAX_PAGE_ORDER in
+ * that case.
*/
if (pfn)
- order = min_t(int, MAX_ORDER, __ffs(pfn));
+ order = min_t(int, MAX_PAGE_ORDER, __ffs(pfn));
else
- order = MAX_ORDER;
+ order = MAX_PAGE_ORDER;
(*online_page_callback)(pfn_to_page(pfn), order);
pfn += (1UL << order);
@@ -1380,6 +1381,85 @@ static bool mhp_supports_memmap_on_memory(unsigned long size)
return arch_supports_memmap_on_memory(vmemmap_size);
}
+static void __ref remove_memory_blocks_and_altmaps(u64 start, u64 size)
+{
+ unsigned long memblock_size = memory_block_size_bytes();
+ u64 cur_start;
+
+ /*
+ * For memmap_on_memory, the altmaps were added on a per-memblock
+ * basis; we have to process each individual memory block.
+ */
+ for (cur_start = start; cur_start < start + size;
+ cur_start += memblock_size) {
+ struct vmem_altmap *altmap = NULL;
+ struct memory_block *mem;
+
+ mem = find_memory_block(pfn_to_section_nr(PFN_DOWN(cur_start)));
+ if (WARN_ON_ONCE(!mem))
+ continue;
+
+ altmap = mem->altmap;
+ mem->altmap = NULL;
+
+ remove_memory_block_devices(cur_start, memblock_size);
+
+ arch_remove_memory(cur_start, memblock_size, altmap);
+
+ /* Verify that all vmemmap pages have actually been freed. */
+ WARN(altmap->alloc, "Altmap not fully unmapped");
+ kfree(altmap);
+ }
+}
+
+static int create_altmaps_and_memory_blocks(int nid, struct memory_group *group,
+ u64 start, u64 size)
+{
+ unsigned long memblock_size = memory_block_size_bytes();
+ u64 cur_start;
+ int ret;
+
+ for (cur_start = start; cur_start < start + size;
+ cur_start += memblock_size) {
+ struct mhp_params params = { .pgprot =
+ pgprot_mhp(PAGE_KERNEL) };
+ struct vmem_altmap mhp_altmap = {
+ .base_pfn = PHYS_PFN(cur_start),
+ .end_pfn = PHYS_PFN(cur_start + memblock_size - 1),
+ };
+
+ mhp_altmap.free = memory_block_memmap_on_memory_pages();
+ params.altmap = kmemdup(&mhp_altmap, sizeof(struct vmem_altmap),
+ GFP_KERNEL);
+ if (!params.altmap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* call arch's memory hotadd */
+ ret = arch_add_memory(nid, cur_start, memblock_size, &params);
+ if (ret < 0) {
+ kfree(params.altmap);
+ goto out;
+ }
+
+ /* create memory block devices after memory was added */
+ ret = create_memory_block_devices(cur_start, memblock_size,
+ params.altmap, group);
+ if (ret) {
+ arch_remove_memory(cur_start, memblock_size, NULL);
+ kfree(params.altmap);
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ if (ret && cur_start != start)
+ remove_memory_blocks_and_altmaps(start, cur_start - start);
+ return ret;
+}
+
/*
* NOTE: The caller must call lock_device_hotplug() to serialize hotplug
* and online/offline operations (triggered e.g. by sysfs).
@@ -1390,10 +1470,6 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
{
struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
enum memblock_flags memblock_flags = MEMBLOCK_NONE;
- struct vmem_altmap mhp_altmap = {
- .base_pfn = PHYS_PFN(res->start),
- .end_pfn = PHYS_PFN(res->end),
- };
struct memory_group *group = NULL;
u64 start, size;
bool new_node = false;
@@ -1436,30 +1512,22 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
/*
* Self hosted memmap array
*/
- if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
- if (mhp_supports_memmap_on_memory(size)) {
- mhp_altmap.free = memory_block_memmap_on_memory_pages();
- params.altmap = kmalloc(sizeof(struct vmem_altmap), GFP_KERNEL);
- if (!params.altmap) {
- ret = -ENOMEM;
- goto error;
- }
+ if ((mhp_flags & MHP_MEMMAP_ON_MEMORY) &&
+ mhp_supports_memmap_on_memory(memory_block_size_bytes())) {
+ ret = create_altmaps_and_memory_blocks(nid, group, start, size);
+ if (ret)
+ goto error;
+ } else {
+ ret = arch_add_memory(nid, start, size, &params);
+ if (ret < 0)
+ goto error;
- memcpy(params.altmap, &mhp_altmap, sizeof(mhp_altmap));
+ /* create memory block devices after memory was added */
+ ret = create_memory_block_devices(start, size, NULL, group);
+ if (ret) {
+ arch_remove_memory(start, size, params.altmap);
+ goto error;
}
- /* fallback to not using altmap */
- }
-
- /* call arch's memory hotadd */
- ret = arch_add_memory(nid, start, size, &params);
- if (ret < 0)
- goto error_free;
-
- /* create memory block devices after memory was added */
- ret = create_memory_block_devices(start, size, params.altmap, group);
- if (ret) {
- arch_remove_memory(start, size, params.altmap);
- goto error_free;
}
if (new_node) {
@@ -1496,8 +1564,6 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
walk_memory_blocks(start, size, NULL, online_memory_block);
return ret;
-error_free:
- kfree(params.altmap);
error:
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
memblock_remove(start, size);
@@ -2067,17 +2133,13 @@ static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
return 0;
}
-static int test_has_altmap_cb(struct memory_block *mem, void *arg)
+static int count_memory_range_altmaps_cb(struct memory_block *mem, void *arg)
{
- struct memory_block **mem_ptr = (struct memory_block **)arg;
- /*
- * return the memblock if we have altmap
- * and break callback.
- */
- if (mem->altmap) {
- *mem_ptr = mem;
- return 1;
- }
+ u64 *num_altmaps = (u64 *)arg;
+
+ if (mem->altmap)
+ *num_altmaps += 1;
+
return 0;
}
@@ -2151,11 +2213,29 @@ void try_offline_node(int nid)
}
EXPORT_SYMBOL(try_offline_node);
+static int memory_blocks_have_altmaps(u64 start, u64 size)
+{
+ u64 num_memblocks = size / memory_block_size_bytes();
+ u64 num_altmaps = 0;
+
+ if (!mhp_memmap_on_memory())
+ return 0;
+
+ walk_memory_blocks(start, size, &num_altmaps,
+ count_memory_range_altmaps_cb);
+
+ if (num_altmaps == 0)
+ return 0;
+
+ if (WARN_ON_ONCE(num_memblocks != num_altmaps))
+ return -EINVAL;
+
+ return 1;
+}
+
static int __ref try_remove_memory(u64 start, u64 size)
{
- struct memory_block *mem;
- int rc = 0, nid = NUMA_NO_NODE;
- struct vmem_altmap *altmap = NULL;
+ int rc, nid = NUMA_NO_NODE;
BUG_ON(check_hotplug_memory_range(start, size));
@@ -2172,45 +2252,26 @@ static int __ref try_remove_memory(u64 start, u64 size)
if (rc)
return rc;
- /*
- * We only support removing memory added with MHP_MEMMAP_ON_MEMORY in
- * the same granularity it was added - a single memory block.
- */
- if (mhp_memmap_on_memory()) {
- rc = walk_memory_blocks(start, size, &mem, test_has_altmap_cb);
- if (rc) {
- if (size != memory_block_size_bytes()) {
- pr_warn("Refuse to remove %#llx - %#llx,"
- "wrong granularity\n",
- start, start + size);
- return -EINVAL;
- }
- altmap = mem->altmap;
- /*
- * Mark altmap NULL so that we can add a debug
- * check on memblock free.
- */
- mem->altmap = NULL;
- }
- }
-
/* remove memmap entry */
firmware_map_remove(start, start + size, "System RAM");
- /*
- * Memory block device removal under the device_hotplug_lock is
- * a barrier against racing online attempts.
- */
- remove_memory_block_devices(start, size);
-
mem_hotplug_begin();
- arch_remove_memory(start, size, altmap);
-
- /* Verify that all vmemmap pages have actually been freed. */
- if (altmap) {
- WARN(altmap->alloc, "Altmap not fully unmapped");
- kfree(altmap);
+ rc = memory_blocks_have_altmaps(start, size);
+ if (rc < 0) {
+ mem_hotplug_done();
+ return rc;
+ } else if (!rc) {
+ /*
+ * Memory block device removal under the device_hotplug_lock is
+ * a barrier against racing online attempts.
+ * No altmaps present; do the removal directly.
+ */
+ remove_memory_block_devices(start, size);
+ arch_remove_memory(start, size, NULL);
+ } else {
+ /* all memblocks in the range have altmaps */
+ remove_memory_blocks_and_altmaps(start, size);
}
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
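create_altmaps_and_memory_blocks() above hot-adds a range one memory block at a time and, when a later block fails, calls remove_memory_blocks_and_altmaps() on only the part it already added. The sketch below is a small user-space model of that loop-with-rollback pattern; the block size, descriptor and failure trigger are illustrative, not kernel structures.

#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 128UL	/* stand-in for memory_block_size_bytes() */

struct blockdesc {		/* stand-in for the per-block altmap */
	unsigned long start;
};

static struct blockdesc *added[64];
static unsigned long nr_added;

static int add_one_block(unsigned long start, unsigned long fail_at)
{
	if (start == fail_at)
		return -1;	/* simulate arch_add_memory() failing */
	added[nr_added] = malloc(sizeof(struct blockdesc));
	if (!added[nr_added])
		return -1;
	added[nr_added]->start = start;
	nr_added++;
	return 0;
}

static void remove_blocks(unsigned long start, unsigned long size)
{
	unsigned long nr = size / BLOCK_SIZE;

	(void)start;		/* kept only to mirror the kernel signature */
	while (nr--) {
		nr_added--;
		printf("rolling back block at %lu\n", added[nr_added]->start);
		free(added[nr_added]);
	}
}

static int add_range(unsigned long start, unsigned long size, unsigned long fail_at)
{
	unsigned long cur;

	for (cur = start; cur < start + size; cur += BLOCK_SIZE) {
		if (add_one_block(cur, fail_at)) {
			/* unwind only what this call managed to add */
			if (cur != start)
				remove_blocks(start, cur - start);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	if (add_range(0, 4 * BLOCK_SIZE, 256))
		puts("range rejected, earlier blocks rolled back");
	return 0;
}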
diff --git a/mm/mempool.c b/mm/mempool.c
index 734bcf5afbb7..dbbf0e9fb424 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -20,7 +20,7 @@
#include <linux/writeback.h>
#include "slab.h"
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
+#ifdef CONFIG_SLUB_DEBUG_ON
static void poison_error(mempool_t *pool, void *element, size_t size,
size_t byte)
{
@@ -56,6 +56,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
static void check_element(mempool_t *pool, void *element)
{
+ /* Skip checking: KASAN might save its metadata in the element. */
+ if (kasan_enabled())
+ return;
+
/* Mempools backed by slab allocator */
if (pool->free == mempool_kfree) {
__check_element(pool, element, (size_t)pool->pool_data);
@@ -64,10 +68,10 @@ static void check_element(mempool_t *pool, void *element)
} else if (pool->free == mempool_free_pages) {
/* Mempools backed by page allocator */
int order = (int)(long)pool->pool_data;
- void *addr = kmap_atomic((struct page *)element);
+ void *addr = kmap_local_page((struct page *)element);
__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
}
@@ -81,6 +85,10 @@ static void __poison_element(void *element, size_t size)
static void poison_element(mempool_t *pool, void *element)
{
+ /* Skip poisoning: KASAN might save its metadata in the element. */
+ if (kasan_enabled())
+ return;
+
/* Mempools backed by slab allocator */
if (pool->alloc == mempool_kmalloc) {
__poison_element(element, (size_t)pool->pool_data);
@@ -89,47 +97,49 @@ static void poison_element(mempool_t *pool, void *element)
} else if (pool->alloc == mempool_alloc_pages) {
/* Mempools backed by page allocator */
int order = (int)(long)pool->pool_data;
- void *addr = kmap_atomic((struct page *)element);
+ void *addr = kmap_local_page((struct page *)element);
__poison_element(addr, 1UL << (PAGE_SHIFT + order));
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
}
-#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+#else /* CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
-#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+#endif /* CONFIG_SLUB_DEBUG_ON */
-static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
+static __always_inline bool kasan_poison_element(mempool_t *pool, void *element)
{
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
- kasan_slab_free_mempool(element);
+ return kasan_mempool_poison_object(element);
else if (pool->alloc == mempool_alloc_pages)
- kasan_poison_pages(element, (unsigned long)pool->pool_data,
- false);
+ return kasan_mempool_poison_pages(element,
+ (unsigned long)pool->pool_data);
+ return true;
}
static void kasan_unpoison_element(mempool_t *pool, void *element)
{
if (pool->alloc == mempool_kmalloc)
- kasan_unpoison_range(element, (size_t)pool->pool_data);
+ kasan_mempool_unpoison_object(element, (size_t)pool->pool_data);
else if (pool->alloc == mempool_alloc_slab)
- kasan_unpoison_range(element, kmem_cache_size(pool->pool_data));
+ kasan_mempool_unpoison_object(element,
+ kmem_cache_size(pool->pool_data));
else if (pool->alloc == mempool_alloc_pages)
- kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
- false);
+ kasan_mempool_unpoison_pages(element,
+ (unsigned long)pool->pool_data);
}
static __always_inline void add_element(mempool_t *pool, void *element)
{
BUG_ON(pool->curr_nr >= pool->min_nr);
poison_element(pool, element);
- kasan_poison_element(pool, element);
- pool->elements[pool->curr_nr++] = element;
+ if (kasan_poison_element(pool, element))
+ pool->elements[pool->curr_nr++] = element;
}
static void *remove_element(mempool_t *pool)
@@ -447,6 +457,43 @@ repeat_alloc:
EXPORT_SYMBOL(mempool_alloc);
/**
+ * mempool_alloc_preallocated - allocate an element from preallocated elements
+ * belonging to a specific memory pool
+ * @pool: pointer to the memory pool which was allocated via
+ * mempool_create().
+ *
+ * This function is similar to mempool_alloc, but it only attempts allocating
+ * an element from the preallocated elements. It does not sleep and immediately
+ * returns if no preallocated elements are available.
+ *
+ * Return: pointer to the allocated element or %NULL if no elements are
+ * available.
+ */
+void *mempool_alloc_preallocated(mempool_t *pool)
+{
+ void *element;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ if (likely(pool->curr_nr)) {
+ element = remove_element(pool);
+ spin_unlock_irqrestore(&pool->lock, flags);
+ /* paired with rmb in mempool_free(), read comment there */
+ smp_wmb();
+ /*
+ * Update the allocation stack trace as this is more useful
+ * for debugging.
+ */
+ kmemleak_update_trace(element);
+ return element;
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return NULL;
+}
+EXPORT_SYMBOL(mempool_alloc_preallocated);
+
+/**
* mempool_free - return an element to the pool.
* @element: pool element pointer.
* @pool: pointer to the memory pool which was allocated via
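The new mempool_alloc_preallocated() above only ever hands back elements already sitting in the pool and returns NULL rather than sleeping or falling through to the underlying allocator. The sketch below models that behaviour in user space; the structure, lock-free layout and names are illustrative stand-ins, not the kernel's mempool_t.

#include <stdio.h>
#include <stdlib.h>

struct mini_pool {		/* illustrative stand-in for mempool_t */
	void **elements;
	int curr_nr;
	int min_nr;
};

/* Take an element only if one is preallocated; never allocate, never block. */
static void *pool_alloc_preallocated(struct mini_pool *pool)
{
	if (pool->curr_nr > 0)
		return pool->elements[--pool->curr_nr];
	return NULL;
}

static void pool_free(struct mini_pool *pool, void *element)
{
	if (pool->curr_nr < pool->min_nr)
		pool->elements[pool->curr_nr++] = element;
	else
		free(element);
}

int main(void)
{
	struct mini_pool pool = { .curr_nr = 0, .min_nr = 2 };

	pool.elements = calloc(pool.min_nr, sizeof(void *));
	pool_free(&pool, malloc(32));		/* preload a single element */

	void *a = pool_alloc_preallocated(&pool);
	void *b = pool_alloc_preallocated(&pool);

	printf("first: %p, second: %p\n", a, b);	/* second is NULL */
	free(a);
	free(pool.elements);
	return 0;
}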
diff --git a/mm/memremap.c b/mm/memremap.c
index bee85560a243..9e9fb1972fff 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -7,6 +7,7 @@
#include <linux/memremap.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
+#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
@@ -422,19 +423,6 @@ void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);
-unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
-{
- /* number of pfns from base where pfn_to_page() is valid */
- if (altmap)
- return altmap->reserve + altmap->free;
- return 0;
-}
-
-void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
-{
- altmap->alloc -= nr_pfns;
-}
-
/**
* get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
* @pfn: page frame number to lookup page_map
@@ -485,21 +473,11 @@ void free_zone_device_page(struct page *page)
__ClearPageAnonExclusive(page);
/*
- * When a device managed page is freed, the page->mapping field
+ * When a device managed page is freed, the folio->mapping field
* may still contain a (stale) mapping value. For example, the
- * lower bits of page->mapping may still identify the page as an
- * anonymous page. Ultimately, this entire field is just stale
- * and wrong, and it will cause errors if not cleared. One
- * example is:
- *
- * migrate_vma_pages()
- * migrate_vma_insert_page()
- * page_add_new_anon_rmap()
- * __page_set_anon_rmap()
- * ...checks page->mapping, via PageAnon(page) call,
- * and incorrectly concludes that the page is an
- * anonymous page. Therefore, it incorrectly,
- * silently fails to set up the new anon rmap.
+ * lower bits of folio->mapping may still identify the folio as an
+ * anonymous folio. Ultimately, this entire field is just stale
+ * and wrong, and it will cause errors if not cleared.
*
* For other types of ZONE_DEVICE pages, migration is either
* handled differently or not done at all, so there is no need
diff --git a/mm/migrate.c b/mm/migrate.c
index 397f2a6e34cb..bde8273cf15b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -249,20 +249,20 @@ static bool remove_migration_pte(struct folio *folio,
pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
if (folio_test_anon(folio))
- hugepage_add_anon_rmap(folio, vma, pvmw.address,
- rmap_flags);
+ hugetlb_add_anon_rmap(folio, vma, pvmw.address,
+ rmap_flags);
else
- page_dup_file_rmap(new, true);
+ hugetlb_add_file_rmap(folio);
set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
psize);
} else
#endif
{
if (folio_test_anon(folio))
- page_add_anon_rmap(new, vma, pvmw.address,
- rmap_flags);
+ folio_add_anon_rmap_pte(folio, new, vma,
+ pvmw.address, rmap_flags);
else
- page_add_file_rmap(new, vma, false);
+ folio_add_file_rmap_pte(folio, new, vma);
set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
}
if (vma->vm_flags & VM_LOCKED)
@@ -753,7 +753,7 @@ static int __buffer_migrate_folio(struct address_space *mapping,
recheck_buffers:
busy = false;
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
bh = head;
do {
if (atomic_read(&bh->b_count)) {
@@ -767,7 +767,7 @@ recheck_buffers:
rc = -EAGAIN;
goto unlock_buffers;
}
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
invalidate_bh_lrus();
invalidated = true;
goto recheck_buffers;
@@ -794,7 +794,7 @@ recheck_buffers:
rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
if (check_refs)
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
bh = head;
do {
unlock_buffer(bh);
@@ -1025,38 +1025,31 @@ out:
}
/*
- * To record some information during migration, we use some unused
- * fields (mapping and private) of struct folio of the newly allocated
- * destination folio. This is safe because nobody is using them
- * except us.
+ * To record some information during migration, we use unused private
+ * field of struct folio of the newly allocated destination folio.
+ * This is safe because nobody is using it except us.
*/
-union migration_ptr {
- struct anon_vma *anon_vma;
- struct address_space *mapping;
-};
-
enum {
PAGE_WAS_MAPPED = BIT(0),
PAGE_WAS_MLOCKED = BIT(1),
+ PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
};
static void __migrate_folio_record(struct folio *dst,
- unsigned long old_page_state,
+ int old_page_state,
struct anon_vma *anon_vma)
{
- union migration_ptr ptr = { .anon_vma = anon_vma };
- dst->mapping = ptr.mapping;
- dst->private = (void *)old_page_state;
+ dst->private = (void *)anon_vma + old_page_state;
}
static void __migrate_folio_extract(struct folio *dst,
int *old_page_state,
struct anon_vma **anon_vmap)
{
- union migration_ptr ptr = { .mapping = dst->mapping };
- *anon_vmap = ptr.anon_vma;
- *old_page_state = (unsigned long)dst->private;
- dst->mapping = NULL;
+ unsigned long private = (unsigned long)dst->private;
+
+ *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
+ *old_page_state = private & PAGE_OLD_STATES;
dst->private = NULL;
}
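The __migrate_folio_record()/__migrate_folio_extract() change above stops borrowing dst->mapping and instead packs the anon_vma pointer together with the PAGE_WAS_MAPPED/PAGE_WAS_MLOCKED bits into the single private field, relying on the pointer's low bits being zero due to its alignment. A user-space sketch of that packing, with the alignment assumption made explicit:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

enum {
	WAS_MAPPED  = 1 << 0,
	WAS_MLOCKED = 1 << 1,
	OLD_STATES  = WAS_MAPPED | WAS_MLOCKED,
};

struct record {			/* stand-in for struct anon_vma */
	long dummy;
};

static void *pack(struct record *rec, int state)
{
	return (void *)((unsigned long)rec | (unsigned long)state);
}

static void unpack(void *private, struct record **rec, int *state)
{
	unsigned long v = (unsigned long)private;

	*rec = (struct record *)(v & ~(unsigned long)OLD_STATES);
	*state = (int)(v & OLD_STATES);
}

int main(void)
{
	struct record *rec = malloc(sizeof(*rec));
	struct record *out;
	int state;

	/* malloc alignment leaves the two low bits free for the state */
	assert(((unsigned long)rec & OLD_STATES) == 0);

	unpack(pack(rec, WAS_MAPPED | WAS_MLOCKED), &out, &state);
	printf("pointer restored: %d, was mapped: %d, was mlocked: %d\n",
	       out == rec, !!(state & WAS_MAPPED), !!(state & WAS_MLOCKED));
	free(rec);
	return 0;
}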
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 8ac1f79f754a..b6c27c76e1a0 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -107,6 +107,7 @@ again:
for (; addr < end; addr += PAGE_SIZE, ptep++) {
unsigned long mpfn = 0, pfn;
+ struct folio *folio;
struct page *page;
swp_entry_t entry;
pte_t pte;
@@ -168,41 +169,43 @@ again:
}
/*
- * By getting a reference on the page we pin it and that blocks
+ * By getting a reference on the folio we pin it and that blocks
* any kind of migration. Side effect is that it "freezes" the
* pte.
*
- * We drop this reference after isolating the page from the lru
- * for non device page (device page are not on the lru and thus
+ * We drop this reference after isolating the folio from the lru
+ * for a non-device folio (device folios are not on the lru and thus
* can't be dropped from it).
*/
- get_page(page);
+ folio = page_folio(page);
+ folio_get(folio);
/*
- * We rely on trylock_page() to avoid deadlock between
+ * We rely on folio_trylock() to avoid deadlock between
* concurrent migrations where each is waiting on the others
- * page lock. If we can't immediately lock the page we fail this
+ * folio lock. If we can't immediately lock the folio we fail this
* migration as it is only best effort anyway.
*
- * If we can lock the page it's safe to set up a migration entry
- * now. In the common case where the page is mapped once in a
+ * If we can lock the folio it's safe to set up a migration entry
+ * now. In the common case where the folio is mapped once in a
* single process setting up the migration entry now is an
* optimisation to avoid walking the rmap later with
* try_to_migrate().
*/
- if (trylock_page(page)) {
+ if (folio_trylock(folio)) {
bool anon_exclusive;
pte_t swp_pte;
flush_cache_page(vma, addr, pte_pfn(pte));
- anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
+ anon_exclusive = folio_test_anon(folio) &&
+ PageAnonExclusive(page);
if (anon_exclusive) {
pte = ptep_clear_flush(vma, addr, ptep);
- if (page_try_share_anon_rmap(page)) {
+ if (folio_try_share_anon_rmap_pte(folio, page)) {
set_pte_at(mm, addr, ptep, pte);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
mpfn = 0;
goto next;
}
@@ -214,7 +217,7 @@ again:
/* Set the dirty flag on the folio now the pte is gone. */
if (pte_dirty(pte))
- folio_mark_dirty(page_folio(page));
+ folio_mark_dirty(folio);
/* Setup special migration page table entry */
if (mpfn & MIGRATE_PFN_WRITE)
@@ -248,16 +251,16 @@ again:
/*
* This is like regular unmap: we remove the rmap and
- * drop page refcount. Page won't be freed, as we took
- * a reference just above.
+ * drop the folio refcount. The folio won't be freed, as
+ * we took a reference just above.
*/
- page_remove_rmap(page, vma, false);
- put_page(page);
+ folio_remove_rmap_pte(folio, page, vma);
+ folio_put(folio);
if (pte_present(pte))
unmapped++;
} else {
- put_page(page);
+ folio_put(folio);
mpfn = 0;
}
@@ -564,6 +567,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
struct page *page,
unsigned long *src)
{
+ struct folio *folio = page_folio(page);
struct vm_area_struct *vma = migrate->vma;
struct mm_struct *mm = vma->vm_mm;
bool flush = false;
@@ -596,17 +600,17 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
goto abort;
if (unlikely(anon_vma_prepare(vma)))
goto abort;
- if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
+ if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
goto abort;
/*
- * The memory barrier inside __SetPageUptodate makes sure that
- * preceding stores to the page contents become visible before
+ * The memory barrier inside __folio_mark_uptodate makes sure that
+ * preceding stores to the folio contents become visible before
* the set_pte_at() write.
*/
- __SetPageUptodate(page);
+ __folio_mark_uptodate(folio);
- if (is_device_private_page(page)) {
+ if (folio_is_device_private(folio)) {
swp_entry_t swp_entry;
if (vma->vm_flags & VM_WRITE)
@@ -617,8 +621,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
page_to_pfn(page));
entry = swp_entry_to_pte(swp_entry);
} else {
- if (is_zone_device_page(page) &&
- !is_device_coherent_page(page)) {
+ if (folio_is_zone_device(folio) &&
+ !folio_is_device_coherent(folio)) {
pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
goto abort;
}
@@ -652,10 +656,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
goto unlock_abort;
inc_mm_counter(mm, MM_ANONPAGES);
- page_add_new_anon_rmap(page, vma, addr);
- if (!is_zone_device_page(page))
- lru_cache_add_inactive_or_unevictable(page, vma);
- get_page(page);
+ folio_add_new_anon_rmap(folio, vma, addr);
+ if (!folio_is_zone_device(folio))
+ folio_add_lru_vma(folio, vma);
+ folio_get(folio);
if (flush) {
flush_cache_page(vma, addr, pte_pfn(orig_pte));
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 077bfe393b5e..89dc29f1e6c6 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -796,6 +796,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
* - physical memory bank size is not necessarily the exact multiple of the
* arbitrary section size
* - early reserved memory may not be listed in memblock.memory
+ * - non-memory regions covered by the contiguous flatmem mapping
* - memory layouts defined with memmap= kernel parameter may not align
* nicely with memmap sections
*
@@ -826,7 +827,7 @@ static void __init init_unavailable_range(unsigned long spfn,
}
if (pgcnt)
- pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
+ pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n",
node, zone_names[zone], pgcnt);
}
@@ -1454,7 +1455,7 @@ static inline void setup_usemap(struct zone *zone) {}
/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __init set_pageblock_order(void)
{
- unsigned int order = MAX_ORDER;
+ unsigned int order = MAX_PAGE_ORDER;
/* Check that pageblock_nr_pages has not already been setup */
if (pageblock_order)
@@ -1466,8 +1467,7 @@ void __init set_pageblock_order(void)
/*
* Assume the largest contiguous order of interest is a huge page.
- * This value may be variable depending on boot parameters on IA64 and
- * powerpc.
+ * This value may be variable depending on boot parameters on powerpc.
*/
pageblock_order = order;
}
@@ -1628,8 +1628,8 @@ void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
#ifdef CONFIG_FLATMEM
static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
- unsigned long __maybe_unused start = 0;
- unsigned long __maybe_unused offset = 0;
+ unsigned long start, offset, size, end;
+ struct page *map;
/* Skip empty nodes */
if (!pgdat->node_spanned_pages)
@@ -1637,33 +1637,24 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
offset = pgdat->node_start_pfn - start;
- /* ia64 gets its own node_mem_map, before this, without bootmem */
- if (!pgdat->node_mem_map) {
- unsigned long size, end;
- struct page *map;
-
- /*
- * The zone's endpoints aren't required to be MAX_ORDER
- * aligned but the node_mem_map endpoints must be in order
- * for the buddy allocator to function correctly.
- */
- end = pgdat_end_pfn(pgdat);
- end = ALIGN(end, MAX_ORDER_NR_PAGES);
- size = (end - start) * sizeof(struct page);
- map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
- pgdat->node_id, false);
- if (!map)
- panic("Failed to allocate %ld bytes for node %d memory map\n",
- size, pgdat->node_id);
- pgdat->node_mem_map = map + offset;
- }
- pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
- __func__, pgdat->node_id, (unsigned long)pgdat,
- (unsigned long)pgdat->node_mem_map);
-#ifndef CONFIG_NUMA
/*
- * With no DISCONTIG, the global mem_map is just set as node 0's
+ * The zone's endpoints aren't required to be MAX_PAGE_ORDER
+ * aligned but the node_mem_map endpoints must be in order
+ * for the buddy allocator to function correctly.
*/
+ end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES);
+ size = (end - start) * sizeof(struct page);
+ map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
+ pgdat->node_id, false);
+ if (!map)
+ panic("Failed to allocate %ld bytes for node %d memory map\n",
+ size, pgdat->node_id);
+ pgdat->node_mem_map = map + offset;
+ pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
+ __func__, pgdat->node_id, (unsigned long)pgdat,
+ (unsigned long)pgdat->node_mem_map);
+#ifndef CONFIG_NUMA
+ /* the global mem_map is just set as node 0's */
if (pgdat == NODE_DATA(0)) {
mem_map = NODE_DATA(0)->node_mem_map;
if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
@@ -1973,11 +1964,11 @@ static void __init deferred_free_range(unsigned long pfn,
if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
for (i = 0; i < nr_pages; i += pageblock_nr_pages)
set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
- __free_pages_core(page, MAX_ORDER);
+ __free_pages_core(page, MAX_PAGE_ORDER);
return;
}
- /* Accept chunks smaller than MAX_ORDER upfront */
+ /* Accept chunks smaller than MAX_PAGE_ORDER upfront */
accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages));
for (i = 0; i < nr_pages; i++, page++, pfn++) {
@@ -2000,8 +1991,8 @@ static inline void __init pgdat_init_report_one_done(void)
/*
* Returns true if page needs to be initialized or freed to buddy allocator.
*
- * We check if a current MAX_ORDER block is valid by only checking the validity
- * of the head pfn.
+ * We check if a current MAX_PAGE_ORDER block is valid by only checking the
+ * validity of the head pfn.
*/
static inline bool __init deferred_pfn_valid(unsigned long pfn)
{
@@ -2158,8 +2149,8 @@ deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
/*
- * Initialize and free pages in MAX_ORDER sized increments so that we
- * can avoid introducing any issues with the buddy allocator.
+ * Initialize and free pages in MAX_PAGE_ORDER sized increments so that
+ * we can avoid introducing any issues with the buddy allocator.
*/
while (spfn < end_pfn) {
deferred_init_maxorder(&i, zone, &spfn, &epfn);
@@ -2300,7 +2291,7 @@ bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
}
/*
- * Initialize and free pages in MAX_ORDER sized increments so
+ * Initialize and free pages in MAX_PAGE_ORDER sized increments so
* that we can avoid introducing any issues with the buddy
* allocator.
*/
@@ -2518,7 +2509,7 @@ void *__init alloc_large_system_hash(const char *tablename,
else
table = memblock_alloc_raw(size,
SMP_CACHE_BYTES);
- } else if (get_order(size) > MAX_ORDER || hashdist) {
+ } else if (get_order(size) > MAX_PAGE_ORDER || hashdist) {
table = vmalloc_huge(size, gfp_flags);
virt = true;
if (table)
@@ -2765,7 +2756,7 @@ void __init mm_core_init(void)
/*
* page_ext requires contiguous pages,
- * bigger than MAX_ORDER unless SPARSEMEM.
+ * bigger than MAX_PAGE_ORDER unless SPARSEMEM.
*/
page_ext_init_flatmem();
mem_debugging_and_hardening_init();
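alloc_node_mem_map() above now always rounds the node span out to MAX_ORDER_NR_PAGES before sizing the flat memory map, so the buddy allocator sees aligned endpoints while node_mem_map still points at the node's first pfn. The arithmetic, as a small user-space check; both constants are illustrative placeholders (1 << MAX_PAGE_ORDER pages and a typical sizeof(struct page)), not values taken from the patch.

#include <stdio.h>

#define MAX_ORDER_NR_PAGES 1024UL	/* illustrative: 1 << MAX_PAGE_ORDER */
#define STRUCT_PAGE_SIZE   64UL		/* illustrative sizeof(struct page) */

int main(void)
{
	unsigned long node_start_pfn = 1500;
	unsigned long node_end_pfn = 5000;

	/* align the start down and the end up to the largest buddy order */
	unsigned long start = node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
	unsigned long end = (node_end_pfn + MAX_ORDER_NR_PAGES - 1) &
			    ~(MAX_ORDER_NR_PAGES - 1);
	unsigned long offset = node_start_pfn - start;
	unsigned long size = (end - start) * STRUCT_PAGE_SIZE;

	printf("map covers pfns [%lu, %lu), %lu bytes, node_mem_map = map + %lu\n",
	       start, end, size, offset);
	return 0;
}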
diff --git a/mm/mmap.c b/mm/mmap.c
index 1971bfffcc03..b78e83d351d2 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1829,6 +1829,9 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
*/
pgoff = 0;
get_area = shmem_get_unmapped_area;
+ } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+ /* Ensures that larger anonymous mappings are THP aligned. */
+ get_area = thp_get_unmapped_area;
}
addr = get_area(file, addr, len, pgoff, flags);
@@ -2207,42 +2210,7 @@ struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned lon
}
#endif
-/*
- * IA64 has some horrid mapping rules: it can expand both up and down,
- * but with various special rules.
- *
- * We'll get rid of this architecture eventually, so the ugliness is
- * temporary.
- */
-#ifdef CONFIG_IA64
-static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr)
-{
- return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) &&
- REGION_OFFSET(addr) < RGN_MAP_LIMIT;
-}
-
-/*
- * IA64 stacks grow down, but there's a special register backing store
- * that can grow up. Only sequentially, though, so the new address must
- * match vm_end.
- */
-static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr)
-{
- if (!vma_expand_ok(vma, addr))
- return -EFAULT;
- if (vma->vm_end != (addr & PAGE_MASK))
- return -EFAULT;
- return expand_upwards(vma, addr);
-}
-
-static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr)
-{
- if (!vma_expand_ok(vma, addr))
- return -EFAULT;
- return expand_downwards(vma, addr);
-}
-
-#elif defined(CONFIG_STACK_GROWSUP)
+#if defined(CONFIG_STACK_GROWSUP)
#define vma_expand_up(vma,addr) expand_upwards(vma, addr)
#define vma_expand_down(vma, addr) (-EFAULT)
@@ -3294,10 +3262,11 @@ void exit_mmap(struct mm_struct *mm)
arch_exit_mmap(mm);
vma = mas_find(&mas, ULONG_MAX);
- if (!vma) {
+ if (!vma || unlikely(xa_is_zero(vma))) {
/* Can happen if dup_mmap() received an OOM */
mmap_read_unlock(mm);
- return;
+ mmap_write_lock(mm);
+ goto destroy;
}
lru_add_drain();
@@ -3332,11 +3301,13 @@ void exit_mmap(struct mm_struct *mm)
remove_vma(vma, true);
count++;
cond_resched();
- } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
+ vma = mas_find(&mas, ULONG_MAX);
+ } while (vma && likely(!xa_is_zero(vma)));
BUG_ON(count != mm->map_count);
trace_exit_mmap(mm);
+destroy:
__mt_destroy(&mm->mm_mt);
mmap_write_unlock(mm);
vm_unacct_memory(nr_accounted);
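The get_unmapped_area() hunk above routes large anonymous mappings through thp_get_unmapped_area() so their start addresses can land on huge-page boundaries. The sketch below shows only the alignment arithmetic that makes a region PMD-mappable; the 2 MiB PMD size is an assumption (typical for x86-64), not something stated in the patch.

#include <stdio.h>

#define PMD_SIZE (2UL * 1024 * 1024)	/* assumed huge-page size */

/* Round an address hint up to the next PMD boundary. */
static unsigned long thp_align(unsigned long addr)
{
	return (addr + PMD_SIZE - 1) & ~(PMD_SIZE - 1);
}

int main(void)
{
	unsigned long hint = 0x7f1234567000UL;
	unsigned long aligned = thp_align(hint);

	printf("hint %#lx -> %#lx (PMD aligned: %s)\n", hint, aligned,
	       (aligned & (PMD_SIZE - 1)) ? "no" : "yes");
	return 0;
}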
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 4f559f4ddd21..604ddf08affe 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -55,7 +55,7 @@ static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_
if (encoded_page_flags(enc)) {
struct page *page = encoded_page_ptr(enc);
- page_remove_rmap(page, vma, false);
+ folio_remove_rmap_pte(page_folio(page), page, vma);
}
}
}
diff --git a/mm/mmzone.c b/mm/mmzone.c
index b594d3f268fe..c01896eca736 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -78,6 +78,7 @@ void lruvec_init(struct lruvec *lruvec)
memset(lruvec, 0, sizeof(struct lruvec));
spin_lock_init(&lruvec->lru_lock);
+ zswap_lruvec_state_init(lruvec);
for_each_lru(lru)
INIT_LIST_HEAD(&lruvec->lists[lru]);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 9e6071fde34a..91ccd82097c2 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -399,10 +399,11 @@ static int dump_task(struct task_struct *p, void *arg)
return 0;
}
- pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
+ pr_info("[%7d] %5d %5d %8lu %8lu %8lu %8lu %9lu %8ld %8lu %5hd %s\n",
task->pid, from_kuid(&init_user_ns, task_uid(task)),
task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
- mm_pgtables_bytes(task->mm),
+ get_mm_counter(task->mm, MM_ANONPAGES), get_mm_counter(task->mm, MM_FILEPAGES),
+ get_mm_counter(task->mm, MM_SHMEMPAGES), mm_pgtables_bytes(task->mm),
get_mm_counter(task->mm, MM_SWAPENTS),
task->signal->oom_score_adj, task->comm);
task_unlock(task);
@@ -423,7 +424,7 @@ static int dump_task(struct task_struct *p, void *arg)
static void dump_tasks(struct oom_control *oc)
{
pr_info("Tasks state (memory values in pages):\n");
- pr_info("[ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name\n");
+ pr_info("[ pid ] uid tgid total_vm rss rss_anon rss_file rss_shmem pgtables_bytes swapents oom_score_adj name\n");
if (is_memcg_oom(oc))
mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
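
The wider dump line breaks rss down into its anon, file and shmem components next to the existing total. A standalone sketch of the new columns with made-up counters, assuming get_mm_rss() is the sum of those three counters:

#include <stdio.h>

int main(void)
{
	unsigned long anon = 5120, file = 300, shmem = 80;	/* pages, illustrative */
	unsigned long rss = anon + file + shmem;		/* assumed: rss = anon + file + shmem */

	printf("[  pid  ]   uid  tgid total_vm      rss rss_anon rss_file rss_shmem ...\n");
	printf("[%7d] %5d %5d %8lu %8lu %8lu %8lu %9lu ...\n",
	       1234, 1000, 1234, 6000UL, rss, anon, file, shmem);
	return 0;
}
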
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ee2fd6a6af40..cd4e4ae77c40 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -692,7 +692,6 @@ static int __bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ra
if (min_ratio > 100 * BDI_RATIO_SCALE)
return -EINVAL;
- min_ratio *= BDI_RATIO_SCALE;
spin_lock_bh(&bdi_lock);
if (min_ratio > bdi->max_ratio) {
@@ -729,7 +728,8 @@ static int __bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ra
ret = -EINVAL;
} else {
bdi->max_ratio = max_ratio;
- bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
+ bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) /
+ (100 * BDI_RATIO_SCALE);
}
spin_unlock_bh(&bdi_lock);
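
The max_prop_frac change matters because max_ratio is now kept in BDI_RATIO_SCALE units, so the old divisor of 100 left the fraction scaled up by BDI_RATIO_SCALE. A minimal standalone sketch of the arithmetic, using assumed values for BDI_RATIO_SCALE and FPROP_FRAC_BASE rather than the kernel's definitions:

#include <stdio.h>

#define BDI_RATIO_SCALE 10000		/* assumed: ratios stored as percent * 10000 */
#define FPROP_FRAC_BASE (1UL << 10)	/* assumed fraction base */

int main(void)
{
	unsigned long max_ratio = 100 * BDI_RATIO_SCALE;	/* 100%, already scaled */

	unsigned long old_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
	unsigned long new_frac = (FPROP_FRAC_BASE * max_ratio) / (100 * BDI_RATIO_SCALE);

	/* old_frac comes out BDI_RATIO_SCALE times too large; new_frac equals FPROP_FRAC_BASE */
	printf("old=%lu new=%lu base=%lu\n", old_frac, new_frac, FPROP_FRAC_BASE);
	return 0;
}
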
@@ -2982,67 +2982,63 @@ bool __folio_end_writeback(struct folio *folio)
return ret;
}
-bool __folio_start_writeback(struct folio *folio, bool keep_write)
+void __folio_start_writeback(struct folio *folio, bool keep_write)
{
long nr = folio_nr_pages(folio);
struct address_space *mapping = folio_mapping(folio);
- bool ret;
int access_ret;
+ VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
+
folio_memcg_lock(folio);
if (mapping && mapping_use_writeback_tags(mapping)) {
XA_STATE(xas, &mapping->i_pages, folio_index(folio));
struct inode *inode = mapping->host;
struct backing_dev_info *bdi = inode_to_bdi(inode);
unsigned long flags;
+ bool on_wblist;
xas_lock_irqsave(&xas, flags);
xas_load(&xas);
- ret = folio_test_set_writeback(folio);
- if (!ret) {
- bool on_wblist;
+ folio_test_set_writeback(folio);
- on_wblist = mapping_tagged(mapping,
- PAGECACHE_TAG_WRITEBACK);
+ on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
- xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
- if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
- struct bdi_writeback *wb = inode_to_wb(inode);
-
- wb_stat_mod(wb, WB_WRITEBACK, nr);
- if (!on_wblist)
- wb_inode_writeback_start(wb);
- }
+ xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
+ if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
+ struct bdi_writeback *wb = inode_to_wb(inode);
- /*
- * We can come through here when swapping
- * anonymous folios, so we don't necessarily
- * have an inode to track for sync.
- */
- if (mapping->host && !on_wblist)
- sb_mark_inode_writeback(mapping->host);
+ wb_stat_mod(wb, WB_WRITEBACK, nr);
+ if (!on_wblist)
+ wb_inode_writeback_start(wb);
}
+
+ /*
+ * We can come through here when swapping anonymous
+ * folios, so we don't necessarily have an inode to
+ * track for sync.
+ */
+ if (mapping->host && !on_wblist)
+ sb_mark_inode_writeback(mapping->host);
if (!folio_test_dirty(folio))
xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
if (!keep_write)
xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
xas_unlock_irqrestore(&xas, flags);
} else {
- ret = folio_test_set_writeback(folio);
- }
- if (!ret) {
- lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
- zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
+ folio_test_set_writeback(folio);
}
+
+ lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
+ zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
folio_memcg_unlock(folio);
+
access_ret = arch_make_folio_accessible(folio);
/*
* If writeback has been triggered on a page that cannot be made
* accessible, it is too late to recover here.
*/
VM_BUG_ON_FOLIO(access_ret != 0, folio);
-
- return ret;
}
EXPORT_SYMBOL(__folio_start_writeback);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 733732e7e0ba..a01baf0454f8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -727,7 +727,7 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
unsigned long higher_page_pfn;
struct page *higher_page;
- if (order >= MAX_ORDER - 1)
+ if (order >= MAX_PAGE_ORDER - 1)
return false;
higher_page_pfn = buddy_pfn & pfn;
@@ -782,7 +782,7 @@ static inline void __free_one_page(struct page *page,
VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
VM_BUG_ON_PAGE(bad_range(zone, page), page);
- while (order < MAX_ORDER) {
+ while (order < MAX_PAGE_ORDER) {
if (compaction_capture(capc, page, order, migratetype)) {
__mod_zone_freepage_state(zone, -(1 << order),
migratetype);
@@ -1059,7 +1059,7 @@ static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
return deferred_pages_enabled();
- return page_kasan_tag(page) == 0xff;
+ return page_kasan_tag(page) == KASAN_TAG_KERNEL;
}
static void kernel_init_pages(struct page *page, int numpages)
@@ -1086,13 +1086,11 @@ static __always_inline bool free_pages_prepare(struct page *page,
trace_mm_page_free(page, order);
kmsan_free_page(page, order);
+ if (memcg_kmem_online() && PageMemcgKmem(page))
+ __memcg_kmem_uncharge_page(page, order);
+
if (unlikely(PageHWPoison(page)) && !order) {
- /*
- * Do not let hwpoison pages hit pcplists/buddy
- * Untie memcg state and reset page's owner
- */
- if (memcg_kmem_online() && PageMemcgKmem(page))
- __memcg_kmem_uncharge_page(page, order);
+ /* Do not let hwpoison pages hit pcplists/buddy */
reset_page_owner(page, order);
page_table_check_free(page, order);
return false;
@@ -1123,8 +1121,6 @@ static __always_inline bool free_pages_prepare(struct page *page,
}
if (PageMappingFlags(page))
page->mapping = NULL;
- if (memcg_kmem_online() && PageMemcgKmem(page))
- __memcg_kmem_uncharge_page(page, order);
if (is_check_pages_enabled()) {
if (free_page_is_bad(page))
bad++;
@@ -1259,7 +1255,6 @@ static void free_one_page(struct zone *zone,
static void __free_pages_ok(struct page *page, unsigned int order,
fpi_t fpi_flags)
{
- unsigned long flags;
int migratetype;
unsigned long pfn = page_to_pfn(page);
struct zone *zone = page_zone(page);
@@ -1274,13 +1269,7 @@ static void __free_pages_ok(struct page *page, unsigned int order,
*/
migratetype = get_pfnblock_migratetype(page, pfn);
- spin_lock_irqsave(&zone->lock, flags);
- if (unlikely(has_isolate_pageblock(zone) ||
- is_migrate_isolate(migratetype))) {
- migratetype = get_pfnblock_migratetype(page, pfn);
- }
- __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
- spin_unlock_irqrestore(&zone->lock, flags);
+ free_one_page(zone, page, pfn, order, migratetype, fpi_flags);
__count_vm_events(PGFREE, 1 << order);
}
@@ -1308,7 +1297,7 @@ void __free_pages_core(struct page *page, unsigned int order)
atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
if (page_contains_unaccepted(page, order)) {
- if (order == MAX_ORDER && __free_unaccepted(page))
+ if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
return;
accept_page(page, order);
@@ -1338,7 +1327,7 @@ void __free_pages_core(struct page *page, unsigned int order)
*
* Note: the function may return non-NULL struct page even for a page block
* which contains a memory hole (i.e. there is no physical memory for a subset
- * of the pfn range). For example, if the pageblock order is MAX_ORDER, which
+ * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, which
* will fall into 2 sub-sections, and the end pfn of the pageblock may be hole
* even though the start pfn is online and valid. This should be safe most of
* the time because struct pages are still initialized via init_unavailable_range()
@@ -1571,7 +1560,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
struct page *page;
/* Find a page of the appropriate size in the preferred list */
- for (current_order = order; current_order <= MAX_ORDER; ++current_order) {
+ for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
area = &(zone->free_area[current_order]);
page = get_page_from_free_area(area, migratetype);
if (!page)
@@ -1884,10 +1873,14 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
unsigned long max_managed, flags;
/*
- * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
+ * The amount reserved: the minimum is 1 pageblock, the maximum is
+ * roughly 1% of the zone. But if 1% of the zone is smaller than a
+ * pageblock, don't reserve any pageblocks.
* Check is race-prone but harmless.
*/
- max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
+ if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
+ return;
+ max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
if (zone->nr_reserved_highatomic >= max_managed)
return;
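
The reworked limit no longer adds a whole pageblock on top of 1% of the zone: tiny zones get no highatomic reserve at all, and larger zones round 1% up to a pageblock multiple. A rough standalone sketch with made-up zone sizes (pageblock_nr_pages of 512 is an assumption, not taken from the patch):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))	/* round up to a multiple of a */

int main(void)
{
	unsigned long pageblock_nr_pages = 512;			/* assumed: 2 MiB pageblocks, 4 KiB pages */
	unsigned long zones[] = { 10000, 200000, 2000000 };	/* managed pages, illustrative */

	for (int i = 0; i < 3; i++) {
		unsigned long one_percent = zones[i] / 100;

		if (one_percent < pageblock_nr_pages) {
			printf("zone %lu: no highatomic reserve\n", zones[i]);
			continue;
		}
		printf("zone %lu: max_managed = %lu pages\n", zones[i],
		       ALIGN(one_percent, pageblock_nr_pages));
	}
	return 0;
}
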
@@ -1941,7 +1934,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
continue;
spin_lock_irqsave(&zone->lock, flags);
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct free_area *area = &(zone->free_area[order]);
page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
@@ -2025,7 +2018,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
* approximates finding the pageblock with the most free pages, which
* would be too costly to do exactly.
*/
- for (current_order = MAX_ORDER; current_order >= min_order;
+ for (current_order = MAX_PAGE_ORDER; current_order >= min_order;
--current_order) {
area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order,
@@ -2051,8 +2044,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
return false;
find_smallest:
- for (current_order = order; current_order <= MAX_ORDER;
- current_order++) {
+ for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order,
start_migratetype, false, &can_steal);
@@ -2064,7 +2056,7 @@ find_smallest:
* This should not happen - we already found a suitable fallback
* when looking for the largest page.
*/
- VM_BUG_ON(current_order > MAX_ORDER);
+ VM_BUG_ON(current_order > MAX_PAGE_ORDER);
do_steal:
page = get_page_from_free_area(area, fallback_mt);
@@ -3007,7 +2999,7 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
return true;
/* For a high-order request, check at least one suitable page is free */
- for (o = order; o <= MAX_ORDER; o++) {
+ for (o = order; o < NR_PAGE_ORDERS; o++) {
struct free_area *area = &z->free_area[o];
int mt;
@@ -3951,14 +3943,9 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
else
(*no_progress_loops)++;
- /*
- * Make sure we converge to OOM if we cannot make any progress
- * several times in the row.
- */
- if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
- /* Before OOM, exhaust highatomic_reserve */
- return unreserve_highatomic_pageblock(ac, true);
- }
+ if (*no_progress_loops > MAX_RECLAIM_RETRIES)
+ goto out;
+
/*
* Keep reclaiming pages while there is a chance this will lead
@@ -4001,6 +3988,11 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
schedule_timeout_uninterruptible(1);
else
cond_resched();
+out:
+ /* Before OOM, exhaust highatomic_reserve */
+ if (!ret)
+ return unreserve_highatomic_pageblock(ac, true);
+
return ret;
}
@@ -4541,7 +4533,7 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
* There are several places where we assume that the order value is sane
* so bail out early if the request is out of bound.
*/
- if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
+ if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
return NULL;
gfp &= gfp_allowed_mask;
@@ -4823,7 +4815,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
* minimum number of pages to satisfy the request. alloc_pages() can only
* allocate memory in power-of-two pages.
*
- * This function is also limited by MAX_ORDER.
+ * This function is also limited by MAX_PAGE_ORDER.
*
* Memory allocated by this function must be released by free_pages_exact().
*
@@ -6381,7 +6373,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
order = 0;
outer_start = start;
while (!PageBuddy(pfn_to_page(outer_start))) {
- if (++order > MAX_ORDER) {
+ if (++order > MAX_PAGE_ORDER) {
outer_start = start;
break;
}
@@ -6635,7 +6627,7 @@ bool is_free_buddy_page(struct page *page)
unsigned long pfn = page_to_pfn(page);
unsigned int order;
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct page *page_head = page - (pfn & ((1 << order) - 1));
if (PageBuddy(page_head) &&
@@ -6643,7 +6635,7 @@ bool is_free_buddy_page(struct page *page)
break;
}
- return order <= MAX_ORDER;
+ return order <= MAX_PAGE_ORDER;
}
EXPORT_SYMBOL(is_free_buddy_page);
@@ -6690,7 +6682,7 @@ bool take_page_off_buddy(struct page *page)
bool ret = false;
spin_lock_irqsave(&zone->lock, flags);
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct page *page_head = page - (pfn & ((1 << order) - 1));
int page_order = buddy_order(page_head);
@@ -6815,9 +6807,9 @@ static bool try_to_accept_memory_one(struct zone *zone)
__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
spin_unlock_irqrestore(&zone->lock, flags);
- accept_page(page, MAX_ORDER);
+ accept_page(page, MAX_PAGE_ORDER);
- __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL);
+ __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
if (last)
static_branch_dec(&zones_with_unaccepted_pages);
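
Throughout this file the inclusive "order <= MAX_ORDER" loops become exclusive "order < NR_PAGE_ORDERS" loops. A trivial standalone check that the two forms visit the same set of orders, assuming NR_PAGE_ORDERS is MAX_PAGE_ORDER + 1 and using 10 only as an illustrative maximum order:

#include <stdio.h>

#define MAX_PAGE_ORDER	10			/* illustrative value */
#define NR_PAGE_ORDERS	(MAX_PAGE_ORDER + 1)	/* assumed relationship implied by the converted bounds */

int main(void)
{
	int old_count = 0, new_count = 0;

	for (int order = 0; order <= MAX_PAGE_ORDER; order++)	/* old style */
		old_count++;
	for (int order = 0; order < NR_PAGE_ORDERS; order++)	/* new style */
		new_count++;

	printf("orders visited: old=%d new=%d\n", old_count, new_count);	/* both 11 */
	return 0;
}
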
diff --git a/mm/page_io.c b/mm/page_io.c
index cb559ae324c6..ae2b49055e43 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -201,7 +201,12 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
folio_end_writeback(folio);
return 0;
}
- __swap_writepage(&folio->page, wbc);
+ if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
+ folio_mark_dirty(folio);
+ return AOP_WRITEPAGE_ACTIVATE;
+ }
+
+ __swap_writepage(folio, wbc);
return 0;
}
@@ -288,16 +293,16 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
mempool_free(sio, sio_pool);
}
-static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
+static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc)
{
struct swap_iocb *sio = NULL;
- struct swap_info_struct *sis = page_swap_info(page);
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
struct file *swap_file = sis->swap_file;
- loff_t pos = page_file_offset(page);
+ loff_t pos = folio_file_pos(folio);
- count_swpout_vm_event(page_folio(page));
- set_page_writeback(page);
- unlock_page(page);
+ count_swpout_vm_event(folio);
+ folio_start_writeback(folio);
+ folio_unlock(folio);
if (wbc->swap_plug)
sio = *wbc->swap_plug;
if (sio) {
@@ -315,8 +320,8 @@ static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
sio->pages = 0;
sio->len = 0;
}
- bvec_set_page(&sio->bvec[sio->pages], page, thp_size(page), 0);
- sio->len += thp_size(page);
+ bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
+ sio->len += folio_size(folio);
sio->pages += 1;
if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
swap_write_unplug(sio);
@@ -326,17 +331,16 @@ static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
*wbc->swap_plug = sio;
}
-static void swap_writepage_bdev_sync(struct page *page,
+static void swap_writepage_bdev_sync(struct folio *folio,
struct writeback_control *wbc, struct swap_info_struct *sis)
{
struct bio_vec bv;
struct bio bio;
- struct folio *folio = page_folio(page);
bio_init(&bio, sis->bdev, &bv, 1,
REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
- bio.bi_iter.bi_sector = swap_page_sector(page);
- __bio_add_page(&bio, page, thp_size(page), 0);
+ bio.bi_iter.bi_sector = swap_folio_sector(folio);
+ bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
bio_associate_blkg_from_page(&bio, folio);
count_swpout_vm_event(folio);
@@ -348,18 +352,17 @@ static void swap_writepage_bdev_sync(struct page *page,
__end_swap_bio_write(&bio);
}
-static void swap_writepage_bdev_async(struct page *page,
+static void swap_writepage_bdev_async(struct folio *folio,
struct writeback_control *wbc, struct swap_info_struct *sis)
{
struct bio *bio;
- struct folio *folio = page_folio(page);
bio = bio_alloc(sis->bdev, 1,
REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
GFP_NOIO);
- bio->bi_iter.bi_sector = swap_page_sector(page);
+ bio->bi_iter.bi_sector = swap_folio_sector(folio);
bio->bi_end_io = end_swap_bio_write;
- __bio_add_page(bio, page, thp_size(page), 0);
+ bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
bio_associate_blkg_from_page(bio, folio);
count_swpout_vm_event(folio);
@@ -368,22 +371,22 @@ static void swap_writepage_bdev_async(struct page *page,
submit_bio(bio);
}
-void __swap_writepage(struct page *page, struct writeback_control *wbc)
+void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
{
- struct swap_info_struct *sis = page_swap_info(page);
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
- VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
/*
* ->flags can be updated non-atomicially (scan_swap_map_slots),
* but that will never affect SWP_FS_OPS, so the data_race
* is safe.
*/
if (data_race(sis->flags & SWP_FS_OPS))
- swap_writepage_fs(page, wbc);
+ swap_writepage_fs(folio, wbc);
else if (sis->flags & SWP_SYNCHRONOUS_IO)
- swap_writepage_bdev_sync(page, wbc, sis);
+ swap_writepage_bdev_sync(folio, wbc, sis);
else
- swap_writepage_bdev_async(page, wbc, sis);
+ swap_writepage_bdev_async(folio, wbc, sis);
}
void swap_write_unplug(struct swap_iocb *sio)
@@ -422,12 +425,11 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
mempool_free(sio, sio_pool);
}
-static void swap_readpage_fs(struct page *page,
- struct swap_iocb **plug)
+static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
- struct swap_info_struct *sis = page_swap_info(page);
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
struct swap_iocb *sio = NULL;
- loff_t pos = page_file_offset(page);
+ loff_t pos = folio_file_pos(folio);
if (plug)
sio = *plug;
@@ -446,8 +448,8 @@ static void swap_readpage_fs(struct page *page,
sio->pages = 0;
sio->len = 0;
}
- bvec_set_page(&sio->bvec[sio->pages], page, thp_size(page), 0);
- sio->len += thp_size(page);
+ bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
+ sio->len += folio_size(folio);
sio->pages += 1;
if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
swap_read_unplug(sio);
@@ -457,15 +459,15 @@ static void swap_readpage_fs(struct page *page,
*plug = sio;
}
-static void swap_readpage_bdev_sync(struct page *page,
+static void swap_read_folio_bdev_sync(struct folio *folio,
struct swap_info_struct *sis)
{
struct bio_vec bv;
struct bio bio;
bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
- bio.bi_iter.bi_sector = swap_page_sector(page);
- __bio_add_page(&bio, page, thp_size(page), 0);
+ bio.bi_iter.bi_sector = swap_folio_sector(folio);
+ bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
/*
* Keep this task valid during swap readpage because the oom killer may
* attempt to access it in the page fault retry time check.
@@ -477,23 +479,23 @@ static void swap_readpage_bdev_sync(struct page *page,
put_task_struct(current);
}
-static void swap_readpage_bdev_async(struct page *page,
+static void swap_read_folio_bdev_async(struct folio *folio,
struct swap_info_struct *sis)
{
struct bio *bio;
bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
- bio->bi_iter.bi_sector = swap_page_sector(page);
+ bio->bi_iter.bi_sector = swap_folio_sector(folio);
bio->bi_end_io = end_swap_bio_read;
- __bio_add_page(bio, page, thp_size(page), 0);
+ bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
count_vm_event(PSWPIN);
submit_bio(bio);
}
-void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
+void swap_read_folio(struct folio *folio, bool synchronous,
+ struct swap_iocb **plug)
{
- struct folio *folio = page_folio(page);
- struct swap_info_struct *sis = page_swap_info(page);
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
bool workingset = folio_test_workingset(folio);
unsigned long pflags;
bool in_thrashing;
@@ -517,11 +519,11 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
folio_mark_uptodate(folio);
folio_unlock(folio);
} else if (data_race(sis->flags & SWP_FS_OPS)) {
- swap_readpage_fs(page, plug);
+ swap_read_folio_fs(folio, plug);
} else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
- swap_readpage_bdev_sync(page, sis);
+ swap_read_folio_bdev_sync(folio, sis);
} else {
- swap_readpage_bdev_async(page, sis);
+ swap_read_folio_bdev_async(folio, sis);
}
if (workingset) {
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index bcf99ba747a0..cd0ea3668253 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -226,7 +226,7 @@ static void unset_migratetype_isolate(struct page *page, int migratetype)
*/
if (PageBuddy(page)) {
order = buddy_order(page);
- if (order >= pageblock_order && order < MAX_ORDER) {
+ if (order >= pageblock_order && order < MAX_PAGE_ORDER) {
buddy = find_buddy_page_pfn(page, page_to_pfn(page),
order, NULL);
if (buddy && !is_migrate_isolate_page(buddy)) {
@@ -290,11 +290,12 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* isolate_single_pageblock()
* @migratetype: migrate type to set in error recovery.
*
- * Free and in-use pages can be as big as MAX_ORDER and contain more than one
+ * Free and in-use pages can be as big as MAX_PAGE_ORDER and contain more than one
* pageblock. When not all pageblocks within a page are isolated at the same
* time, free page accounting can go wrong. For example, in the case of
- * MAX_ORDER = pageblock_order + 1, a MAX_ORDER page has two pagelbocks.
- * [ MAX_ORDER ]
+ * MAX_PAGE_ORDER = pageblock_order + 1, a MAX_PAGE_ORDER page has two
+ * pageblocks.
+ * [ MAX_PAGE_ORDER ]
* [ pageblock0 | pageblock1 ]
* When either pageblock is isolated, if it is a free page, the page is not
* split into separate migratetype lists, which is supposed to; if it is an
@@ -451,7 +452,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
* the free page to the right migratetype list.
*
* head_pfn is not used here as a hugetlb page order
- * can be bigger than MAX_ORDER, but after it is
+ * can be bigger than MAX_PAGE_ORDER, but after it is
* freed, the free page order is not. Use pfn within
* the range to find the head of the free page.
*/
@@ -459,7 +460,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
outer_pfn = pfn;
while (!PageBuddy(pfn_to_page(outer_pfn))) {
/* stop if we cannot find the free page */
- if (++order > MAX_ORDER)
+ if (++order > MAX_PAGE_ORDER)
goto failed;
outer_pfn &= ~0UL << order;
}
@@ -660,8 +661,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
int ret;
/*
- * Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
- * are not aligned to pageblock_nr_pages.
+ * Note: pageblock_nr_pages != MAX_PAGE_ORDER. Then, chunks of free
+ * pages are not aligned to pageblock_nr_pages.
* Then we just check migratetype first.
*/
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 4f13ce7d2452..5634e5d890f8 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -32,6 +32,8 @@ struct page_owner {
char comm[TASK_COMM_LEN];
pid_t pid;
pid_t tgid;
+ pid_t free_pid;
+ pid_t free_tgid;
};
static bool page_owner_enabled __initdata;
@@ -119,7 +121,6 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
* Sometimes page metadata allocation tracking requires more
* memory to be allocated:
* - when new stack trace is saved to stack depot
- * - when backtrace itself is calculated (ia64)
*/
if (current->in_page_owner)
return dummy_handle;
@@ -152,6 +153,8 @@ void __reset_page_owner(struct page *page, unsigned short order)
page_owner = get_page_owner(page_ext);
page_owner->free_handle = handle;
page_owner->free_ts_nsec = free_ts_nsec;
+ page_owner->free_pid = current->pid;
+ page_owner->free_tgid = current->tgid;
page_ext = page_ext_next(page_ext);
}
page_ext_put(page_ext);
@@ -253,6 +256,8 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old)
new_page_owner->handle = old_page_owner->handle;
new_page_owner->pid = old_page_owner->pid;
new_page_owner->tgid = old_page_owner->tgid;
+ new_page_owner->free_pid = old_page_owner->free_pid;
+ new_page_owner->free_tgid = old_page_owner->free_tgid;
new_page_owner->ts_nsec = old_page_owner->ts_nsec;
new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;
strcpy(new_page_owner->comm, old_page_owner->comm);
@@ -315,7 +320,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
unsigned long freepage_order;
freepage_order = buddy_order_unsafe(page);
- if (freepage_order <= MAX_ORDER)
+ if (freepage_order <= MAX_PAGE_ORDER)
pfn += (1UL << freepage_order) - 1;
continue;
}
@@ -495,7 +500,8 @@ void __dump_page_owner(const struct page *page)
if (!handle) {
pr_alert("page_owner free stack trace missing\n");
} else {
- pr_alert("page last free stack trace:\n");
+ pr_alert("page last free pid %d tgid %d stack trace:\n",
+ page_owner->free_pid, page_owner->free_tgid);
stack_depot_print(handle);
}
@@ -549,7 +555,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
if (PageBuddy(page)) {
unsigned long freepage_order = buddy_order_unsafe(page);
- if (freepage_order <= MAX_ORDER)
+ if (freepage_order <= MAX_PAGE_ORDER)
pfn += (1UL << freepage_order) - 1;
continue;
}
@@ -657,7 +663,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
if (PageBuddy(page)) {
unsigned long order = buddy_order_unsafe(page);
- if (order > 0 && order <= MAX_ORDER)
+ if (order > 0 && order <= MAX_PAGE_ORDER)
pfn += (1UL << order) - 1;
continue;
}
diff --git a/mm/page_poison.c b/mm/page_poison.c
index b4f456437b7e..3e9037363cf9 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -21,13 +21,13 @@ early_param("page_poison", early_page_poison_param);
static void poison_page(struct page *page)
{
- void *addr = kmap_atomic(page);
+ void *addr = kmap_local_page(page);
/* KASAN still think the page is in-use, so skip it. */
kasan_disable_current();
memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
kasan_enable_current();
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
void __kernel_poison_pages(struct page *page, int n)
@@ -77,7 +77,7 @@ static void unpoison_page(struct page *page)
{
void *addr;
- addr = kmap_atomic(page);
+ addr = kmap_local_page(page);
kasan_disable_current();
/*
* Page poisoning when enabled poisons each and every page
@@ -86,7 +86,7 @@ static void unpoison_page(struct page *page)
*/
check_poison_mem(page, kasan_reset_tag(addr), PAGE_SIZE);
kasan_enable_current();
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
void __kernel_unpoison_pages(struct page *page, int n)
diff --git a/mm/page_reporting.c b/mm/page_reporting.c
index b021f482a4cb..e4c428e61d8c 100644
--- a/mm/page_reporting.c
+++ b/mm/page_reporting.c
@@ -20,7 +20,7 @@ static int page_order_update_notify(const char *val, const struct kernel_param *
* If param is set beyond this limit, order is set to default
* pageblock_order value
*/
- return param_set_uint_minmax(val, kp, 0, MAX_ORDER);
+ return param_set_uint_minmax(val, kp, 0, MAX_PAGE_ORDER);
}
static const struct kernel_param_ops page_reporting_param_ops = {
@@ -276,7 +276,7 @@ page_reporting_process_zone(struct page_reporting_dev_info *prdev,
return err;
/* Process each free list starting from lowest order/mt */
- for (order = page_reporting_order; order <= MAX_ORDER; order++) {
+ for (order = page_reporting_order; order < NR_PAGE_ORDERS; order++) {
for (mt = 0; mt < MIGRATE_TYPES; mt++) {
/* We do not pull pages from the isolate free list */
if (is_migrate_isolate(mt))
@@ -370,7 +370,7 @@ int page_reporting_register(struct page_reporting_dev_info *prdev)
*/
if (page_reporting_order == -1) {
- if (prdev->order > 0 && prdev->order <= MAX_ORDER)
+ if (prdev->order > 0 && prdev->order <= MAX_PAGE_ORDER)
page_reporting_order = prdev->order;
else
page_reporting_order = pageblock_order;
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index e0b368e545ed..74d2de15fb5e 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -268,7 +268,8 @@ restart:
* cleared *pmd but not decremented compound_mapcount().
*/
if ((pvmw->flags & PVMW_SYNC) &&
- transhuge_vma_suitable(vma, pvmw->address) &&
+ thp_vma_suitable_order(vma, pvmw->address,
+ PMD_ORDER) &&
(pvmw->nr_pages >= HPAGE_PMD_NR)) {
spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index b7d7e4fcfad7..f46c80b18ce4 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -539,6 +539,11 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
* not backed by VMAs. Because 'unusual' entries may be walked this function
* will also not lock the PTEs for the pte_entry() callback. This is useful for
* walking the kernel pages tables or page tables for firmware.
+ *
+ * Note: Be careful when walking the kernel page tables: the caller may need
+ * to take other effective measures (the mmap lock may be insufficient) to
+ * prevent the intermediate kernel page tables belonging to the specified
+ * address range from being freed (e.g. memory hot-remove).
*/
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
@@ -556,7 +561,29 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
if (start >= end || !walk.mm)
return -EINVAL;
- mmap_assert_write_locked(walk.mm);
+ /*
+ * 1) For walking the user virtual address space:
+ *
+ * The mmap lock protects the page walker from changes to the page
+ * tables during the walk. However a read lock is insufficient to
+ * protect those areas which don't have a VMA as munmap() detaches
+ * the VMAs before downgrading to a read lock and actually tearing
+ * down PTEs/page tables. In that case, the mmap write lock should
+ * be held.
+ *
+ * 2) For walking the kernel virtual address space:
+ *
+ * The kernel intermediate page tables are usually not freed, so the
+ * mmap read lock is sufficient. But there are some exceptions, e.g.
+ * memory hot-remove, in which case the mmap lock is insufficient to
+ * prevent the intermediate kernel page tables belonging to the
+ * specified address range from being freed. The caller should take
+ * other actions to prevent this race.
+ */
+ if (mm == &init_mm)
+ mmap_assert_locked(walk.mm);
+ else
+ mmap_assert_write_locked(walk.mm);
return walk_pgd_range(start, end, &walk);
}
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 0523edab03a6..b308e96cd05a 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -53,7 +53,10 @@ static int process_vm_rw_pages(struct page **pages,
}
/* Maximum number of pages kmalloc'd to hold struct page's during copy */
-#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
+#define PVM_MAX_KMALLOC_PAGES 2
+
+/* Maximum number of pages that can be stored at a time */
+#define PVM_MAX_USER_PAGES (PVM_MAX_KMALLOC_PAGES * PAGE_SIZE / sizeof(struct page *))
/**
* process_vm_rw_single_vec - read/write pages from task specified
@@ -79,8 +82,6 @@ static int process_vm_rw_single_vec(unsigned long addr,
unsigned long start_offset = addr - pa;
unsigned long nr_pages;
ssize_t rc = 0;
- unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
- / sizeof(struct pages *);
unsigned int flags = 0;
/* Work out address and page range required */
@@ -92,7 +93,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
flags |= FOLL_WRITE;
while (!rc && nr_pages && iov_iter_count(iter)) {
- int pinned_pages = min(nr_pages, max_pages_per_loop);
+ int pinned_pages = min_t(unsigned long, nr_pages, PVM_MAX_USER_PAGES);
int locked = 1;
size_t bytes;
@@ -171,7 +172,7 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
iov_len = rvec[i].iov_len;
if (iov_len > 0) {
nr_pages_iov = ((unsigned long)rvec[i].iov_base
- + iov_len)
+ + iov_len - 1)
/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
/ PAGE_SIZE + 1;
nr_pages = max(nr_pages, nr_pages_iov);
@@ -184,8 +185,8 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
/* For reliability don't try to kmalloc more than
2 pages worth */
- process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
- sizeof(struct pages *)*nr_pages),
+ process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES * PAGE_SIZE,
+ sizeof(struct page *)*nr_pages),
GFP_KERNEL);
if (!process_pages)
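
Two things change here: the per-iteration pin limit is now expressed directly in pages of pointers, and the iovec page count no longer over-counts when the end of the iovec lands exactly on a page boundary. A standalone sketch of both calculations, assuming 4 KiB pages and 8-byte pointers:

#include <stdio.h>

#define PAGE_SIZE		4096UL			/* assumed page size */
#define PVM_MAX_KMALLOC_PAGES	2
#define PVM_MAX_USER_PAGES	(PVM_MAX_KMALLOC_PAGES * PAGE_SIZE / sizeof(void *))

static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	/* matches the fixed expression: the last byte is base + len - 1 */
	return (base + len - 1) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
	printf("pages pinned per loop: %lu\n", PVM_MAX_USER_PAGES);	/* 1024 with these assumptions */

	/* a 4 KiB iovec starting at a page boundary spans exactly one page;
	 * the old "+ iov_len" form counted two */
	printf("pages for 4K at a page boundary: %lu\n", pages_spanned(0x10000, 4096));
	return 0;
}
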
diff --git a/mm/readahead.c b/mm/readahead.c
index 6925e6959fd3..23620c57c122 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -511,16 +511,14 @@ void page_cache_ra_order(struct readahead_control *ractl,
unsigned int order = new_order;
/* Align with smaller pages if needed */
- if (index & ((1UL << order) - 1)) {
+ if (index & ((1UL << order) - 1))
order = __ffs(index);
- if (order == 1)
- order = 0;
- }
/* Don't allocate pages past EOF */
- while (index + (1UL << order) - 1 > limit) {
- if (--order == 1)
- order = 0;
- }
+ while (index + (1UL << order) - 1 > limit)
+ order--;
+ /* THP machinery does not support order-1 */
+ if (order == 1)
+ order = 0;
err = ra_alloc_folio(ractl, index, mark, order, gfp);
if (err)
break;
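
The readahead order is now clamped in three steps: align down to the index, shrink past EOF, and avoid order-1, which the THP machinery does not support. A standalone sketch of that selection with made-up inputs (__builtin_ctzl stands in for the kernel's __ffs()):

#include <stdio.h>

static unsigned int pick_order(unsigned long index, unsigned long limit,
			       unsigned int new_order)
{
	unsigned int order = new_order;

	if (index & ((1UL << order) - 1))
		order = __builtin_ctzl(index);	/* largest naturally aligned order at index */
	while (index + (1UL << order) - 1 > limit)
		order--;			/* don't allocate past EOF */
	if (order == 1)				/* THP machinery has no order-1 */
		order = 0;
	return order;
}

int main(void)
{
	/* index 6 is only 2-aligned, so a requested order 4 drops to order 1,
	 * which is then forced down to 0 */
	printf("order = %u\n", pick_order(6, 1000, 4));
	/* near EOF (limit 7), an aligned index 0 with order 4 shrinks to 3 */
	printf("order = %u\n", pick_order(0, 7, 4));
	return 0;
}
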
diff --git a/mm/rmap.c b/mm/rmap.c
index 7a27a2b41802..f5d43edad529 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -470,7 +470,7 @@ void __init anon_vma_init(void)
/*
* Getting a lock on a stable anon_vma from a page off the LRU is tricky!
*
- * Since there is no serialization what so ever against page_remove_rmap()
+ * Since there is no serialization what so ever against folio_remove_rmap_*()
* the best this function can do is return a refcount increased anon_vma
* that might have been relevant to this page.
*
@@ -487,9 +487,15 @@ void __init anon_vma_init(void)
* [ something equivalent to page_mapped_in_vma() ].
*
* Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
- * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
+ * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid
* if there is a mapcount, we can dereference the anon_vma after observing
* those.
+ *
+ * NOTE: the caller should normally hold the folio lock when calling this. If
+ * not, the caller needs to double-check that the anon_vma didn't change after
+ * taking the anon_vma lock for either read or write (UFFDIO_MOVE can modify it
+ * concurrently without folio lock protection). See folio_lock_anon_vma_read(),
+ * which already covers that, and the comment above remap_pages().
*/
struct anon_vma *folio_get_anon_vma(struct folio *folio)
{
@@ -542,6 +548,7 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
struct anon_vma *root_anon_vma;
unsigned long anon_mapping;
+retry:
rcu_read_lock();
anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
@@ -553,6 +560,17 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
root_anon_vma = READ_ONCE(anon_vma->root);
if (down_read_trylock(&root_anon_vma->rwsem)) {
/*
+ * folio_move_anon_rmap() might have changed the anon_vma as we
+ * might not hold the folio lock here.
+ */
+ if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
+ anon_mapping)) {
+ up_read(&root_anon_vma->rwsem);
+ rcu_read_unlock();
+ goto retry;
+ }
+
+ /*
* If the folio is still mapped, then this anon_vma is still
* its anon_vma, and holding the mutex ensures that it will
* not go away, see anon_vma_free().
@@ -586,6 +604,18 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
rcu_read_unlock();
anon_vma_lock_read(anon_vma);
+ /*
+ * folio_move_anon_rmap() might have changed the anon_vma as we might
+ * not hold the folio lock here.
+ */
+ if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
+ anon_mapping)) {
+ anon_vma_unlock_read(anon_vma);
+ put_anon_vma(anon_vma);
+ anon_vma = NULL;
+ goto retry;
+ }
+
if (atomic_dec_and_test(&anon_vma->refcount)) {
/*
* Oops, we held the last refcount, release the lock
@@ -1127,6 +1157,48 @@ int folio_total_mapcount(struct folio *folio)
return mapcount;
}
+static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level,
+ int *nr_pmdmapped)
+{
+ atomic_t *mapped = &folio->_nr_pages_mapped;
+ int first, nr = 0;
+
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ do {
+ first = atomic_inc_and_test(&page->_mapcount);
+ if (first && folio_test_large(folio)) {
+ first = atomic_inc_return_relaxed(mapped);
+ first = (first < ENTIRELY_MAPPED);
+ }
+
+ if (first)
+ nr++;
+ } while (page++, --nr_pages > 0);
+ break;
+ case RMAP_LEVEL_PMD:
+ first = atomic_inc_and_test(&folio->_entire_mapcount);
+ if (first) {
+ nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped);
+ if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) {
+ *nr_pmdmapped = folio_nr_pages(folio);
+ nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
+ /* Raced ahead of a remove and another add? */
+ if (unlikely(nr < 0))
+ nr = 0;
+ } else {
+ /* Raced ahead of a remove of ENTIRELY_MAPPED */
+ nr = 0;
+ }
+ }
+ break;
+ }
+ return nr;
+}
+
/**
* folio_move_anon_rmap - move a folio to our anon_vma
* @folio: The folio to move to our anon_vma
@@ -1198,12 +1270,12 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
* The page's anon-rmap details (mapping and index) are guaranteed to
* be set up correctly at this point.
*
- * We have exclusion against page_add_anon_rmap because the caller
+ * We have exclusion against folio_add_anon_rmap_*() because the caller
* always holds the page locked.
*
- * We have exclusion against page_add_new_anon_rmap because those pages
+ * We have exclusion against folio_add_new_anon_rmap because those pages
* are initially only visible via the pagetables, and the pte is locked
- * over the call to page_add_new_anon_rmap.
+ * over the call to folio_add_new_anon_rmap.
*/
VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
folio);
@@ -1211,54 +1283,13 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
page);
}
-/**
- * page_add_anon_rmap - add pte mapping to an anonymous page
- * @page: the page to add the mapping to
- * @vma: the vm area in which the mapping is added
- * @address: the user virtual address mapped
- * @flags: the rmap flags
- *
- * The caller needs to hold the pte lock, and the page must be locked in
- * the anon_vma case: to serialize mapping,index checking after setting,
- * and to ensure that PageAnon is not being upgraded racily to PageKsm
- * (but PageKsm is never downgraded to PageAnon).
- */
-void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
- unsigned long address, rmap_t flags)
+static __always_inline void __folio_add_anon_rmap(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *vma,
+ unsigned long address, rmap_t flags, enum rmap_level level)
{
- struct folio *folio = page_folio(page);
- atomic_t *mapped = &folio->_nr_pages_mapped;
- int nr = 0, nr_pmdmapped = 0;
- bool compound = flags & RMAP_COMPOUND;
- bool first;
-
- /* Is page being mapped by PTE? Is this its first map to be added? */
- if (likely(!compound)) {
- first = atomic_inc_and_test(&page->_mapcount);
- nr = first;
- if (first && folio_test_large(folio)) {
- nr = atomic_inc_return_relaxed(mapped);
- nr = (nr < COMPOUND_MAPPED);
- }
- } else if (folio_test_pmd_mappable(folio)) {
- /* That test is redundant: it's for safety or to optimize out */
-
- first = atomic_inc_and_test(&folio->_entire_mapcount);
- if (first) {
- nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
- if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
- nr_pmdmapped = folio_nr_pages(folio);
- nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
- /* Raced ahead of a remove and another add? */
- if (unlikely(nr < 0))
- nr = 0;
- } else {
- /* Raced ahead of a remove of COMPOUND_MAPPED */
- nr = 0;
- }
- }
- }
+ int i, nr, nr_pmdmapped = 0;
+ nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
if (nr_pmdmapped)
__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
if (nr)
@@ -1272,18 +1303,34 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
* folio->index right when not given the address of the head
* page.
*/
- VM_WARN_ON_FOLIO(folio_test_large(folio) && !compound, folio);
+ VM_WARN_ON_FOLIO(folio_test_large(folio) &&
+ level != RMAP_LEVEL_PMD, folio);
__folio_set_anon(folio, vma, address,
!!(flags & RMAP_EXCLUSIVE));
} else if (likely(!folio_test_ksm(folio))) {
__page_check_anon_rmap(folio, page, vma, address);
}
- if (flags & RMAP_EXCLUSIVE)
- SetPageAnonExclusive(page);
- /* While PTE-mapping a THP we have a PMD and a PTE mapping. */
- VM_WARN_ON_FOLIO((atomic_read(&page->_mapcount) > 0 ||
- (folio_test_large(folio) && folio_entire_mapcount(folio) > 1)) &&
- PageAnonExclusive(page), folio);
+
+ if (flags & RMAP_EXCLUSIVE) {
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ for (i = 0; i < nr_pages; i++)
+ SetPageAnonExclusive(page + i);
+ break;
+ case RMAP_LEVEL_PMD:
+ SetPageAnonExclusive(page);
+ break;
+ }
+ }
+ for (i = 0; i < nr_pages; i++) {
+ struct page *cur_page = page + i;
+
+ /* While PTE-mapping a THP we have a PMD and a PTE mapping. */
+ VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 ||
+ (folio_test_large(folio) &&
+ folio_entire_mapcount(folio) > 1)) &&
+ PageAnonExclusive(cur_page), folio);
+ }
/*
* For large folio, only mlock it if it's fully mapped to VMA. It's
@@ -1296,182 +1343,200 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
}
/**
+ * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio
+ * @folio: The folio to add the mappings to
+ * @page: The first page to add
+ * @nr_pages: The number of pages which will be mapped
+ * @vma: The vm area in which the mappings are added
+ * @address: The user virtual address of the first page to map
+ * @flags: The rmap flags
+ *
+ * The page range of folio is defined by [first_page, first_page + nr_pages)
+ *
+ * The caller needs to hold the page table lock, and the page must be locked in
+ * the anon_vma case: to serialize mapping,index checking after setting,
+ * and to ensure that an anon folio is not being upgraded racily to a KSM folio
+ * (but KSM folios are never downgraded).
+ */
+void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page,
+ int nr_pages, struct vm_area_struct *vma, unsigned long address,
+ rmap_t flags)
+{
+ __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags,
+ RMAP_LEVEL_PTE);
+}
+
+/**
+ * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio
+ * @folio: The folio to add the mapping to
+ * @page: The first page to add
+ * @vma: The vm area in which the mapping is added
+ * @address: The user virtual address of the first page to map
+ * @flags: The rmap flags
+ *
+ * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock, and the page must be locked in
+ * the anon_vma case: to serialize mapping,index checking after setting.
+ */
+void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
+ struct vm_area_struct *vma, unsigned long address, rmap_t flags)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags,
+ RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+#endif
+}
+
+/**
* folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
* @folio: The folio to add the mapping to.
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
- * Like page_add_anon_rmap() but must only be called on *new* folios.
+ * Like folio_add_anon_rmap_*() but must only be called on *new* folios.
* This means the inc-and-test can be bypassed.
* The folio does not have to be locked.
*
- * If the folio is large, it is accounted as a THP. As the folio
+ * If the folio is pmd-mappable, it is accounted as a THP. As the folio
* is new, it's assumed to be mapped exclusively by a single process.
*/
void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
unsigned long address)
{
- int nr;
+ int nr = folio_nr_pages(folio);
- VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
+ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+ VM_BUG_ON_VMA(address < vma->vm_start ||
+ address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
__folio_set_swapbacked(folio);
+ __folio_set_anon(folio, vma, address, true);
- if (likely(!folio_test_pmd_mappable(folio))) {
+ if (likely(!folio_test_large(folio))) {
/* increment count (starts at -1) */
atomic_set(&folio->_mapcount, 0);
- nr = 1;
+ SetPageAnonExclusive(&folio->page);
+ } else if (!folio_test_pmd_mappable(folio)) {
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ struct page *page = folio_page(folio, i);
+
+ /* increment count (starts at -1) */
+ atomic_set(&page->_mapcount, 0);
+ SetPageAnonExclusive(page);
+ }
+
+ atomic_set(&folio->_nr_pages_mapped, nr);
} else {
/* increment count (starts at -1) */
atomic_set(&folio->_entire_mapcount, 0);
- atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED);
- nr = folio_nr_pages(folio);
+ atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
+ SetPageAnonExclusive(&folio->page);
__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
}
__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
- __folio_set_anon(folio, vma, address, true);
- SetPageAnonExclusive(&folio->page);
}
-/**
- * folio_add_file_rmap_range - add pte mapping to page range of a folio
- * @folio: The folio to add the mapping to
- * @page: The first page to add
- * @nr_pages: The number of pages which will be mapped
- * @vma: the vm area in which the mapping is added
- * @compound: charge the page as compound or small page
- *
- * The page range of folio is defined by [first_page, first_page + nr_pages)
- *
- * The caller needs to hold the pte lock.
- */
-void folio_add_file_rmap_range(struct folio *folio, struct page *page,
- unsigned int nr_pages, struct vm_area_struct *vma,
- bool compound)
+static __always_inline void __folio_add_file_rmap(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *vma,
+ enum rmap_level level)
{
- atomic_t *mapped = &folio->_nr_pages_mapped;
- unsigned int nr_pmdmapped = 0, first;
- int nr = 0;
-
- VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio);
-
- /* Is page being mapped by PTE? Is this its first map to be added? */
- if (likely(!compound)) {
- do {
- first = atomic_inc_and_test(&page->_mapcount);
- if (first && folio_test_large(folio)) {
- first = atomic_inc_return_relaxed(mapped);
- first = (first < COMPOUND_MAPPED);
- }
-
- if (first)
- nr++;
- } while (page++, --nr_pages > 0);
- } else if (folio_test_pmd_mappable(folio)) {
- /* That test is redundant: it's for safety or to optimize out */
+ int nr, nr_pmdmapped = 0;
- first = atomic_inc_and_test(&folio->_entire_mapcount);
- if (first) {
- nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
- if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
- nr_pmdmapped = folio_nr_pages(folio);
- nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
- /* Raced ahead of a remove and another add? */
- if (unlikely(nr < 0))
- nr = 0;
- } else {
- /* Raced ahead of a remove of COMPOUND_MAPPED */
- nr = 0;
- }
- }
- }
+ VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
+ nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
if (nr_pmdmapped)
__lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ?
NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
if (nr)
__lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);
- /* See comments in page_add_anon_rmap() */
+ /* See comments in folio_add_anon_rmap_*() */
if (!folio_test_large(folio))
mlock_vma_folio(folio, vma);
}
/**
- * page_add_file_rmap - add pte mapping to a file page
- * @page: the page to add the mapping to
- * @vma: the vm area in which the mapping is added
- * @compound: charge the page as compound or small page
+ * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio
+ * @folio: The folio to add the mappings to
+ * @page: The first page to add
+ * @nr_pages: The number of pages that will be mapped using PTEs
+ * @vma: The vm area in which the mappings are added
+ *
+ * The page range of the folio is defined by [page, page + nr_pages)
*
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the page table lock.
*/
-void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
- bool compound)
+void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
+ int nr_pages, struct vm_area_struct *vma)
{
- struct folio *folio = page_folio(page);
- unsigned int nr_pages;
-
- VM_WARN_ON_ONCE_PAGE(compound && !PageTransHuge(page), page);
-
- if (likely(!compound))
- nr_pages = 1;
- else
- nr_pages = folio_nr_pages(folio);
-
- folio_add_file_rmap_range(folio, page, nr_pages, vma, compound);
+ __folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
}
/**
- * page_remove_rmap - take down pte mapping from a page
- * @page: page to remove mapping from
- * @vma: the vm area from which the mapping is removed
- * @compound: uncharge the page as compound or small page
+ * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio
+ * @folio: The folio to add the mapping to
+ * @page: The first page to add
+ * @vma: The vm area in which the mapping is added
*
- * The caller needs to hold the pte lock.
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock.
*/
-void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
- bool compound)
+void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
+ struct vm_area_struct *vma)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+#endif
+}
+
+static __always_inline void __folio_remove_rmap(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *vma,
+ enum rmap_level level)
{
- struct folio *folio = page_folio(page);
atomic_t *mapped = &folio->_nr_pages_mapped;
- int nr = 0, nr_pmdmapped = 0;
- bool last;
+ int last, nr = 0, nr_pmdmapped = 0;
enum node_stat_item idx;
- VM_BUG_ON_PAGE(compound && !PageHead(page), page);
-
- /* Hugetlb pages are not counted in NR_*MAPPED */
- if (unlikely(folio_test_hugetlb(folio))) {
- /* hugetlb pages are always mapped with pmds */
- atomic_dec(&folio->_entire_mapcount);
- return;
- }
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
- /* Is page being unmapped by PTE? Is this its last map to be removed? */
- if (likely(!compound)) {
- last = atomic_add_negative(-1, &page->_mapcount);
- nr = last;
- if (last && folio_test_large(folio)) {
- nr = atomic_dec_return_relaxed(mapped);
- nr = (nr < COMPOUND_MAPPED);
- }
- } else if (folio_test_pmd_mappable(folio)) {
- /* That test is redundant: it's for safety or to optimize out */
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ do {
+ last = atomic_add_negative(-1, &page->_mapcount);
+ if (last && folio_test_large(folio)) {
+ last = atomic_dec_return_relaxed(mapped);
+ last = (last < ENTIRELY_MAPPED);
+ }
+ if (last)
+ nr++;
+ } while (page++, --nr_pages > 0);
+ break;
+ case RMAP_LEVEL_PMD:
last = atomic_add_negative(-1, &folio->_entire_mapcount);
if (last) {
- nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped);
- if (likely(nr < COMPOUND_MAPPED)) {
+ nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped);
+ if (likely(nr < ENTIRELY_MAPPED)) {
nr_pmdmapped = folio_nr_pages(folio);
nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
/* Raced ahead of another remove and an add? */
if (unlikely(nr < 0))
nr = 0;
} else {
- /* An add of COMPOUND_MAPPED raced ahead */
+ /* An add of ENTIRELY_MAPPED raced ahead */
nr = 0;
}
}
+ break;
}
if (nr_pmdmapped) {
@@ -1488,18 +1553,18 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
__lruvec_stat_mod_folio(folio, idx, -nr);
/*
- * Queue anon THP for deferred split if at least one
+ * Queue anon large folio for deferred split if at least one
* page of the folio is unmapped and at least one page
* is still mapped.
*/
- if (folio_test_pmd_mappable(folio) && folio_test_anon(folio))
- if (!compound || nr < nr_pmdmapped)
+ if (folio_test_large(folio) && folio_test_anon(folio))
+ if (level == RMAP_LEVEL_PTE || nr < nr_pmdmapped)
deferred_split_folio(folio);
}
/*
* It would be tidy to reset folio_test_anon mapping when fully
- * unmapped, but that might overwrite a racing page_add_anon_rmap
+ * unmapped, but that might overwrite a racing folio_add_anon_rmap_*()
* which increments mapcount after us but sets mapping before us:
* so leave the reset to free_pages_prepare, and remember that
* it's only reliable while mapped.
@@ -1508,6 +1573,43 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
munlock_vma_folio(folio, vma);
}
+/**
+ * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio
+ * @folio: The folio to remove the mappings from
+ * @page: The first page to remove
+ * @nr_pages: The number of pages that will be removed from the mapping
+ * @vma: The vm area from which the mappings are removed
+ *
+ * The page range of the folio is defined by [page, page + nr_pages)
+ *
+ * The caller needs to hold the page table lock.
+ */
+void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
+ int nr_pages, struct vm_area_struct *vma)
+{
+ __folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
+}
+
+/**
+ * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio
+ * @folio: The folio to remove the mapping from
+ * @page: The first page to remove
+ * @vma: The vm area from which the mapping is removed
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock.
+ */
+void folio_remove_rmap_pmd(struct folio *folio, struct page *page,
+ struct vm_area_struct *vma)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+#endif
+}
+
/*
* @arg: enum ttu_flags will be passed to this argument
*/
@@ -1526,7 +1628,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
/*
* When racing against e.g. zap_pte_range() on another cpu,
- * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+ * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
* try_to_unmap() may return before page_mapped() has become false,
* if page table locking is skipped: use TTU_SYNC to wait for that.
*/
@@ -1764,9 +1866,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
break;
}
- /* See page_try_share_anon_rmap(): clear PTE first. */
+ /* See folio_try_share_anon_rmap(): clear PTE first. */
if (anon_exclusive &&
- page_try_share_anon_rmap(subpage)) {
+ folio_try_share_anon_rmap_pte(folio, subpage)) {
swap_free(entry);
set_pte_at(mm, address, pvmw.pte, pteval);
ret = false;
@@ -1804,7 +1906,10 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
dec_mm_counter(mm, mm_counter_file(&folio->page));
}
discard:
- page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
+ if (unlikely(folio_test_hugetlb(folio)))
+ hugetlb_remove_rmap(folio);
+ else
+ folio_remove_rmap_pte(folio, subpage, vma);
if (vma->vm_flags & VM_LOCKED)
mlock_drain_local();
folio_put(folio);
@@ -1872,7 +1977,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
/*
* When racing against e.g. zap_pte_range() on another cpu,
- * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+ * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
* try_to_migrate() may return before page_mapped() has become false,
* if page table locking is skipped: use TTU_SYNC to wait for that.
*/
@@ -2037,7 +2142,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
pte_t swp_pte;
if (anon_exclusive)
- BUG_ON(page_try_share_anon_rmap(subpage));
+ WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio,
+ subpage));
/*
* Store the pfn of the page in a special migration
@@ -2108,14 +2214,19 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
!anon_exclusive, subpage);
- /* See page_try_share_anon_rmap(): clear PTE first. */
- if (anon_exclusive &&
- page_try_share_anon_rmap(subpage)) {
- if (folio_test_hugetlb(folio))
+ /* See folio_try_share_anon_rmap_pte(): clear PTE first. */
+ if (folio_test_hugetlb(folio)) {
+ if (anon_exclusive &&
+ hugetlb_try_share_anon_rmap(folio)) {
set_huge_pte_at(mm, address, pvmw.pte,
pteval, hsz);
- else
- set_pte_at(mm, address, pvmw.pte, pteval);
+ ret = false;
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+ }
+ } else if (anon_exclusive &&
+ folio_try_share_anon_rmap_pte(folio, subpage)) {
+ set_pte_at(mm, address, pvmw.pte, pteval);
ret = false;
page_vma_mapped_walk_done(&pvmw);
break;
@@ -2157,7 +2268,10 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
*/
}
- page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
+ if (unlikely(folio_test_hugetlb(folio)))
+ hugetlb_remove_rmap(folio);
+ else
+ folio_remove_rmap_pte(folio, subpage, vma);
if (vma->vm_flags & VM_LOCKED)
mlock_drain_local();
folio_put(folio);
@@ -2296,7 +2410,7 @@ static bool page_make_device_exclusive_one(struct folio *folio,
* There is a reference on the page for the swap entry which has
* been removed, so shouldn't take another.
*/
- page_remove_rmap(subpage, vma, false);
+ folio_remove_rmap_pte(folio, subpage, vma);
}
mmu_notifier_invalidate_range_end(&range);
@@ -2580,12 +2694,11 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
* The following two functions are for anonymous (private mapped) hugepages.
* Unlike common anonymous pages, anonymous hugepages have no accounting code
* and no lru code, because we handle hugepages differently from common pages.
- *
- * RMAP_COMPOUND is ignored.
*/
-void hugepage_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
- unsigned long address, rmap_t flags)
+void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
+ unsigned long address, rmap_t flags)
{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
atomic_inc(&folio->_entire_mapcount);
@@ -2595,9 +2708,11 @@ void hugepage_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
PageAnonExclusive(&folio->page), folio);
}
-void hugepage_add_new_anon_rmap(struct folio *folio,
- struct vm_area_struct *vma, unsigned long address)
+void hugetlb_add_new_anon_rmap(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long address)
{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+
BUG_ON(address < vma->vm_start || address >= vma->vm_end);
/* increment count (starts at -1) */
atomic_set(&folio->_entire_mapcount, 0);
diff --git a/mm/shmem.c b/mm/shmem.c
index 0d1ce70bce38..928aa2304932 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1514,8 +1514,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
mutex_unlock(&shmem_swaplist_mutex);
BUG_ON(folio_mapped(folio));
- swap_writepage(&folio->page, wbc);
- return 0;
+ return swap_writepage(&folio->page, wbc);
}
mutex_unlock(&shmem_swaplist_mutex);
@@ -1570,15 +1569,13 @@ static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
{
struct mempolicy *mpol;
pgoff_t ilx;
- struct page *page;
+ struct folio *folio;
mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
- page = swap_cluster_readahead(swap, gfp, mpol, ilx);
+ folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
mpol_cond_put(mpol);
- if (!page)
- return NULL;
- return page_folio(page);
+ return folio;
}
/*
@@ -4462,8 +4459,8 @@ static void __init shmem_destroy_inodecache(void)
}
/* Keep the page in page cache instead of truncating it */
-static int shmem_error_remove_page(struct address_space *mapping,
- struct page *page)
+static int shmem_error_remove_folio(struct address_space *mapping,
+ struct folio *folio)
{
return 0;
}
@@ -4478,7 +4475,7 @@ const struct address_space_operations shmem_aops = {
#ifdef CONFIG_MIGRATION
.migrate_folio = migrate_folio,
#endif
- .error_remove_page = shmem_error_remove_page,
+ .error_remove_folio = shmem_error_remove_folio,
};
EXPORT_SYMBOL(shmem_aops);
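The aops conversion above is the pattern other filesystems are expected to follow; a minimal sketch is shown below, where the example_* names are assumptions and only the member name and its folio-based signature are taken from the hunk above:

	/* Keep the folio in the page cache on memory failure, as shmem does. */
	static int example_error_remove_folio(struct address_space *mapping,
					      struct folio *folio)
	{
		return 0;
	}

	static const struct address_space_operations example_aops = {
		.error_remove_folio	= example_error_remove_folio,
	};
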
diff --git a/mm/show_mem.c b/mm/show_mem.c
index ba0808d6917f..8dcfafbd283c 100644
--- a/mm/show_mem.c
+++ b/mm/show_mem.c
@@ -352,8 +352,8 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
for_each_populated_zone(zone) {
unsigned int order;
- unsigned long nr[MAX_ORDER + 1], flags, total = 0;
- unsigned char types[MAX_ORDER + 1];
+ unsigned long nr[NR_PAGE_ORDERS], flags, total = 0;
+ unsigned char types[NR_PAGE_ORDERS];
if (zone_idx(zone) > max_zone_idx)
continue;
@@ -363,7 +363,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
printk(KERN_CONT "%s: ", zone->name);
spin_lock_irqsave(&zone->lock, flags);
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct free_area *area = &zone->free_area[order];
int type;
@@ -377,7 +377,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
}
}
spin_unlock_irqrestore(&zone->lock, flags);
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
printk(KERN_CONT "%lu*%lukB ",
nr[order], K(1UL) << order);
if (nr[order])
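The loop-bound changes in this file are mechanical: the series renames MAX_ORDER to MAX_PAGE_ORDER (still the highest order, counted inclusively) and introduces NR_PAGE_ORDERS for array sizes and exclusive loop bounds, so the old and new forms are assumed to cover the same range:

	/* Assumed relationship introduced by this series (mmzone.h): */
	/*   #define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)              */
	unsigned long nr[NR_PAGE_ORDERS];	/* one slot per order 0..MAX_PAGE_ORDER */
	unsigned int order;

	for (order = 0; order < NR_PAGE_ORDERS; order++)	/* same range as order <= MAX_PAGE_ORDER */
		nr[order] = 0;
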
diff --git a/mm/shrinker.c b/mm/shrinker.c
index dd91eab43ed3..dc5d2a6fcfc4 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -126,7 +126,7 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg, int new_size,
if (new_nr_max <= old->map_nr_max)
continue;
- new = kvmalloc_node(sizeof(*new) + new_size, GFP_KERNEL, nid);
+ new = kvzalloc_node(sizeof(*new) + new_size, GFP_KERNEL, nid);
if (!new)
return -ENOMEM;
diff --git a/mm/shuffle.h b/mm/shuffle.h
index a6bdf54f96f1..61bbcddeeee6 100644
--- a/mm/shuffle.h
+++ b/mm/shuffle.h
@@ -4,7 +4,7 @@
#define _MM_SHUFFLE_H
#include <linux/jump_label.h>
-#define SHUFFLE_ORDER MAX_ORDER
+#define SHUFFLE_ORDER MAX_PAGE_ORDER
#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
DECLARE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
diff --git a/mm/slab.c b/mm/slab.c
deleted file mode 100644
index 9ad3d0f2d1a5..000000000000
--- a/mm/slab.c
+++ /dev/null
@@ -1,4026 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/mm/slab.c
- * Written by Mark Hemment, 1996/97.
- * (markhe@nextd.demon.co.uk)
- *
- * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
- *
- * Major cleanup, different bufctl logic, per-cpu arrays
- * (c) 2000 Manfred Spraul
- *
- * Cleanup, make the head arrays unconditional, preparation for NUMA
- * (c) 2002 Manfred Spraul
- *
- * An implementation of the Slab Allocator as described in outline in;
- * UNIX Internals: The New Frontiers by Uresh Vahalia
- * Pub: Prentice Hall ISBN 0-13-101908-2
- * or with a little more detail in;
- * The Slab Allocator: An Object-Caching Kernel Memory Allocator
- * Jeff Bonwick (Sun Microsystems).
- * Presented at: USENIX Summer 1994 Technical Conference
- *
- * The memory is organized in caches, one cache for each object type.
- * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
- * Each cache consists of many slabs (they are small (usually one
- * page long) and always contiguous), and each slab contains multiple
- * initialized objects.
- *
- * This means, that your constructor is used only for newly allocated
- * slabs and you must pass objects with the same initializations to
- * kmem_cache_free.
- *
- * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
- * normal). If you need a special memory type, then must create a new
- * cache for that memory type.
- *
- * In order to reduce fragmentation, the slabs are sorted in 3 groups:
- * full slabs with 0 free objects
- * partial slabs
- * empty slabs with no allocated objects
- *
- * If partial slabs exist, then new allocations come from these slabs,
- * otherwise from empty slabs or new slabs are allocated.
- *
- * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
- * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
- *
- * Each cache has a short per-cpu head array, most allocs
- * and frees go into that array, and if that array overflows, then 1/2
- * of the entries in the array are given back into the global cache.
- * The head array is strictly LIFO and should improve the cache hit rates.
- * On SMP, it additionally reduces the spinlock operations.
- *
- * The c_cpuarray may not be read with enabled local interrupts -
- * it's changed with a smp_call_function().
- *
- * SMP synchronization:
- * constructors and destructors are called without any locking.
- * Several members in struct kmem_cache and struct slab never change, they
- * are accessed without any locking.
- * The per-cpu arrays are never accessed from the wrong cpu, no locking,
- * and local interrupts are disabled so slab code is preempt-safe.
- * The non-constant members are protected with a per-cache irq spinlock.
- *
- * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
- * in 2000 - many ideas in the current implementation are derived from
- * his patch.
- *
- * Further notes from the original documentation:
- *
- * 11 April '97. Started multi-threading - markhe
- * The global cache-chain is protected by the mutex 'slab_mutex'.
- * The sem is only needed when accessing/extending the cache-chain, which
- * can never happen inside an interrupt (kmem_cache_create(),
- * kmem_cache_shrink() and kmem_cache_reap()).
- *
- * At present, each engine can be growing a cache. This should be blocked.
- *
- * 15 March 2005. NUMA slab allocator.
- * Shai Fultheim <shai@scalex86.org>.
- * Shobhit Dayal <shobhit@calsoftinc.com>
- * Alok N Kataria <alokk@calsoftinc.com>
- * Christoph Lameter <christoph@lameter.com>
- *
- * Modified the slab allocator to be node aware on NUMA systems.
- * Each node has its own list of partial, free and full slabs.
- * All object allocations for a node occur from node specific slab lists.
- */
-
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/poison.h>
-#include <linux/swap.h>
-#include <linux/cache.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
-#include <linux/cpuset.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/notifier.h>
-#include <linux/kallsyms.h>
-#include <linux/kfence.h>
-#include <linux/cpu.h>
-#include <linux/sysctl.h>
-#include <linux/module.h>
-#include <linux/rcupdate.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-#include <linux/nodemask.h>
-#include <linux/kmemleak.h>
-#include <linux/mempolicy.h>
-#include <linux/mutex.h>
-#include <linux/fault-inject.h>
-#include <linux/rtmutex.h>
-#include <linux/reciprocal_div.h>
-#include <linux/debugobjects.h>
-#include <linux/memory.h>
-#include <linux/prefetch.h>
-#include <linux/sched/task_stack.h>
-
-#include <net/sock.h>
-
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include <asm/page.h>
-
-#include <trace/events/kmem.h>
-
-#include "internal.h"
-
-#include "slab.h"
-
-/*
- * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
- * 0 for faster, smaller code (especially in the critical paths).
- *
- * STATS - 1 to collect stats for /proc/slabinfo.
- * 0 for faster, smaller code (especially in the critical paths).
- *
- * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
- */
-
-#ifdef CONFIG_DEBUG_SLAB
-#define DEBUG 1
-#define STATS 1
-#define FORCED_DEBUG 1
-#else
-#define DEBUG 0
-#define STATS 0
-#define FORCED_DEBUG 0
-#endif
-
-/* Shouldn't this be in a header file somewhere? */
-#define BYTES_PER_WORD sizeof(void *)
-#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
-
-#ifndef ARCH_KMALLOC_FLAGS
-#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
-#endif
-
-#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
- <= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
-
-#if FREELIST_BYTE_INDEX
-typedef unsigned char freelist_idx_t;
-#else
-typedef unsigned short freelist_idx_t;
-#endif
-
-#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
-
-/*
- * struct array_cache
- *
- * Purpose:
- * - LIFO ordering, to hand out cache-warm objects from _alloc
- * - reduce the number of linked list operations
- * - reduce spinlock operations
- *
- * The limit is stored in the per-cpu structure to reduce the data cache
- * footprint.
- *
- */
-struct array_cache {
- unsigned int avail;
- unsigned int limit;
- unsigned int batchcount;
- unsigned int touched;
- void *entry[]; /*
- * Must have this definition in here for the proper
- * alignment of array_cache. Also simplifies accessing
- * the entries.
- */
-};
-
-struct alien_cache {
- spinlock_t lock;
- struct array_cache ac;
-};
-
-/*
- * Need this for bootstrapping a per node allocator.
- */
-#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
-static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
-#define CACHE_CACHE 0
-#define SIZE_NODE (MAX_NUMNODES)
-
-static int drain_freelist(struct kmem_cache *cache,
- struct kmem_cache_node *n, int tofree);
-static void free_block(struct kmem_cache *cachep, void **objpp, int len,
- int node, struct list_head *list);
-static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
-static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
-static void cache_reap(struct work_struct *unused);
-
-static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
- void **list);
-static inline void fixup_slab_list(struct kmem_cache *cachep,
- struct kmem_cache_node *n, struct slab *slab,
- void **list);
-
-#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
-
-static void kmem_cache_node_init(struct kmem_cache_node *parent)
-{
- INIT_LIST_HEAD(&parent->slabs_full);
- INIT_LIST_HEAD(&parent->slabs_partial);
- INIT_LIST_HEAD(&parent->slabs_free);
- parent->total_slabs = 0;
- parent->free_slabs = 0;
- parent->shared = NULL;
- parent->alien = NULL;
- parent->colour_next = 0;
- raw_spin_lock_init(&parent->list_lock);
- parent->free_objects = 0;
- parent->free_touched = 0;
-}
-
-#define MAKE_LIST(cachep, listp, slab, nodeid) \
- do { \
- INIT_LIST_HEAD(listp); \
- list_splice(&get_node(cachep, nodeid)->slab, listp); \
- } while (0)
-
-#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
- do { \
- MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
- MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
- MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
- } while (0)
-
-#define CFLGS_OBJFREELIST_SLAB ((slab_flags_t __force)0x40000000U)
-#define CFLGS_OFF_SLAB ((slab_flags_t __force)0x80000000U)
-#define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB)
-#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
-
-#define BATCHREFILL_LIMIT 16
-/*
- * Optimization question: fewer reaps means less probability for unnecessary
- * cpucache drain/refill cycles.
- *
- * OTOH the cpuarrays can contain lots of objects,
- * which could lock up otherwise freeable slabs.
- */
-#define REAPTIMEOUT_AC (2*HZ)
-#define REAPTIMEOUT_NODE (4*HZ)
-
-#if STATS
-#define STATS_INC_ACTIVE(x) ((x)->num_active++)
-#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
-#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
-#define STATS_INC_GROWN(x) ((x)->grown++)
-#define STATS_ADD_REAPED(x, y) ((x)->reaped += (y))
-#define STATS_SET_HIGH(x) \
- do { \
- if ((x)->num_active > (x)->high_mark) \
- (x)->high_mark = (x)->num_active; \
- } while (0)
-#define STATS_INC_ERR(x) ((x)->errors++)
-#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
-#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
-#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
-#define STATS_SET_FREEABLE(x, i) \
- do { \
- if ((x)->max_freeable < i) \
- (x)->max_freeable = i; \
- } while (0)
-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
-#else
-#define STATS_INC_ACTIVE(x) do { } while (0)
-#define STATS_DEC_ACTIVE(x) do { } while (0)
-#define STATS_INC_ALLOCED(x) do { } while (0)
-#define STATS_INC_GROWN(x) do { } while (0)
-#define STATS_ADD_REAPED(x, y) do { (void)(y); } while (0)
-#define STATS_SET_HIGH(x) do { } while (0)
-#define STATS_INC_ERR(x) do { } while (0)
-#define STATS_INC_NODEALLOCS(x) do { } while (0)
-#define STATS_INC_NODEFREES(x) do { } while (0)
-#define STATS_INC_ACOVERFLOW(x) do { } while (0)
-#define STATS_SET_FREEABLE(x, i) do { } while (0)
-#define STATS_INC_ALLOCHIT(x) do { } while (0)
-#define STATS_INC_ALLOCMISS(x) do { } while (0)
-#define STATS_INC_FREEHIT(x) do { } while (0)
-#define STATS_INC_FREEMISS(x) do { } while (0)
-#endif
-
-#if DEBUG
-
-/*
- * memory layout of objects:
- * 0 : objp
- * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
- * the end of an object is aligned with the end of the real
- * allocation. Catches writes behind the end of the allocation.
- * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
- * redzone word.
- * cachep->obj_offset: The real object.
- * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
- * cachep->size - 1* BYTES_PER_WORD: last caller address
- * [BYTES_PER_WORD long]
- */
-static int obj_offset(struct kmem_cache *cachep)
-{
- return cachep->obj_offset;
-}
-
-static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
-{
- BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
- return (unsigned long long *) (objp + obj_offset(cachep) -
- sizeof(unsigned long long));
-}
-
-static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
-{
- BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
- if (cachep->flags & SLAB_STORE_USER)
- return (unsigned long long *)(objp + cachep->size -
- sizeof(unsigned long long) -
- REDZONE_ALIGN);
- return (unsigned long long *) (objp + cachep->size -
- sizeof(unsigned long long));
-}
-
-static void **dbg_userword(struct kmem_cache *cachep, void *objp)
-{
- BUG_ON(!(cachep->flags & SLAB_STORE_USER));
- return (void **)(objp + cachep->size - BYTES_PER_WORD);
-}
-
-#else
-
-#define obj_offset(x) 0
-#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
-#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
-#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
-
-#endif
-
-/*
- * Do not go above this order unless 0 objects fit into the slab or
- * overridden on the command line.
- */
-#define SLAB_MAX_ORDER_HI 1
-#define SLAB_MAX_ORDER_LO 0
-static int slab_max_order = SLAB_MAX_ORDER_LO;
-static bool slab_max_order_set __initdata;
-
-static inline void *index_to_obj(struct kmem_cache *cache,
- const struct slab *slab, unsigned int idx)
-{
- return slab->s_mem + cache->size * idx;
-}
-
-#define BOOT_CPUCACHE_ENTRIES 1
-/* internal cache of cache description objs */
-static struct kmem_cache kmem_cache_boot = {
- .batchcount = 1,
- .limit = BOOT_CPUCACHE_ENTRIES,
- .shared = 1,
- .size = sizeof(struct kmem_cache),
- .name = "kmem_cache",
-};
-
-static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
-
-static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
-{
- return this_cpu_ptr(cachep->cpu_cache);
-}
-
-/*
- * Calculate the number of objects and left-over bytes for a given buffer size.
- */
-static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
- slab_flags_t flags, size_t *left_over)
-{
- unsigned int num;
- size_t slab_size = PAGE_SIZE << gfporder;
-
- /*
- * The slab management structure can be either off the slab or
- * on it. For the latter case, the memory allocated for a
- * slab is used for:
- *
- * - @buffer_size bytes for each object
- * - One freelist_idx_t for each object
- *
- * We don't need to consider alignment of freelist because
- * freelist will be at the end of slab page. The objects will be
- * at the correct alignment.
- *
- * If the slab management structure is off the slab, then the
- * alignment will already be calculated into the size. Because
- * the slabs are all pages aligned, the objects will be at the
- * correct alignment when allocated.
- */
- if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
- num = slab_size / buffer_size;
- *left_over = slab_size % buffer_size;
- } else {
- num = slab_size / (buffer_size + sizeof(freelist_idx_t));
- *left_over = slab_size %
- (buffer_size + sizeof(freelist_idx_t));
- }
-
- return num;
-}
-
-#if DEBUG
-#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
-
-static void __slab_error(const char *function, struct kmem_cache *cachep,
- char *msg)
-{
- pr_err("slab error in %s(): cache `%s': %s\n",
- function, cachep->name, msg);
- dump_stack();
- add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
-}
-#endif
-
-/*
- * By default on NUMA we use alien caches to stage the freeing of
- * objects allocated from other nodes. This causes massive memory
- * inefficiencies when using fake NUMA setup to split memory into a
- * large number of small nodes, so it can be disabled on the command
- * line
- */
-
-static int use_alien_caches __read_mostly = 1;
-static int __init noaliencache_setup(char *s)
-{
- use_alien_caches = 0;
- return 1;
-}
-__setup("noaliencache", noaliencache_setup);
-
-static int __init slab_max_order_setup(char *str)
-{
- get_option(&str, &slab_max_order);
- slab_max_order = slab_max_order < 0 ? 0 :
- min(slab_max_order, MAX_ORDER);
- slab_max_order_set = true;
-
- return 1;
-}
-__setup("slab_max_order=", slab_max_order_setup);
-
-#ifdef CONFIG_NUMA
-/*
- * Special reaping functions for NUMA systems called from cache_reap().
- * These take care of doing round robin flushing of alien caches (containing
- * objects freed on different nodes from which they were allocated) and the
- * flushing of remote pcps by calling drain_node_pages.
- */
-static DEFINE_PER_CPU(unsigned long, slab_reap_node);
-
-static void init_reap_node(int cpu)
-{
- per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
- node_online_map);
-}
-
-static void next_reap_node(void)
-{
- int node = __this_cpu_read(slab_reap_node);
-
- node = next_node_in(node, node_online_map);
- __this_cpu_write(slab_reap_node, node);
-}
-
-#else
-#define init_reap_node(cpu) do { } while (0)
-#define next_reap_node(void) do { } while (0)
-#endif
-
-/*
- * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
- * via the workqueue/eventd.
- * Add the CPU number into the expiration time to minimize the possibility of
- * the CPUs getting into lockstep and contending for the global cache chain
- * lock.
- */
-static void start_cpu_timer(int cpu)
-{
- struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
-
- if (reap_work->work.func == NULL) {
- init_reap_node(cpu);
- INIT_DEFERRABLE_WORK(reap_work, cache_reap);
- schedule_delayed_work_on(cpu, reap_work,
- __round_jiffies_relative(HZ, cpu));
- }
-}
-
-static void init_arraycache(struct array_cache *ac, int limit, int batch)
-{
- if (ac) {
- ac->avail = 0;
- ac->limit = limit;
- ac->batchcount = batch;
- ac->touched = 0;
- }
-}
-
-static struct array_cache *alloc_arraycache(int node, int entries,
- int batchcount, gfp_t gfp)
-{
- size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
- struct array_cache *ac = NULL;
-
- ac = kmalloc_node(memsize, gfp, node);
- /*
- * The array_cache structures contain pointers to free objects.
- * However, when such objects are allocated or transferred to another
- * cache the pointers are not cleared and they could be counted as
- * valid references during a kmemleak scan. Therefore, kmemleak must
- * not scan such objects.
- */
- kmemleak_no_scan(ac);
- init_arraycache(ac, entries, batchcount);
- return ac;
-}
-
-static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
- struct slab *slab, void *objp)
-{
- struct kmem_cache_node *n;
- int slab_node;
- LIST_HEAD(list);
-
- slab_node = slab_nid(slab);
- n = get_node(cachep, slab_node);
-
- raw_spin_lock(&n->list_lock);
- free_block(cachep, &objp, 1, slab_node, &list);
- raw_spin_unlock(&n->list_lock);
-
- slabs_destroy(cachep, &list);
-}
-
-/*
- * Transfer objects in one arraycache to another.
- * Locking must be handled by the caller.
- *
- * Return the number of entries transferred.
- */
-static int transfer_objects(struct array_cache *to,
- struct array_cache *from, unsigned int max)
-{
- /* Figure out how many entries to transfer */
- int nr = min3(from->avail, max, to->limit - to->avail);
-
- if (!nr)
- return 0;
-
- memcpy(to->entry + to->avail, from->entry + from->avail - nr,
- sizeof(void *) *nr);
-
- from->avail -= nr;
- to->avail += nr;
- return nr;
-}
-
-/* &alien->lock must be held by alien callers. */
-static __always_inline void __free_one(struct array_cache *ac, void *objp)
-{
- /* Avoid trivial double-free. */
- if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
- WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp))
- return;
- ac->entry[ac->avail++] = objp;
-}
-
-#ifndef CONFIG_NUMA
-
-#define drain_alien_cache(cachep, alien) do { } while (0)
-#define reap_alien(cachep, n) do { } while (0)
-
-static inline struct alien_cache **alloc_alien_cache(int node,
- int limit, gfp_t gfp)
-{
- return NULL;
-}
-
-static inline void free_alien_cache(struct alien_cache **ac_ptr)
-{
-}
-
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
-{
- return 0;
-}
-
-static inline gfp_t gfp_exact_node(gfp_t flags)
-{
- return flags & ~__GFP_NOFAIL;
-}
-
-#else /* CONFIG_NUMA */
-
-static struct alien_cache *__alloc_alien_cache(int node, int entries,
- int batch, gfp_t gfp)
-{
- size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
- struct alien_cache *alc = NULL;
-
- alc = kmalloc_node(memsize, gfp, node);
- if (alc) {
- kmemleak_no_scan(alc);
- init_arraycache(&alc->ac, entries, batch);
- spin_lock_init(&alc->lock);
- }
- return alc;
-}
-
-static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
-{
- struct alien_cache **alc_ptr;
- int i;
-
- if (limit > 1)
- limit = 12;
- alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
- if (!alc_ptr)
- return NULL;
-
- for_each_node(i) {
- if (i == node || !node_online(i))
- continue;
- alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
- if (!alc_ptr[i]) {
- for (i--; i >= 0; i--)
- kfree(alc_ptr[i]);
- kfree(alc_ptr);
- return NULL;
- }
- }
- return alc_ptr;
-}
-
-static void free_alien_cache(struct alien_cache **alc_ptr)
-{
- int i;
-
- if (!alc_ptr)
- return;
- for_each_node(i)
- kfree(alc_ptr[i]);
- kfree(alc_ptr);
-}
-
-static void __drain_alien_cache(struct kmem_cache *cachep,
- struct array_cache *ac, int node,
- struct list_head *list)
-{
- struct kmem_cache_node *n = get_node(cachep, node);
-
- if (ac->avail) {
- raw_spin_lock(&n->list_lock);
- /*
- * Stuff objects into the remote nodes shared array first.
- * That way we could avoid the overhead of putting the objects
- * into the free lists and getting them back later.
- */
- if (n->shared)
- transfer_objects(n->shared, ac, ac->limit);
-
- free_block(cachep, ac->entry, ac->avail, node, list);
- ac->avail = 0;
- raw_spin_unlock(&n->list_lock);
- }
-}
-
-/*
- * Called from cache_reap() to regularly drain alien caches round robin.
- */
-static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
-{
- int node = __this_cpu_read(slab_reap_node);
-
- if (n->alien) {
- struct alien_cache *alc = n->alien[node];
- struct array_cache *ac;
-
- if (alc) {
- ac = &alc->ac;
- if (ac->avail && spin_trylock_irq(&alc->lock)) {
- LIST_HEAD(list);
-
- __drain_alien_cache(cachep, ac, node, &list);
- spin_unlock_irq(&alc->lock);
- slabs_destroy(cachep, &list);
- }
- }
- }
-}
-
-static void drain_alien_cache(struct kmem_cache *cachep,
- struct alien_cache **alien)
-{
- int i = 0;
- struct alien_cache *alc;
- struct array_cache *ac;
- unsigned long flags;
-
- for_each_online_node(i) {
- alc = alien[i];
- if (alc) {
- LIST_HEAD(list);
-
- ac = &alc->ac;
- spin_lock_irqsave(&alc->lock, flags);
- __drain_alien_cache(cachep, ac, i, &list);
- spin_unlock_irqrestore(&alc->lock, flags);
- slabs_destroy(cachep, &list);
- }
- }
-}
-
-static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
- int node, int slab_node)
-{
- struct kmem_cache_node *n;
- struct alien_cache *alien = NULL;
- struct array_cache *ac;
- LIST_HEAD(list);
-
- n = get_node(cachep, node);
- STATS_INC_NODEFREES(cachep);
- if (n->alien && n->alien[slab_node]) {
- alien = n->alien[slab_node];
- ac = &alien->ac;
- spin_lock(&alien->lock);
- if (unlikely(ac->avail == ac->limit)) {
- STATS_INC_ACOVERFLOW(cachep);
- __drain_alien_cache(cachep, ac, slab_node, &list);
- }
- __free_one(ac, objp);
- spin_unlock(&alien->lock);
- slabs_destroy(cachep, &list);
- } else {
- n = get_node(cachep, slab_node);
- raw_spin_lock(&n->list_lock);
- free_block(cachep, &objp, 1, slab_node, &list);
- raw_spin_unlock(&n->list_lock);
- slabs_destroy(cachep, &list);
- }
- return 1;
-}
-
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
-{
- int slab_node = slab_nid(virt_to_slab(objp));
- int node = numa_mem_id();
- /*
- * Make sure we are not freeing an object from another node to the array
- * cache on this cpu.
- */
- if (likely(node == slab_node))
- return 0;
-
- return __cache_free_alien(cachep, objp, node, slab_node);
-}
-
-/*
- * Construct gfp mask to allocate from a specific node but do not reclaim or
- * warn about failures.
- */
-static inline gfp_t gfp_exact_node(gfp_t flags)
-{
- return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
-}
-#endif
-
-static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
-{
- struct kmem_cache_node *n;
-
- /*
- * Set up the kmem_cache_node for cpu before we can
- * begin anything. Make sure some other cpu on this
- * node has not already allocated this
- */
- n = get_node(cachep, node);
- if (n) {
- raw_spin_lock_irq(&n->list_lock);
- n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
- cachep->num;
- raw_spin_unlock_irq(&n->list_lock);
-
- return 0;
- }
-
- n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
- if (!n)
- return -ENOMEM;
-
- kmem_cache_node_init(n);
- n->next_reap = jiffies + REAPTIMEOUT_NODE +
- ((unsigned long)cachep) % REAPTIMEOUT_NODE;
-
- n->free_limit =
- (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
-
- /*
- * The kmem_cache_nodes don't come and go as CPUs
- * come and go. slab_mutex provides sufficient
- * protection here.
- */
- cachep->node[node] = n;
-
- return 0;
-}
-
-#if defined(CONFIG_NUMA) || defined(CONFIG_SMP)
-/*
- * Allocates and initializes node for a node on each slab cache, used for
- * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
- * will be allocated off-node since memory is not yet online for the new node.
- * When hotplugging memory or a cpu, existing nodes are not replaced if
- * already in use.
- *
- * Must hold slab_mutex.
- */
-static int init_cache_node_node(int node)
-{
- int ret;
- struct kmem_cache *cachep;
-
- list_for_each_entry(cachep, &slab_caches, list) {
- ret = init_cache_node(cachep, node, GFP_KERNEL);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-#endif
-
-static int setup_kmem_cache_node(struct kmem_cache *cachep,
- int node, gfp_t gfp, bool force_change)
-{
- int ret = -ENOMEM;
- struct kmem_cache_node *n;
- struct array_cache *old_shared = NULL;
- struct array_cache *new_shared = NULL;
- struct alien_cache **new_alien = NULL;
- LIST_HEAD(list);
-
- if (use_alien_caches) {
- new_alien = alloc_alien_cache(node, cachep->limit, gfp);
- if (!new_alien)
- goto fail;
- }
-
- if (cachep->shared) {
- new_shared = alloc_arraycache(node,
- cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
- if (!new_shared)
- goto fail;
- }
-
- ret = init_cache_node(cachep, node, gfp);
- if (ret)
- goto fail;
-
- n = get_node(cachep, node);
- raw_spin_lock_irq(&n->list_lock);
- if (n->shared && force_change) {
- free_block(cachep, n->shared->entry,
- n->shared->avail, node, &list);
- n->shared->avail = 0;
- }
-
- if (!n->shared || force_change) {
- old_shared = n->shared;
- n->shared = new_shared;
- new_shared = NULL;
- }
-
- if (!n->alien) {
- n->alien = new_alien;
- new_alien = NULL;
- }
-
- raw_spin_unlock_irq(&n->list_lock);
- slabs_destroy(cachep, &list);
-
- /*
- * To protect lockless access to n->shared during irq disabled context.
- * If n->shared isn't NULL in irq disabled context, accessing to it is
- * guaranteed to be valid until irq is re-enabled, because it will be
- * freed after synchronize_rcu().
- */
- if (old_shared && force_change)
- synchronize_rcu();
-
-fail:
- kfree(old_shared);
- kfree(new_shared);
- free_alien_cache(new_alien);
-
- return ret;
-}
-
-#ifdef CONFIG_SMP
-
-static void cpuup_canceled(long cpu)
-{
- struct kmem_cache *cachep;
- struct kmem_cache_node *n = NULL;
- int node = cpu_to_mem(cpu);
- const struct cpumask *mask = cpumask_of_node(node);
-
- list_for_each_entry(cachep, &slab_caches, list) {
- struct array_cache *nc;
- struct array_cache *shared;
- struct alien_cache **alien;
- LIST_HEAD(list);
-
- n = get_node(cachep, node);
- if (!n)
- continue;
-
- raw_spin_lock_irq(&n->list_lock);
-
- /* Free limit for this kmem_cache_node */
- n->free_limit -= cachep->batchcount;
-
- /* cpu is dead; no one can alloc from it. */
- nc = per_cpu_ptr(cachep->cpu_cache, cpu);
- free_block(cachep, nc->entry, nc->avail, node, &list);
- nc->avail = 0;
-
- if (!cpumask_empty(mask)) {
- raw_spin_unlock_irq(&n->list_lock);
- goto free_slab;
- }
-
- shared = n->shared;
- if (shared) {
- free_block(cachep, shared->entry,
- shared->avail, node, &list);
- n->shared = NULL;
- }
-
- alien = n->alien;
- n->alien = NULL;
-
- raw_spin_unlock_irq(&n->list_lock);
-
- kfree(shared);
- if (alien) {
- drain_alien_cache(cachep, alien);
- free_alien_cache(alien);
- }
-
-free_slab:
- slabs_destroy(cachep, &list);
- }
- /*
- * In the previous loop, all the objects were freed to
- * the respective cache's slabs, now we can go ahead and
- * shrink each nodelist to its limit.
- */
- list_for_each_entry(cachep, &slab_caches, list) {
- n = get_node(cachep, node);
- if (!n)
- continue;
- drain_freelist(cachep, n, INT_MAX);
- }
-}
-
-static int cpuup_prepare(long cpu)
-{
- struct kmem_cache *cachep;
- int node = cpu_to_mem(cpu);
- int err;
-
- /*
- * We need to do this right in the beginning since
- * alloc_arraycache's are going to use this list.
- * kmalloc_node allows us to add the slab to the right
- * kmem_cache_node and not this cpu's kmem_cache_node
- */
- err = init_cache_node_node(node);
- if (err < 0)
- goto bad;
-
- /*
- * Now we can go ahead with allocating the shared arrays and
- * array caches
- */
- list_for_each_entry(cachep, &slab_caches, list) {
- err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
- if (err)
- goto bad;
- }
-
- return 0;
-bad:
- cpuup_canceled(cpu);
- return -ENOMEM;
-}
-
-int slab_prepare_cpu(unsigned int cpu)
-{
- int err;
-
- mutex_lock(&slab_mutex);
- err = cpuup_prepare(cpu);
- mutex_unlock(&slab_mutex);
- return err;
-}
-
-/*
- * This is called for a failed online attempt and for a successful
- * offline.
- *
- * Even if all the cpus of a node are down, we don't free the
- * kmem_cache_node of any cache. This is to avoid a race between cpu_down, and
- * a kmalloc allocation from another cpu for memory from the node of
- * the cpu going down. The kmem_cache_node structure is usually allocated from
- * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
- */
-int slab_dead_cpu(unsigned int cpu)
-{
- mutex_lock(&slab_mutex);
- cpuup_canceled(cpu);
- mutex_unlock(&slab_mutex);
- return 0;
-}
-#endif
-
-static int slab_online_cpu(unsigned int cpu)
-{
- start_cpu_timer(cpu);
- return 0;
-}
-
-static int slab_offline_cpu(unsigned int cpu)
-{
- /*
- * Shutdown cache reaper. Note that the slab_mutex is held so
- * that if cache_reap() is invoked it cannot do anything
- * expensive but will only modify reap_work and reschedule the
- * timer.
- */
- cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
- /* Now the cache_reaper is guaranteed to be not running. */
- per_cpu(slab_reap_work, cpu).work.func = NULL;
- return 0;
-}
-
-#if defined(CONFIG_NUMA)
-/*
- * Drains freelist for a node on each slab cache, used for memory hot-remove.
- * Returns -EBUSY if all objects cannot be drained so that the node is not
- * removed.
- *
- * Must hold slab_mutex.
- */
-static int __meminit drain_cache_node_node(int node)
-{
- struct kmem_cache *cachep;
- int ret = 0;
-
- list_for_each_entry(cachep, &slab_caches, list) {
- struct kmem_cache_node *n;
-
- n = get_node(cachep, node);
- if (!n)
- continue;
-
- drain_freelist(cachep, n, INT_MAX);
-
- if (!list_empty(&n->slabs_full) ||
- !list_empty(&n->slabs_partial)) {
- ret = -EBUSY;
- break;
- }
- }
- return ret;
-}
-
-static int __meminit slab_memory_callback(struct notifier_block *self,
- unsigned long action, void *arg)
-{
- struct memory_notify *mnb = arg;
- int ret = 0;
- int nid;
-
- nid = mnb->status_change_nid;
- if (nid < 0)
- goto out;
-
- switch (action) {
- case MEM_GOING_ONLINE:
- mutex_lock(&slab_mutex);
- ret = init_cache_node_node(nid);
- mutex_unlock(&slab_mutex);
- break;
- case MEM_GOING_OFFLINE:
- mutex_lock(&slab_mutex);
- ret = drain_cache_node_node(nid);
- mutex_unlock(&slab_mutex);
- break;
- case MEM_ONLINE:
- case MEM_OFFLINE:
- case MEM_CANCEL_ONLINE:
- case MEM_CANCEL_OFFLINE:
- break;
- }
-out:
- return notifier_from_errno(ret);
-}
-#endif /* CONFIG_NUMA */
-
-/*
- * swap the static kmem_cache_node with kmalloced memory
- */
-static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
- int nodeid)
-{
- struct kmem_cache_node *ptr;
-
- ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
- BUG_ON(!ptr);
-
- memcpy(ptr, list, sizeof(struct kmem_cache_node));
- /*
- * Do not assume that spinlocks can be initialized via memcpy:
- */
- raw_spin_lock_init(&ptr->list_lock);
-
- MAKE_ALL_LISTS(cachep, ptr, nodeid);
- cachep->node[nodeid] = ptr;
-}
-
-/*
- * For setting up all the kmem_cache_node structures for caches whose buffer_size
- * is the same as the size of kmem_cache_node.
- */
-static void __init set_up_node(struct kmem_cache *cachep, int index)
-{
- int node;
-
- for_each_online_node(node) {
- cachep->node[node] = &init_kmem_cache_node[index + node];
- cachep->node[node]->next_reap = jiffies +
- REAPTIMEOUT_NODE +
- ((unsigned long)cachep) % REAPTIMEOUT_NODE;
- }
-}
-
-/*
- * Initialisation. Called after the page allocator have been initialised and
- * before smp_init().
- */
-void __init kmem_cache_init(void)
-{
- int i;
-
- kmem_cache = &kmem_cache_boot;
-
- if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
- use_alien_caches = 0;
-
- for (i = 0; i < NUM_INIT_LISTS; i++)
- kmem_cache_node_init(&init_kmem_cache_node[i]);
-
- /*
- * Fragmentation resistance on low memory - only use bigger
- * page orders on machines with more than 32MB of memory if
- * not overridden on the command line.
- */
- if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
- slab_max_order = SLAB_MAX_ORDER_HI;
-
- /* Bootstrap is tricky, because several objects are allocated
- * from caches that do not exist yet:
- * 1) initialize the kmem_cache cache: it contains the struct
- * kmem_cache structures of all caches, except kmem_cache itself:
- * kmem_cache is statically allocated.
- * Initially an __init data area is used for the head array and the
- * kmem_cache_node structures, it's replaced with a kmalloc allocated
- * array at the end of the bootstrap.
- * 2) Create the first kmalloc cache.
- * The struct kmem_cache for the new cache is allocated normally.
- * An __init data area is used for the head array.
- * 3) Create the remaining kmalloc caches, with minimally sized
- * head arrays.
- * 4) Replace the __init data head arrays for kmem_cache and the first
- * kmalloc cache with kmalloc allocated arrays.
- * 5) Replace the __init data for kmem_cache_node for kmem_cache and
- * the other cache's with kmalloc allocated memory.
- * 6) Resize the head arrays of the kmalloc caches to their final sizes.
- */
-
- /* 1) create the kmem_cache */
-
- /*
- * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
- */
- create_boot_cache(kmem_cache, "kmem_cache",
- offsetof(struct kmem_cache, node) +
- nr_node_ids * sizeof(struct kmem_cache_node *),
- SLAB_HWCACHE_ALIGN, 0, 0);
- list_add(&kmem_cache->list, &slab_caches);
- slab_state = PARTIAL;
-
- /*
- * Initialize the caches that provide memory for the kmem_cache_node
- * structures first. Without this, further allocations will bug.
- */
- new_kmalloc_cache(INDEX_NODE, KMALLOC_NORMAL, ARCH_KMALLOC_FLAGS);
- slab_state = PARTIAL_NODE;
- setup_kmalloc_cache_index_table();
-
- /* 5) Replace the bootstrap kmem_cache_node */
- {
- int nid;
-
- for_each_online_node(nid) {
- init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
-
- init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE],
- &init_kmem_cache_node[SIZE_NODE + nid], nid);
- }
- }
-
- create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
-}
-
-void __init kmem_cache_init_late(void)
-{
- struct kmem_cache *cachep;
-
- /* 6) resize the head arrays to their final sizes */
- mutex_lock(&slab_mutex);
- list_for_each_entry(cachep, &slab_caches, list)
- if (enable_cpucache(cachep, GFP_NOWAIT))
- BUG();
- mutex_unlock(&slab_mutex);
-
- /* Done! */
- slab_state = FULL;
-
-#ifdef CONFIG_NUMA
- /*
- * Register a memory hotplug callback that initializes and frees
- * node.
- */
- hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
-#endif
-
- /*
- * The reap timers are started later, with a module init call: That part
- * of the kernel is not yet operational.
- */
-}
-
-static int __init cpucache_init(void)
-{
- int ret;
-
- /*
- * Register the timers that return unneeded pages to the page allocator
- */
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
- slab_online_cpu, slab_offline_cpu);
- WARN_ON(ret < 0);
-
- return 0;
-}
-__initcall(cpucache_init);
-
-static noinline void
-slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
-{
-#if DEBUG
- struct kmem_cache_node *n;
- unsigned long flags;
- int node;
- static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
- DEFAULT_RATELIMIT_BURST);
-
- if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
- return;
-
- pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
- nodeid, gfpflags, &gfpflags);
- pr_warn(" cache: %s, object size: %d, order: %d\n",
- cachep->name, cachep->size, cachep->gfporder);
-
- for_each_kmem_cache_node(cachep, node, n) {
- unsigned long total_slabs, free_slabs, free_objs;
-
- raw_spin_lock_irqsave(&n->list_lock, flags);
- total_slabs = n->total_slabs;
- free_slabs = n->free_slabs;
- free_objs = n->free_objects;
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
-
- pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
- node, total_slabs - free_slabs, total_slabs,
- (total_slabs * cachep->num) - free_objs,
- total_slabs * cachep->num);
- }
-#endif
-}
-
-/*
- * Interface to system's page allocator. No need to hold the
- * kmem_cache_node ->list_lock.
- *
- * If we requested dmaable memory, we will get it. Even if we
- * did not request dmaable memory, we might get it, but that
- * would be relatively rare and ignorable.
- */
-static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
- int nodeid)
-{
- struct folio *folio;
- struct slab *slab;
-
- flags |= cachep->allocflags;
-
- folio = (struct folio *) __alloc_pages_node(nodeid, flags, cachep->gfporder);
- if (!folio) {
- slab_out_of_memory(cachep, flags, nodeid);
- return NULL;
- }
-
- slab = folio_slab(folio);
-
- account_slab(slab, cachep->gfporder, cachep, flags);
- __folio_set_slab(folio);
- /* Make the flag visible before any changes to folio->mapping */
- smp_wmb();
- /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
- if (sk_memalloc_socks() && folio_is_pfmemalloc(folio))
- slab_set_pfmemalloc(slab);
-
- return slab;
-}
-
-/*
- * Interface to system's page release.
- */
-static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
-{
- int order = cachep->gfporder;
- struct folio *folio = slab_folio(slab);
-
- BUG_ON(!folio_test_slab(folio));
- __slab_clear_pfmemalloc(slab);
- page_mapcount_reset(&folio->page);
- folio->mapping = NULL;
- /* Make the mapping reset visible before clearing the flag */
- smp_wmb();
- __folio_clear_slab(folio);
-
- mm_account_reclaimed_pages(1 << order);
- unaccount_slab(slab, order, cachep);
- __free_pages(&folio->page, order);
-}
-
-static void kmem_rcu_free(struct rcu_head *head)
-{
- struct kmem_cache *cachep;
- struct slab *slab;
-
- slab = container_of(head, struct slab, rcu_head);
- cachep = slab->slab_cache;
-
- kmem_freepages(cachep, slab);
-}
-
-#if DEBUG
-static inline bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
-{
- return debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
- ((cachep->size % PAGE_SIZE) == 0);
-}
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
-{
- if (!is_debug_pagealloc_cache(cachep))
- return;
-
- __kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
-}
-
-#else
-static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
- int map) {}
-
-#endif
-
-static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
-{
- int size = cachep->object_size;
- addr = &((char *)addr)[obj_offset(cachep)];
-
- memset(addr, val, size);
- *(unsigned char *)(addr + size - 1) = POISON_END;
-}
-
-static void dump_line(char *data, int offset, int limit)
-{
- int i;
- unsigned char error = 0;
- int bad_count = 0;
-
- pr_err("%03x: ", offset);
- for (i = 0; i < limit; i++) {
- if (data[offset + i] != POISON_FREE) {
- error = data[offset + i];
- bad_count++;
- }
- }
- print_hex_dump(KERN_CONT, "", 0, 16, 1,
- &data[offset], limit, 1);
-
- if (bad_count == 1) {
- error ^= POISON_FREE;
- if (!(error & (error - 1))) {
- pr_err("Single bit error detected. Probably bad RAM.\n");
-#ifdef CONFIG_X86
- pr_err("Run memtest86+ or a similar memory test tool.\n");
-#else
- pr_err("Run a memory test tool.\n");
-#endif
- }
- }
-}
-#endif
-
-#if DEBUG
-
-static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
-{
- int i, size;
- char *realobj;
-
- if (cachep->flags & SLAB_RED_ZONE) {
- pr_err("Redzone: 0x%llx/0x%llx\n",
- *dbg_redzone1(cachep, objp),
- *dbg_redzone2(cachep, objp));
- }
-
- if (cachep->flags & SLAB_STORE_USER)
- pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
- realobj = (char *)objp + obj_offset(cachep);
- size = cachep->object_size;
- for (i = 0; i < size && lines; i += 16, lines--) {
- int limit;
- limit = 16;
- if (i + limit > size)
- limit = size - i;
- dump_line(realobj, i, limit);
- }
-}
-
-static void check_poison_obj(struct kmem_cache *cachep, void *objp)
-{
- char *realobj;
- int size, i;
- int lines = 0;
-
- if (is_debug_pagealloc_cache(cachep))
- return;
-
- realobj = (char *)objp + obj_offset(cachep);
- size = cachep->object_size;
-
- for (i = 0; i < size; i++) {
- char exp = POISON_FREE;
- if (i == size - 1)
- exp = POISON_END;
- if (realobj[i] != exp) {
- int limit;
- /* Mismatch ! */
- /* Print header */
- if (lines == 0) {
- pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
- print_tainted(), cachep->name,
- realobj, size);
- print_objinfo(cachep, objp, 0);
- }
- /* Hexdump the affected line */
- i = (i / 16) * 16;
- limit = 16;
- if (i + limit > size)
- limit = size - i;
- dump_line(realobj, i, limit);
- i += 16;
- lines++;
- /* Limit to 5 lines */
- if (lines > 5)
- break;
- }
- }
- if (lines != 0) {
- /* Print some data about the neighboring objects, if they
- * exist:
- */
- struct slab *slab = virt_to_slab(objp);
- unsigned int objnr;
-
- objnr = obj_to_index(cachep, slab, objp);
- if (objnr) {
- objp = index_to_obj(cachep, slab, objnr - 1);
- realobj = (char *)objp + obj_offset(cachep);
- pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
- print_objinfo(cachep, objp, 2);
- }
- if (objnr + 1 < cachep->num) {
- objp = index_to_obj(cachep, slab, objnr + 1);
- realobj = (char *)objp + obj_offset(cachep);
- pr_err("Next obj: start=%px, len=%d\n", realobj, size);
- print_objinfo(cachep, objp, 2);
- }
- }
-}
-#endif
-
-#if DEBUG
-static void slab_destroy_debugcheck(struct kmem_cache *cachep,
- struct slab *slab)
-{
- int i;
-
- if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
- poison_obj(cachep, slab->freelist - obj_offset(cachep),
- POISON_FREE);
- }
-
- for (i = 0; i < cachep->num; i++) {
- void *objp = index_to_obj(cachep, slab, i);
-
- if (cachep->flags & SLAB_POISON) {
- check_poison_obj(cachep, objp);
- slab_kernel_map(cachep, objp, 1);
- }
- if (cachep->flags & SLAB_RED_ZONE) {
- if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
- slab_error(cachep, "start of a freed object was overwritten");
- if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
- slab_error(cachep, "end of a freed object was overwritten");
- }
- }
-}
-#else
-static void slab_destroy_debugcheck(struct kmem_cache *cachep,
- struct slab *slab)
-{
-}
-#endif
-
-/**
- * slab_destroy - destroy and release all objects in a slab
- * @cachep: cache pointer being destroyed
- * @slab: slab being destroyed
- *
- * Destroy all the objs in a slab, and release the mem back to the system.
- * Before calling the slab must have been unlinked from the cache. The
- * kmem_cache_node ->list_lock is not held/needed.
- */
-static void slab_destroy(struct kmem_cache *cachep, struct slab *slab)
-{
- void *freelist;
-
- freelist = slab->freelist;
- slab_destroy_debugcheck(cachep, slab);
- if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
- call_rcu(&slab->rcu_head, kmem_rcu_free);
- else
- kmem_freepages(cachep, slab);
-
- /*
- * From now on, we don't use freelist
- * although actual page can be freed in rcu context
- */
- if (OFF_SLAB(cachep))
- kfree(freelist);
-}
-
-/*
- * Update the size of the caches before calling slabs_destroy as it may
- * recursively call kfree.
- */
-static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
-{
- struct slab *slab, *n;
-
- list_for_each_entry_safe(slab, n, list, slab_list) {
- list_del(&slab->slab_list);
- slab_destroy(cachep, slab);
- }
-}
-
-/**
- * calculate_slab_order - calculate size (page order) of slabs
- * @cachep: pointer to the cache that is being created
- * @size: size of objects to be created in this cache.
- * @flags: slab allocation flags
- *
- * Also calculates the number of objects per slab.
- *
- * This could be made much more intelligent. For now, try to avoid using
- * high order pages for slabs. When the gfp() functions are more friendly
- * towards high-order requests, this should be changed.
- *
- * Return: number of left-over bytes in a slab
- */
-static size_t calculate_slab_order(struct kmem_cache *cachep,
- size_t size, slab_flags_t flags)
-{
- size_t left_over = 0;
- int gfporder;
-
- for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
- unsigned int num;
- size_t remainder;
-
- num = cache_estimate(gfporder, size, flags, &remainder);
- if (!num)
- continue;
-
- /* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
- if (num > SLAB_OBJ_MAX_NUM)
- break;
-
- if (flags & CFLGS_OFF_SLAB) {
- struct kmem_cache *freelist_cache;
- size_t freelist_size;
- size_t freelist_cache_size;
-
- freelist_size = num * sizeof(freelist_idx_t);
- if (freelist_size > KMALLOC_MAX_CACHE_SIZE) {
- freelist_cache_size = PAGE_SIZE << get_order(freelist_size);
- } else {
- freelist_cache = kmalloc_slab(freelist_size, 0u, _RET_IP_);
- if (!freelist_cache)
- continue;
- freelist_cache_size = freelist_cache->size;
-
- /*
- * Needed to avoid possible looping condition
- * in cache_grow_begin()
- */
- if (OFF_SLAB(freelist_cache))
- continue;
- }
-
- /* check if off slab has enough benefit */
- if (freelist_cache_size > cachep->size / 2)
- continue;
- }
-
- /* Found something acceptable - save it away */
- cachep->num = num;
- cachep->gfporder = gfporder;
- left_over = remainder;
-
- /*
- * A VFS-reclaimable slab tends to have most allocations
- * as GFP_NOFS and we really don't want to have to be allocating
- * higher-order pages when we are unable to shrink dcache.
- */
- if (flags & SLAB_RECLAIM_ACCOUNT)
- break;
-
- /*
- * Large number of objects is good, but very large slabs are
- * currently bad for the gfp()s.
- */
- if (gfporder >= slab_max_order)
- break;
-
- /*
- * Acceptable internal fragmentation?
- */
- if (left_over * 8 <= (PAGE_SIZE << gfporder))
- break;
- }
- return left_over;
-}
-
-static struct array_cache __percpu *alloc_kmem_cache_cpus(
- struct kmem_cache *cachep, int entries, int batchcount)
-{
- int cpu;
- size_t size;
- struct array_cache __percpu *cpu_cache;
-
- size = sizeof(void *) * entries + sizeof(struct array_cache);
- cpu_cache = __alloc_percpu(size, sizeof(void *));
-
- if (!cpu_cache)
- return NULL;
-
- for_each_possible_cpu(cpu) {
- init_arraycache(per_cpu_ptr(cpu_cache, cpu),
- entries, batchcount);
- }
-
- return cpu_cache;
-}
-
-static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
-{
- if (slab_state >= FULL)
- return enable_cpucache(cachep, gfp);
-
- cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
- if (!cachep->cpu_cache)
- return 1;
-
- if (slab_state == DOWN) {
- /* Creation of first cache (kmem_cache). */
- set_up_node(kmem_cache, CACHE_CACHE);
- } else if (slab_state == PARTIAL) {
- /* For kmem_cache_node */
- set_up_node(cachep, SIZE_NODE);
- } else {
- int node;
-
- for_each_online_node(node) {
- cachep->node[node] = kmalloc_node(
- sizeof(struct kmem_cache_node), gfp, node);
- BUG_ON(!cachep->node[node]);
- kmem_cache_node_init(cachep->node[node]);
- }
- }
-
- cachep->node[numa_mem_id()]->next_reap =
- jiffies + REAPTIMEOUT_NODE +
- ((unsigned long)cachep) % REAPTIMEOUT_NODE;
-
- cpu_cache_get(cachep)->avail = 0;
- cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
- cpu_cache_get(cachep)->batchcount = 1;
- cpu_cache_get(cachep)->touched = 0;
- cachep->batchcount = 1;
- cachep->limit = BOOT_CPUCACHE_ENTRIES;
- return 0;
-}
-
-slab_flags_t kmem_cache_flags(unsigned int object_size,
- slab_flags_t flags, const char *name)
-{
- return flags;
-}
-
-struct kmem_cache *
-__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
- slab_flags_t flags, void (*ctor)(void *))
-{
- struct kmem_cache *cachep;
-
- cachep = find_mergeable(size, align, flags, name, ctor);
- if (cachep) {
- cachep->refcount++;
-
- /*
- * Adjust the object sizes so that we clear
- * the complete object on kzalloc.
- */
- cachep->object_size = max_t(int, cachep->object_size, size);
- }
- return cachep;
-}
-
-static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
- size_t size, slab_flags_t flags)
-{
- size_t left;
-
- cachep->num = 0;
-
- /*
- * If slab auto-initialization on free is enabled, store the freelist
- * off-slab, so that its contents don't end up in one of the allocated
- * objects.
- */
- if (unlikely(slab_want_init_on_free(cachep)))
- return false;
-
- if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
- return false;
-
- left = calculate_slab_order(cachep, size,
- flags | CFLGS_OBJFREELIST_SLAB);
- if (!cachep->num)
- return false;
-
- if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
- return false;
-
- cachep->colour = left / cachep->colour_off;
-
- return true;
-}
-
-static bool set_off_slab_cache(struct kmem_cache *cachep,
- size_t size, slab_flags_t flags)
-{
- size_t left;
-
- cachep->num = 0;
-
- /*
- * Always use on-slab management when SLAB_NOLEAKTRACE
- * to avoid recursive calls into kmemleak.
- */
- if (flags & SLAB_NOLEAKTRACE)
- return false;
-
- /*
- * Size is large, assume best to place the slab management obj
- * off-slab (should allow better packing of objs).
- */
- left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
- if (!cachep->num)
- return false;
-
- /*
- * If the slab has been placed off-slab, and we have enough space then
- * move it on-slab. This is at the expense of any extra colouring.
- */
- if (left >= cachep->num * sizeof(freelist_idx_t))
- return false;
-
- cachep->colour = left / cachep->colour_off;
-
- return true;
-}
-
-static bool set_on_slab_cache(struct kmem_cache *cachep,
- size_t size, slab_flags_t flags)
-{
- size_t left;
-
- cachep->num = 0;
-
- left = calculate_slab_order(cachep, size, flags);
- if (!cachep->num)
- return false;
-
- cachep->colour = left / cachep->colour_off;
-
- return true;
-}
-
-/*
- * __kmem_cache_create - Create a cache.
- * @cachep: cache management descriptor
- * @flags: SLAB flags
- *
- * Returns zero on success, nonzero on failure.
- *
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
- */
-int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
-{
- size_t ralign = BYTES_PER_WORD;
- gfp_t gfp;
- int err;
- unsigned int size = cachep->size;
-
-#if DEBUG
-#if FORCED_DEBUG
- /*
- * Enable redzoning and last user accounting, except for caches with
- * large objects, if the increased size would increase the object size
- * above the next power of two: caches with object sizes just above a
- * power of two have a significant amount of internal fragmentation.
- */
- if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
- 2 * sizeof(unsigned long long)))
- flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
- if (!(flags & SLAB_TYPESAFE_BY_RCU))
- flags |= SLAB_POISON;
-#endif
-#endif
-
- /*
- * Check that size is in terms of words. This is needed to avoid
- * unaligned accesses for some archs when redzoning is used, and makes
- * sure any on-slab bufctl's are also correctly aligned.
- */
- size = ALIGN(size, BYTES_PER_WORD);
-
- if (flags & SLAB_RED_ZONE) {
- ralign = REDZONE_ALIGN;
- /* If redzoning, ensure that the second redzone is suitably
- * aligned, by adjusting the object size accordingly. */
- size = ALIGN(size, REDZONE_ALIGN);
- }
-
- /* 3) caller mandated alignment */
- if (ralign < cachep->align) {
- ralign = cachep->align;
- }
- /* disable debug if necessary */
- if (ralign > __alignof__(unsigned long long))
- flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
- /*
- * 4) Store it.
- */
- cachep->align = ralign;
- cachep->colour_off = cache_line_size();
- /* Offset must be a multiple of the alignment. */
- if (cachep->colour_off < cachep->align)
- cachep->colour_off = cachep->align;
-
- if (slab_is_available())
- gfp = GFP_KERNEL;
- else
- gfp = GFP_NOWAIT;
-
-#if DEBUG
-
- /*
- * Both debugging options require word-alignment which is calculated
- * into align above.
- */
- if (flags & SLAB_RED_ZONE) {
- /* add space for red zone words */
- cachep->obj_offset += sizeof(unsigned long long);
- size += 2 * sizeof(unsigned long long);
- }
- if (flags & SLAB_STORE_USER) {
- /* user store requires one word storage behind the end of
- * the real object. But if the second red zone needs to be
- * aligned to 64 bits, we must allow that much space.
- */
- if (flags & SLAB_RED_ZONE)
- size += REDZONE_ALIGN;
- else
- size += BYTES_PER_WORD;
- }
-#endif
-
- kasan_cache_create(cachep, &size, &flags);
-
- size = ALIGN(size, cachep->align);
- /*
- * We should restrict the number of objects in a slab to implement
- * a byte sized index. Refer to the comment on the SLAB_OBJ_MIN_SIZE definition.
- */
- if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
- size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
-
-#if DEBUG
- /*
- * To activate debug pagealloc, off-slab freelist management is a
- * necessary requirement. In the early phase of initialization the
- * small sized kmalloc caches are not yet set up, so this would not be
- * possible; checking size >= 256 guarantees that all the necessary
- * small sized caches are initialized by that point in the slab
- * initialization sequence.
- */
- if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
- size >= 256 && cachep->object_size > cache_line_size()) {
- if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
- size_t tmp_size = ALIGN(size, PAGE_SIZE);
-
- if (set_off_slab_cache(cachep, tmp_size, flags)) {
- flags |= CFLGS_OFF_SLAB;
- cachep->obj_offset += tmp_size - size;
- size = tmp_size;
- goto done;
- }
- }
- }
-#endif
-
- if (set_objfreelist_slab_cache(cachep, size, flags)) {
- flags |= CFLGS_OBJFREELIST_SLAB;
- goto done;
- }
-
- if (set_off_slab_cache(cachep, size, flags)) {
- flags |= CFLGS_OFF_SLAB;
- goto done;
- }
-
- if (set_on_slab_cache(cachep, size, flags))
- goto done;
-
- return -E2BIG;
-
-done:
- cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
- cachep->flags = flags;
- cachep->allocflags = __GFP_COMP;
- if (flags & SLAB_CACHE_DMA)
- cachep->allocflags |= GFP_DMA;
- if (flags & SLAB_CACHE_DMA32)
- cachep->allocflags |= GFP_DMA32;
- if (flags & SLAB_RECLAIM_ACCOUNT)
- cachep->allocflags |= __GFP_RECLAIMABLE;
- cachep->size = size;
- cachep->reciprocal_buffer_size = reciprocal_value(size);
-
-#if DEBUG
- /*
- * If we're going to use the generic kernel_map_pages()
- * poisoning, then it's going to smash the contents of
- * the redzone and userword anyhow, so switch them off.
- */
- if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
- (cachep->flags & SLAB_POISON) &&
- is_debug_pagealloc_cache(cachep))
- cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
-#endif
-
- err = setup_cpu_cache(cachep, gfp);
- if (err) {
- __kmem_cache_release(cachep);
- return err;
- }
-
- return 0;
-}
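For reference, a minimal caller-side sketch of the cache lifecycle that this function sits underneath; this is illustrative only and not part of the patch, and struct foo, foo_cachep, foo_init() and foo_use() are hypothetical names:

#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

struct foo {
	int a;
	struct list_head list;
};

static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
	/* Debug flags such as SLAB_POISON or SLAB_RED_ZONE are normally
	 * injected by the allocator's debug configuration, not by callers. */
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static void foo_use(void)
{
	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (f)
		kmem_cache_free(foo_cachep, f);
}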
-
-#if DEBUG
-static void check_irq_off(void)
-{
- BUG_ON(!irqs_disabled());
-}
-
-static void check_irq_on(void)
-{
- BUG_ON(irqs_disabled());
-}
-
-static void check_mutex_acquired(void)
-{
- BUG_ON(!mutex_is_locked(&slab_mutex));
-}
-
-static void check_spinlock_acquired(struct kmem_cache *cachep)
-{
-#ifdef CONFIG_SMP
- check_irq_off();
- assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
-#endif
-}
-
-static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
-{
-#ifdef CONFIG_SMP
- check_irq_off();
- assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
-#endif
-}
-
-#else
-#define check_irq_off() do { } while(0)
-#define check_irq_on() do { } while(0)
-#define check_mutex_acquired() do { } while(0)
-#define check_spinlock_acquired(x) do { } while(0)
-#define check_spinlock_acquired_node(x, y) do { } while(0)
-#endif
-
-static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
- int node, bool free_all, struct list_head *list)
-{
- int tofree;
-
- if (!ac || !ac->avail)
- return;
-
- tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
- if (tofree > ac->avail)
- tofree = (ac->avail + 1) / 2;
-
- free_block(cachep, ac->entry, tofree, node, list);
- ac->avail -= tofree;
- memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
-}
-
-static void do_drain(void *arg)
-{
- struct kmem_cache *cachep = arg;
- struct array_cache *ac;
- int node = numa_mem_id();
- struct kmem_cache_node *n;
- LIST_HEAD(list);
-
- check_irq_off();
- ac = cpu_cache_get(cachep);
- n = get_node(cachep, node);
- raw_spin_lock(&n->list_lock);
- free_block(cachep, ac->entry, ac->avail, node, &list);
- raw_spin_unlock(&n->list_lock);
- ac->avail = 0;
- slabs_destroy(cachep, &list);
-}
-
-static void drain_cpu_caches(struct kmem_cache *cachep)
-{
- struct kmem_cache_node *n;
- int node;
- LIST_HEAD(list);
-
- on_each_cpu(do_drain, cachep, 1);
- check_irq_on();
- for_each_kmem_cache_node(cachep, node, n)
- if (n->alien)
- drain_alien_cache(cachep, n->alien);
-
- for_each_kmem_cache_node(cachep, node, n) {
- raw_spin_lock_irq(&n->list_lock);
- drain_array_locked(cachep, n->shared, node, true, &list);
- raw_spin_unlock_irq(&n->list_lock);
-
- slabs_destroy(cachep, &list);
- }
-}
-
-/*
- * Remove slabs from the list of free slabs.
- * Specify the number of slabs to drain in tofree.
- *
- * Returns the actual number of slabs released.
- */
-static int drain_freelist(struct kmem_cache *cache,
- struct kmem_cache_node *n, int tofree)
-{
- struct list_head *p;
- int nr_freed;
- struct slab *slab;
-
- nr_freed = 0;
- while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
-
- raw_spin_lock_irq(&n->list_lock);
- p = n->slabs_free.prev;
- if (p == &n->slabs_free) {
- raw_spin_unlock_irq(&n->list_lock);
- goto out;
- }
-
- slab = list_entry(p, struct slab, slab_list);
- list_del(&slab->slab_list);
- n->free_slabs--;
- n->total_slabs--;
- /*
- * Safe to drop the lock. The slab is no longer linked
- * to the cache.
- */
- n->free_objects -= cache->num;
- raw_spin_unlock_irq(&n->list_lock);
- slab_destroy(cache, slab);
- nr_freed++;
-
- cond_resched();
- }
-out:
- return nr_freed;
-}
-
-bool __kmem_cache_empty(struct kmem_cache *s)
-{
- int node;
- struct kmem_cache_node *n;
-
- for_each_kmem_cache_node(s, node, n)
- if (!list_empty(&n->slabs_full) ||
- !list_empty(&n->slabs_partial))
- return false;
- return true;
-}
-
-int __kmem_cache_shrink(struct kmem_cache *cachep)
-{
- int ret = 0;
- int node;
- struct kmem_cache_node *n;
-
- drain_cpu_caches(cachep);
-
- check_irq_on();
- for_each_kmem_cache_node(cachep, node, n) {
- drain_freelist(cachep, n, INT_MAX);
-
- ret += !list_empty(&n->slabs_full) ||
- !list_empty(&n->slabs_partial);
- }
- return (ret ? 1 : 0);
-}
-
-int __kmem_cache_shutdown(struct kmem_cache *cachep)
-{
- return __kmem_cache_shrink(cachep);
-}
-
-void __kmem_cache_release(struct kmem_cache *cachep)
-{
- int i;
- struct kmem_cache_node *n;
-
- cache_random_seq_destroy(cachep);
-
- free_percpu(cachep->cpu_cache);
-
- /* NUMA: free the node structures */
- for_each_kmem_cache_node(cachep, i, n) {
- kfree(n->shared);
- free_alien_cache(n->alien);
- kfree(n);
- cachep->node[i] = NULL;
- }
-}
-
-/*
- * Get the memory for a slab management obj.
- *
- * For a slab cache whose slab descriptor is off-slab, the descriptor
- * cannot come from the same cache that is being created: that would
- * defer creation of the kmalloc_{dma,}_cache of size sizeof(slab
- * descriptor) to this point, which in turn calls down to
- * __kmem_cache_create() and looks up the desired-size cache in the
- * kmalloc_{dma,}_caches: a "chicken-and-egg" problem.
- *
- * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
- * which are all initialized during kmem_cache_init().
- */
-static void *alloc_slabmgmt(struct kmem_cache *cachep,
- struct slab *slab, int colour_off,
- gfp_t local_flags, int nodeid)
-{
- void *freelist;
- void *addr = slab_address(slab);
-
- slab->s_mem = addr + colour_off;
- slab->active = 0;
-
- if (OBJFREELIST_SLAB(cachep))
- freelist = NULL;
- else if (OFF_SLAB(cachep)) {
- /* Slab management obj is off-slab. */
- freelist = kmalloc_node(cachep->freelist_size,
- local_flags, nodeid);
- } else {
- /* We will use last bytes at the slab for freelist */
- freelist = addr + (PAGE_SIZE << cachep->gfporder) -
- cachep->freelist_size;
- }
-
- return freelist;
-}
-
-static inline freelist_idx_t get_free_obj(struct slab *slab, unsigned int idx)
-{
- return ((freelist_idx_t *) slab->freelist)[idx];
-}
-
-static inline void set_free_obj(struct slab *slab,
- unsigned int idx, freelist_idx_t val)
-{
- ((freelist_idx_t *)(slab->freelist))[idx] = val;
-}
-
-static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab)
-{
-#if DEBUG
- int i;
-
- for (i = 0; i < cachep->num; i++) {
- void *objp = index_to_obj(cachep, slab, i);
-
- if (cachep->flags & SLAB_STORE_USER)
- *dbg_userword(cachep, objp) = NULL;
-
- if (cachep->flags & SLAB_RED_ZONE) {
- *dbg_redzone1(cachep, objp) = RED_INACTIVE;
- *dbg_redzone2(cachep, objp) = RED_INACTIVE;
- }
- /*
- * Constructors are not allowed to allocate memory from the same
- * cache which they are a constructor for. Otherwise, deadlock.
- * They must also be threaded.
- */
- if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
- kasan_unpoison_object_data(cachep,
- objp + obj_offset(cachep));
- cachep->ctor(objp + obj_offset(cachep));
- kasan_poison_object_data(
- cachep, objp + obj_offset(cachep));
- }
-
- if (cachep->flags & SLAB_RED_ZONE) {
- if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
- slab_error(cachep, "constructor overwrote the end of an object");
- if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
- slab_error(cachep, "constructor overwrote the start of an object");
- }
- /* need to poison the objs? */
- if (cachep->flags & SLAB_POISON) {
- poison_obj(cachep, objp, POISON_FREE);
- slab_kernel_map(cachep, objp, 0);
- }
- }
-#endif
-}
-
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
-/* Hold information during a freelist initialization */
-struct freelist_init_state {
- unsigned int pos;
- unsigned int *list;
- unsigned int count;
-};
-
-/*
- * Initialize the state based on the randomization method available.
- * Return true if the pre-computed list is available, false otherwise.
- */
-static bool freelist_state_initialize(struct freelist_init_state *state,
- struct kmem_cache *cachep,
- unsigned int count)
-{
- bool ret;
- if (!cachep->random_seq) {
- ret = false;
- } else {
- state->list = cachep->random_seq;
- state->count = count;
- state->pos = get_random_u32_below(count);
- ret = true;
- }
- return ret;
-}
-
-/* Get the next entry from the pre-computed list, wrapping around at the end */
-static freelist_idx_t next_random_slot(struct freelist_init_state *state)
-{
- if (state->pos >= state->count)
- state->pos = 0;
- return state->list[state->pos++];
-}
-
-/* Swap two freelist entries */
-static void swap_free_obj(struct slab *slab, unsigned int a, unsigned int b)
-{
- swap(((freelist_idx_t *) slab->freelist)[a],
- ((freelist_idx_t *) slab->freelist)[b]);
-}
-
-/*
- * Shuffle the freelist initialization state based on pre-computed lists.
- * Return true if the list was successfully shuffled, false otherwise.
- */
-static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab)
-{
- unsigned int objfreelist = 0, i, rand, count = cachep->num;
- struct freelist_init_state state;
- bool precomputed;
-
- if (count < 2)
- return false;
-
- precomputed = freelist_state_initialize(&state, cachep, count);
-
- /* Take a random entry as the objfreelist */
- if (OBJFREELIST_SLAB(cachep)) {
- if (!precomputed)
- objfreelist = count - 1;
- else
- objfreelist = next_random_slot(&state);
- slab->freelist = index_to_obj(cachep, slab, objfreelist) +
- obj_offset(cachep);
- count--;
- }
-
- /*
- * On early boot, generate the list dynamically.
- * Later use a pre-computed list for speed.
- */
- if (!precomputed) {
- for (i = 0; i < count; i++)
- set_free_obj(slab, i, i);
-
- /* Fisher-Yates shuffle */
- for (i = count - 1; i > 0; i--) {
- rand = get_random_u32_below(i + 1);
- swap_free_obj(slab, i, rand);
- }
- } else {
- for (i = 0; i < count; i++)
- set_free_obj(slab, i, next_random_slot(&state));
- }
-
- if (OBJFREELIST_SLAB(cachep))
- set_free_obj(slab, cachep->num - 1, objfreelist);
-
- return true;
-}
-#else
-static inline bool shuffle_freelist(struct kmem_cache *cachep,
- struct slab *slab)
-{
- return false;
-}
-#endif /* CONFIG_SLAB_FREELIST_RANDOM */
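For reference, the randomization above either consumes a pre-computed sequence or performs an in-place Fisher-Yates shuffle. Below is a standalone sketch of that shuffle over a plain index array, illustrative only; rand_below() is a hypothetical stand-in for get_random_u32_below():

static void shuffle_indexes(unsigned int *idx, unsigned int count,
			    unsigned int (*rand_below)(unsigned int))
{
	unsigned int i, j, tmp;

	for (i = 0; i < count; i++)
		idx[i] = i;			/* identity freelist */

	for (i = count - 1; i > 0; i--) {
		j = rand_below(i + 1);		/* uniform pick in [0, i] */
		tmp = idx[i];			/* swap idx[i] and idx[j] */
		idx[i] = idx[j];
		idx[j] = tmp;
	}
}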
-
-static void cache_init_objs(struct kmem_cache *cachep,
- struct slab *slab)
-{
- int i;
- void *objp;
- bool shuffled;
-
- cache_init_objs_debug(cachep, slab);
-
- /* Try to randomize the freelist if enabled */
- shuffled = shuffle_freelist(cachep, slab);
-
- if (!shuffled && OBJFREELIST_SLAB(cachep)) {
- slab->freelist = index_to_obj(cachep, slab, cachep->num - 1) +
- obj_offset(cachep);
- }
-
- for (i = 0; i < cachep->num; i++) {
- objp = index_to_obj(cachep, slab, i);
- objp = kasan_init_slab_obj(cachep, objp);
-
- /* constructor could break poison info */
- if (DEBUG == 0 && cachep->ctor) {
- kasan_unpoison_object_data(cachep, objp);
- cachep->ctor(objp);
- kasan_poison_object_data(cachep, objp);
- }
-
- if (!shuffled)
- set_free_obj(slab, i, i);
- }
-}
-
-static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slab)
-{
- void *objp;
-
- objp = index_to_obj(cachep, slab, get_free_obj(slab, slab->active));
- slab->active++;
-
- return objp;
-}
-
-static void slab_put_obj(struct kmem_cache *cachep,
- struct slab *slab, void *objp)
-{
- unsigned int objnr = obj_to_index(cachep, slab, objp);
-#if DEBUG
- unsigned int i;
-
- /* Verify double free bug */
- for (i = slab->active; i < cachep->num; i++) {
- if (get_free_obj(slab, i) == objnr) {
- pr_err("slab: double free detected in cache '%s', objp %px\n",
- cachep->name, objp);
- BUG();
- }
- }
-#endif
- slab->active--;
- if (!slab->freelist)
- slab->freelist = objp + obj_offset(cachep);
-
- set_free_obj(slab, slab->active, objnr);
-}
-
-/*
- * Grow (by 1) the number of slabs within a cache. This is called by
- * kmem_cache_alloc() when there are no active objs left in a cache.
- */
-static struct slab *cache_grow_begin(struct kmem_cache *cachep,
- gfp_t flags, int nodeid)
-{
- void *freelist;
- size_t offset;
- gfp_t local_flags;
- int slab_node;
- struct kmem_cache_node *n;
- struct slab *slab;
-
- /*
- * Be lazy and only check for valid flags here, keeping it out of the
- * critical path in kmem_cache_alloc().
- */
- if (unlikely(flags & GFP_SLAB_BUG_MASK))
- flags = kmalloc_fix_flags(flags);
-
- WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
- local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
-
- check_irq_off();
- if (gfpflags_allow_blocking(local_flags))
- local_irq_enable();
-
- /*
- * Get mem for the objs. Attempt to allocate a physical page from
- * 'nodeid'.
- */
- slab = kmem_getpages(cachep, local_flags, nodeid);
- if (!slab)
- goto failed;
-
- slab_node = slab_nid(slab);
- n = get_node(cachep, slab_node);
-
- /* Get colour for the slab, and calculate the next value. */
- n->colour_next++;
- if (n->colour_next >= cachep->colour)
- n->colour_next = 0;
-
- offset = n->colour_next;
- if (offset >= cachep->colour)
- offset = 0;
-
- offset *= cachep->colour_off;
-
- /*
- * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
- * page_address() in the latter returns a non-tagged pointer,
- * as it should be for slab pages.
- */
- kasan_poison_slab(slab);
-
- /* Get slab management. */
- freelist = alloc_slabmgmt(cachep, slab, offset,
- local_flags & ~GFP_CONSTRAINT_MASK, slab_node);
- if (OFF_SLAB(cachep) && !freelist)
- goto opps1;
-
- slab->slab_cache = cachep;
- slab->freelist = freelist;
-
- cache_init_objs(cachep, slab);
-
- if (gfpflags_allow_blocking(local_flags))
- local_irq_disable();
-
- return slab;
-
-opps1:
- kmem_freepages(cachep, slab);
-failed:
- if (gfpflags_allow_blocking(local_flags))
- local_irq_disable();
- return NULL;
-}
-
-static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab)
-{
- struct kmem_cache_node *n;
- void *list = NULL;
-
- check_irq_off();
-
- if (!slab)
- return;
-
- INIT_LIST_HEAD(&slab->slab_list);
- n = get_node(cachep, slab_nid(slab));
-
- raw_spin_lock(&n->list_lock);
- n->total_slabs++;
- if (!slab->active) {
- list_add_tail(&slab->slab_list, &n->slabs_free);
- n->free_slabs++;
- } else
- fixup_slab_list(cachep, n, slab, &list);
-
- STATS_INC_GROWN(cachep);
- n->free_objects += cachep->num - slab->active;
- raw_spin_unlock(&n->list_lock);
-
- fixup_objfreelist_debug(cachep, &list);
-}
-
-#if DEBUG
-
-/*
- * Perform extra freeing checks:
- * - detect bad pointers.
- * - POISON/RED_ZONE checking
- */
-static void kfree_debugcheck(const void *objp)
-{
- if (!virt_addr_valid(objp)) {
- pr_err("kfree_debugcheck: out of range ptr %lxh\n",
- (unsigned long)objp);
- BUG();
- }
-}
-
-static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
-{
- unsigned long long redzone1, redzone2;
-
- redzone1 = *dbg_redzone1(cache, obj);
- redzone2 = *dbg_redzone2(cache, obj);
-
- /*
- * Redzone is ok.
- */
- if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
- return;
-
- if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
- slab_error(cache, "double free detected");
- else
- slab_error(cache, "memory outside object was overwritten");
-
- pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
- obj, redzone1, redzone2);
-}
-
-static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
- unsigned long caller)
-{
- unsigned int objnr;
- struct slab *slab;
-
- BUG_ON(virt_to_cache(objp) != cachep);
-
- objp -= obj_offset(cachep);
- kfree_debugcheck(objp);
- slab = virt_to_slab(objp);
-
- if (cachep->flags & SLAB_RED_ZONE) {
- verify_redzone_free(cachep, objp);
- *dbg_redzone1(cachep, objp) = RED_INACTIVE;
- *dbg_redzone2(cachep, objp) = RED_INACTIVE;
- }
- if (cachep->flags & SLAB_STORE_USER)
- *dbg_userword(cachep, objp) = (void *)caller;
-
- objnr = obj_to_index(cachep, slab, objp);
-
- BUG_ON(objnr >= cachep->num);
- BUG_ON(objp != index_to_obj(cachep, slab, objnr));
-
- if (cachep->flags & SLAB_POISON) {
- poison_obj(cachep, objp, POISON_FREE);
- slab_kernel_map(cachep, objp, 0);
- }
- return objp;
-}
-
-#else
-#define kfree_debugcheck(x) do { } while(0)
-#define cache_free_debugcheck(x, objp, z) (objp)
-#endif
-
-static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
- void **list)
-{
-#if DEBUG
- void *next = *list;
- void *objp;
-
- while (next) {
- objp = next - obj_offset(cachep);
- next = *(void **)next;
- poison_obj(cachep, objp, POISON_FREE);
- }
-#endif
-}
-
-static inline void fixup_slab_list(struct kmem_cache *cachep,
- struct kmem_cache_node *n, struct slab *slab,
- void **list)
-{
- /* move slabp to correct slabp list: */
- list_del(&slab->slab_list);
- if (slab->active == cachep->num) {
- list_add(&slab->slab_list, &n->slabs_full);
- if (OBJFREELIST_SLAB(cachep)) {
-#if DEBUG
- /* Poisoning will be done without holding the lock */
- if (cachep->flags & SLAB_POISON) {
- void **objp = slab->freelist;
-
- *objp = *list;
- *list = objp;
- }
-#endif
- slab->freelist = NULL;
- }
- } else
- list_add(&slab->slab_list, &n->slabs_partial);
-}
-
-/* Try to find non-pfmemalloc slab if needed */
-static noinline struct slab *get_valid_first_slab(struct kmem_cache_node *n,
- struct slab *slab, bool pfmemalloc)
-{
- if (!slab)
- return NULL;
-
- if (pfmemalloc)
- return slab;
-
- if (!slab_test_pfmemalloc(slab))
- return slab;
-
- /* No need to keep pfmemalloc slab if we have enough free objects */
- if (n->free_objects > n->free_limit) {
- slab_clear_pfmemalloc(slab);
- return slab;
- }
-
- /* Move pfmemalloc slab to the end of list to speed up next search */
- list_del(&slab->slab_list);
- if (!slab->active) {
- list_add_tail(&slab->slab_list, &n->slabs_free);
- n->free_slabs++;
- } else
- list_add_tail(&slab->slab_list, &n->slabs_partial);
-
- list_for_each_entry(slab, &n->slabs_partial, slab_list) {
- if (!slab_test_pfmemalloc(slab))
- return slab;
- }
-
- n->free_touched = 1;
- list_for_each_entry(slab, &n->slabs_free, slab_list) {
- if (!slab_test_pfmemalloc(slab)) {
- n->free_slabs--;
- return slab;
- }
- }
-
- return NULL;
-}
-
-static struct slab *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
-{
- struct slab *slab;
-
- assert_raw_spin_locked(&n->list_lock);
- slab = list_first_entry_or_null(&n->slabs_partial, struct slab,
- slab_list);
- if (!slab) {
- n->free_touched = 1;
- slab = list_first_entry_or_null(&n->slabs_free, struct slab,
- slab_list);
- if (slab)
- n->free_slabs--;
- }
-
- if (sk_memalloc_socks())
- slab = get_valid_first_slab(n, slab, pfmemalloc);
-
- return slab;
-}
-
-static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
- struct kmem_cache_node *n, gfp_t flags)
-{
- struct slab *slab;
- void *obj;
- void *list = NULL;
-
- if (!gfp_pfmemalloc_allowed(flags))
- return NULL;
-
- raw_spin_lock(&n->list_lock);
- slab = get_first_slab(n, true);
- if (!slab) {
- raw_spin_unlock(&n->list_lock);
- return NULL;
- }
-
- obj = slab_get_obj(cachep, slab);
- n->free_objects--;
-
- fixup_slab_list(cachep, n, slab, &list);
-
- raw_spin_unlock(&n->list_lock);
- fixup_objfreelist_debug(cachep, &list);
-
- return obj;
-}
-
-/*
- * Slab list should be fixed up by fixup_slab_list() for existing slab
- * or cache_grow_end() for new slab
- */
-static __always_inline int alloc_block(struct kmem_cache *cachep,
- struct array_cache *ac, struct slab *slab, int batchcount)
-{
- /*
- * There must be at least one object available for
- * allocation.
- */
- BUG_ON(slab->active >= cachep->num);
-
- while (slab->active < cachep->num && batchcount--) {
- STATS_INC_ALLOCED(cachep);
- STATS_INC_ACTIVE(cachep);
- STATS_SET_HIGH(cachep);
-
- ac->entry[ac->avail++] = slab_get_obj(cachep, slab);
- }
-
- return batchcount;
-}
-
-static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
-{
- int batchcount;
- struct kmem_cache_node *n;
- struct array_cache *ac, *shared;
- int node;
- void *list = NULL;
- struct slab *slab;
-
- check_irq_off();
- node = numa_mem_id();
-
- ac = cpu_cache_get(cachep);
- batchcount = ac->batchcount;
- if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
- /*
- * If there was little recent activity on this cache, then
- * perform only a partial refill. Otherwise we could generate
- * refill bouncing.
- */
- batchcount = BATCHREFILL_LIMIT;
- }
- n = get_node(cachep, node);
-
- BUG_ON(ac->avail > 0 || !n);
- shared = READ_ONCE(n->shared);
- if (!n->free_objects && (!shared || !shared->avail))
- goto direct_grow;
-
- raw_spin_lock(&n->list_lock);
- shared = READ_ONCE(n->shared);
-
- /* See if we can refill from the shared array */
- if (shared && transfer_objects(ac, shared, batchcount)) {
- shared->touched = 1;
- goto alloc_done;
- }
-
- while (batchcount > 0) {
- /* Get the slab the allocation is to come from. */
- slab = get_first_slab(n, false);
- if (!slab)
- goto must_grow;
-
- check_spinlock_acquired(cachep);
-
- batchcount = alloc_block(cachep, ac, slab, batchcount);
- fixup_slab_list(cachep, n, slab, &list);
- }
-
-must_grow:
- n->free_objects -= ac->avail;
-alloc_done:
- raw_spin_unlock(&n->list_lock);
- fixup_objfreelist_debug(cachep, &list);
-
-direct_grow:
- if (unlikely(!ac->avail)) {
- /* Check if we can use obj in pfmemalloc slab */
- if (sk_memalloc_socks()) {
- void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
-
- if (obj)
- return obj;
- }
-
- slab = cache_grow_begin(cachep, gfp_exact_node(flags), node);
-
- /*
- * cache_grow_begin() can reenable interrupts,
- * then ac could change.
- */
- ac = cpu_cache_get(cachep);
- if (!ac->avail && slab)
- alloc_block(cachep, ac, slab, batchcount);
- cache_grow_end(cachep, slab);
-
- if (!ac->avail)
- return NULL;
- }
- ac->touched = 1;
-
- return ac->entry[--ac->avail];
-}
-
-#if DEBUG
-static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
- gfp_t flags, void *objp, unsigned long caller)
-{
- WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
- if (!objp || is_kfence_address(objp))
- return objp;
- if (cachep->flags & SLAB_POISON) {
- check_poison_obj(cachep, objp);
- slab_kernel_map(cachep, objp, 1);
- poison_obj(cachep, objp, POISON_INUSE);
- }
- if (cachep->flags & SLAB_STORE_USER)
- *dbg_userword(cachep, objp) = (void *)caller;
-
- if (cachep->flags & SLAB_RED_ZONE) {
- if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
- *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
- slab_error(cachep, "double free, or memory outside object was overwritten");
- pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
- objp, *dbg_redzone1(cachep, objp),
- *dbg_redzone2(cachep, objp));
- }
- *dbg_redzone1(cachep, objp) = RED_ACTIVE;
- *dbg_redzone2(cachep, objp) = RED_ACTIVE;
- }
-
- objp += obj_offset(cachep);
- if (cachep->ctor && cachep->flags & SLAB_POISON)
- cachep->ctor(objp);
- if ((unsigned long)objp & (arch_slab_minalign() - 1)) {
- pr_err("0x%px: not aligned to arch_slab_minalign()=%u\n", objp,
- arch_slab_minalign());
- }
- return objp;
-}
-#else
-#define cache_alloc_debugcheck_after(a, b, objp, d) (objp)
-#endif
-
-static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
-{
- void *objp;
- struct array_cache *ac;
-
- check_irq_off();
-
- ac = cpu_cache_get(cachep);
- if (likely(ac->avail)) {
- ac->touched = 1;
- objp = ac->entry[--ac->avail];
-
- STATS_INC_ALLOCHIT(cachep);
- goto out;
- }
-
- STATS_INC_ALLOCMISS(cachep);
- objp = cache_alloc_refill(cachep, flags);
- /*
- * the 'ac' may be updated by cache_alloc_refill(),
- * and kmemleak_erase() requires its correct value.
- */
- ac = cpu_cache_get(cachep);
-
-out:
- /*
- * To avoid a false negative, if an object that is in one of the
- * per-CPU caches is leaked, we need to make sure kmemleak doesn't
- * treat the array pointers as a reference to the object.
- */
- if (objp)
- kmemleak_erase(&ac->entry[ac->avail]);
- return objp;
-}
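The per-CPU array cache consulted above is in essence a LIFO stack of free object pointers: allocation pops the most recently freed (cache-warm) entry and freeing pushes one back. A minimal sketch of that behaviour with hypothetical names, not the kernel's actual struct array_cache:

#include <linux/types.h>

struct array_cache_sketch {
	unsigned int avail;	/* number of cached free objects */
	unsigned int limit;	/* capacity before a batch must be flushed */
	void *entry[];		/* entry[0..avail-1] hold free objects */
};

static inline void *ac_pop(struct array_cache_sketch *ac)
{
	return ac->avail ? ac->entry[--ac->avail] : NULL;	/* LIFO pop */
}

static inline bool ac_push(struct array_cache_sketch *ac, void *obj)
{
	if (ac->avail >= ac->limit)
		return false;	/* caller flushes a batch to the node lists */
	ac->entry[ac->avail++] = obj;
	return true;
}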
-
-#ifdef CONFIG_NUMA
-static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
-
-/*
- * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
- *
- * If we are in_interrupt, then process context, including cpusets and
- * mempolicy, may not apply and should not be used for allocation policy.
- */
-static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
-{
- int nid_alloc, nid_here;
-
- if (in_interrupt() || (flags & __GFP_THISNODE))
- return NULL;
- nid_alloc = nid_here = numa_mem_id();
- if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
- nid_alloc = cpuset_slab_spread_node();
- else if (current->mempolicy)
- nid_alloc = mempolicy_slab_node();
- if (nid_alloc != nid_here)
- return ____cache_alloc_node(cachep, flags, nid_alloc);
- return NULL;
-}
-
-/*
- * Fallback function if there was no memory available and no objects on a
- * certain node and fallback is permitted. First we scan all the
- * available nodes for available objects. If that fails then we
- * perform an allocation without specifying a node. This allows the page
- * allocator to do its reclaim / fallback magic. We then insert the
- * slab into the proper nodelist and then allocate from it.
- */
-static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
-{
- struct zonelist *zonelist;
- struct zoneref *z;
- struct zone *zone;
- enum zone_type highest_zoneidx = gfp_zone(flags);
- void *obj = NULL;
- struct slab *slab;
- int nid;
- unsigned int cpuset_mems_cookie;
-
- if (flags & __GFP_THISNODE)
- return NULL;
-
-retry_cpuset:
- cpuset_mems_cookie = read_mems_allowed_begin();
- zonelist = node_zonelist(mempolicy_slab_node(), flags);
-
-retry:
- /*
- * Look through allowed nodes for objects available
- * from existing per node queues.
- */
- for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
- nid = zone_to_nid(zone);
-
- if (cpuset_zone_allowed(zone, flags) &&
- get_node(cache, nid) &&
- get_node(cache, nid)->free_objects) {
- obj = ____cache_alloc_node(cache,
- gfp_exact_node(flags), nid);
- if (obj)
- break;
- }
- }
-
- if (!obj) {
- /*
- * This allocation will be performed within the constraints
- * of the current cpuset / memory policy requirements.
- * We may trigger various forms of reclaim on the allowed
- * set and go into memory reserves if necessary.
- */
- slab = cache_grow_begin(cache, flags, numa_mem_id());
- cache_grow_end(cache, slab);
- if (slab) {
- nid = slab_nid(slab);
- obj = ____cache_alloc_node(cache,
- gfp_exact_node(flags), nid);
-
- /*
- * Another processor may allocate the objects in
- * the slab since we are not holding any locks.
- */
- if (!obj)
- goto retry;
- }
- }
-
- if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
- goto retry_cpuset;
- return obj;
-}
-
-/*
- * An interface to enable slab creation on nodeid
- */
-static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
- int nodeid)
-{
- struct slab *slab;
- struct kmem_cache_node *n;
- void *obj = NULL;
- void *list = NULL;
-
- VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
- n = get_node(cachep, nodeid);
- BUG_ON(!n);
-
- check_irq_off();
- raw_spin_lock(&n->list_lock);
- slab = get_first_slab(n, false);
- if (!slab)
- goto must_grow;
-
- check_spinlock_acquired_node(cachep, nodeid);
-
- STATS_INC_NODEALLOCS(cachep);
- STATS_INC_ACTIVE(cachep);
- STATS_SET_HIGH(cachep);
-
- BUG_ON(slab->active == cachep->num);
-
- obj = slab_get_obj(cachep, slab);
- n->free_objects--;
-
- fixup_slab_list(cachep, n, slab, &list);
-
- raw_spin_unlock(&n->list_lock);
- fixup_objfreelist_debug(cachep, &list);
- return obj;
-
-must_grow:
- raw_spin_unlock(&n->list_lock);
- slab = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
- if (slab) {
- /* This slab isn't counted yet so don't update free_objects */
- obj = slab_get_obj(cachep, slab);
- }
- cache_grow_end(cachep, slab);
-
- return obj ? obj : fallback_alloc(cachep, flags);
-}
-
-static __always_inline void *
-__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
-{
- void *objp = NULL;
- int slab_node = numa_mem_id();
-
- if (nodeid == NUMA_NO_NODE) {
- if (current->mempolicy || cpuset_do_slab_mem_spread()) {
- objp = alternate_node_alloc(cachep, flags);
- if (objp)
- goto out;
- }
- /*
- * Use the locally cached objects if possible.
- * However ____cache_alloc does not allow fallback
- * to other nodes. It may fail while we still have
- * objects on other nodes available.
- */
- objp = ____cache_alloc(cachep, flags);
- nodeid = slab_node;
- } else if (nodeid == slab_node) {
- objp = ____cache_alloc(cachep, flags);
- } else if (!get_node(cachep, nodeid)) {
- /* Node not bootstrapped yet */
- objp = fallback_alloc(cachep, flags);
- goto out;
- }
-
- /*
- * We may just have run out of memory on the local node.
- * ____cache_alloc_node() knows how to locate memory on other nodes
- */
- if (!objp)
- objp = ____cache_alloc_node(cachep, flags, nodeid);
-out:
- return objp;
-}
-#else
-
-static __always_inline void *
-__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid __maybe_unused)
-{
- return ____cache_alloc(cachep, flags);
-}
-
-#endif /* CONFIG_NUMA */
-
-static __always_inline void *
-slab_alloc_node(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
- int nodeid, size_t orig_size, unsigned long caller)
-{
- unsigned long save_flags;
- void *objp;
- struct obj_cgroup *objcg = NULL;
- bool init = false;
-
- flags &= gfp_allowed_mask;
- cachep = slab_pre_alloc_hook(cachep, lru, &objcg, 1, flags);
- if (unlikely(!cachep))
- return NULL;
-
- objp = kfence_alloc(cachep, orig_size, flags);
- if (unlikely(objp))
- goto out;
-
- local_irq_save(save_flags);
- objp = __do_cache_alloc(cachep, flags, nodeid);
- local_irq_restore(save_flags);
- objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
- prefetchw(objp);
- init = slab_want_init_on_alloc(flags, cachep);
-
-out:
- slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init,
- cachep->object_size);
- return objp;
-}
-
-static __always_inline void *
-slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
- size_t orig_size, unsigned long caller)
-{
- return slab_alloc_node(cachep, lru, flags, NUMA_NO_NODE, orig_size,
- caller);
-}
-
-/*
- * Caller needs to acquire correct kmem_cache_node's list_lock
- * @list: list collecting the detached free slabs, to be freed by the caller
- */
-static void free_block(struct kmem_cache *cachep, void **objpp,
- int nr_objects, int node, struct list_head *list)
-{
- int i;
- struct kmem_cache_node *n = get_node(cachep, node);
- struct slab *slab;
-
- n->free_objects += nr_objects;
-
- for (i = 0; i < nr_objects; i++) {
- void *objp;
- struct slab *slab;
-
- objp = objpp[i];
-
- slab = virt_to_slab(objp);
- list_del(&slab->slab_list);
- check_spinlock_acquired_node(cachep, node);
- slab_put_obj(cachep, slab, objp);
- STATS_DEC_ACTIVE(cachep);
-
- /* fixup slab chains */
- if (slab->active == 0) {
- list_add(&slab->slab_list, &n->slabs_free);
- n->free_slabs++;
- } else {
- /* Unconditionally move a slab to the end of the
- * partial list on free - maximum time for the
- * other objects to be freed, too.
- */
- list_add_tail(&slab->slab_list, &n->slabs_partial);
- }
- }
-
- while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
- n->free_objects -= cachep->num;
-
- slab = list_last_entry(&n->slabs_free, struct slab, slab_list);
- list_move(&slab->slab_list, list);
- n->free_slabs--;
- n->total_slabs--;
- }
-}
-
-static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
-{
- int batchcount;
- struct kmem_cache_node *n;
- int node = numa_mem_id();
- LIST_HEAD(list);
-
- batchcount = ac->batchcount;
-
- check_irq_off();
- n = get_node(cachep, node);
- raw_spin_lock(&n->list_lock);
- if (n->shared) {
- struct array_cache *shared_array = n->shared;
- int max = shared_array->limit - shared_array->avail;
- if (max) {
- if (batchcount > max)
- batchcount = max;
- memcpy(&(shared_array->entry[shared_array->avail]),
- ac->entry, sizeof(void *) * batchcount);
- shared_array->avail += batchcount;
- goto free_done;
- }
- }
-
- free_block(cachep, ac->entry, batchcount, node, &list);
-free_done:
-#if STATS
- {
- int i = 0;
- struct slab *slab;
-
- list_for_each_entry(slab, &n->slabs_free, slab_list) {
- BUG_ON(slab->active);
-
- i++;
- }
- STATS_SET_FREEABLE(cachep, i);
- }
-#endif
- raw_spin_unlock(&n->list_lock);
- ac->avail -= batchcount;
- memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
- slabs_destroy(cachep, &list);
-}
-
-/*
- * Release an obj back to its cache. If the obj has a constructed state, it must
- * be in this state _before_ it is released. Called with disabled ints.
- */
-static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
- unsigned long caller)
-{
- bool init;
-
- memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1);
-
- if (is_kfence_address(objp)) {
- kmemleak_free_recursive(objp, cachep->flags);
- __kfence_free(objp);
- return;
- }
-
- /*
- * As memory initialization might be integrated into KASAN,
- * kasan_slab_free and initialization memset must be
- * kept together to avoid discrepancies in behavior.
- */
- init = slab_want_init_on_free(cachep);
- if (init && !kasan_has_integrated_init())
- memset(objp, 0, cachep->object_size);
- /* KASAN might put objp into memory quarantine, delaying its reuse. */
- if (kasan_slab_free(cachep, objp, init))
- return;
-
- /* Use KCSAN to help debug racy use-after-free. */
- if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU))
- __kcsan_check_access(objp, cachep->object_size,
- KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
-
- ___cache_free(cachep, objp, caller);
-}
-
-void ___cache_free(struct kmem_cache *cachep, void *objp,
- unsigned long caller)
-{
- struct array_cache *ac = cpu_cache_get(cachep);
-
- check_irq_off();
- kmemleak_free_recursive(objp, cachep->flags);
- objp = cache_free_debugcheck(cachep, objp, caller);
-
- /*
- * Skip calling cache_free_alien() when the platform is not NUMA.
- * This avoids the cache misses incurred by accessing the slab (a
- * per-page memory reference) just to get the nodeid. Instead a global
- * variable, which is most likely already present in the CPU cache,
- * is used to skip the call.
- */
- if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
- return;
-
- if (ac->avail < ac->limit) {
- STATS_INC_FREEHIT(cachep);
- } else {
- STATS_INC_FREEMISS(cachep);
- cache_flusharray(cachep, ac);
- }
-
- if (sk_memalloc_socks()) {
- struct slab *slab = virt_to_slab(objp);
-
- if (unlikely(slab_test_pfmemalloc(slab))) {
- cache_free_pfmemalloc(cachep, slab, objp);
- return;
- }
- }
-
- __free_one(ac, objp);
-}
-
-static __always_inline
-void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
- gfp_t flags)
-{
- void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_);
-
- trace_kmem_cache_alloc(_RET_IP_, ret, cachep, flags, NUMA_NO_NODE);
-
- return ret;
-}
-
-void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
-{
- return __kmem_cache_alloc_lru(cachep, NULL, flags);
-}
-EXPORT_SYMBOL(kmem_cache_alloc);
-
-void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
- gfp_t flags)
-{
- return __kmem_cache_alloc_lru(cachep, lru, flags);
-}
-EXPORT_SYMBOL(kmem_cache_alloc_lru);
-
-static __always_inline void
-cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
- size_t size, void **p, unsigned long caller)
-{
- size_t i;
-
- for (i = 0; i < size; i++)
- p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
-}
-
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
- void **p)
-{
- struct obj_cgroup *objcg = NULL;
- unsigned long irqflags;
- size_t i;
-
- s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
- if (!s)
- return 0;
-
- local_irq_save(irqflags);
- for (i = 0; i < size; i++) {
- void *objp = kfence_alloc(s, s->object_size, flags) ?:
- __do_cache_alloc(s, flags, NUMA_NO_NODE);
-
- if (unlikely(!objp))
- goto error;
- p[i] = objp;
- }
- local_irq_restore(irqflags);
-
- cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
-
- /*
- * memcg and kmem_cache debug support and memory initialization.
- * Done outside of the IRQ disabled section.
- */
- slab_post_alloc_hook(s, objcg, flags, size, p,
- slab_want_init_on_alloc(flags, s), s->object_size);
- /* FIXME: Trace call missing. Christoph would like a bulk variant */
- return size;
-error:
- local_irq_restore(irqflags);
- cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
- slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
- kmem_cache_free_bulk(s, i, p);
- return 0;
-}
-EXPORT_SYMBOL(kmem_cache_alloc_bulk);
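A caller-side sketch of the bulk API exported above, illustrative only; foo_cachep is a hypothetical cache created elsewhere with kmem_cache_create():

static int foo_alloc_batch(void)
{
	void *objs[16];
	int n;

	/* All-or-nothing: returns the requested count on success, 0 on failure. */
	n = kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs);
	if (!n)
		return -ENOMEM;

	/* ... use objs[0..n-1] ... */

	kmem_cache_free_bulk(foo_cachep, n, objs);
	return 0;
}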
-
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- *
- * Return: pointer to the new object or %NULL in case of error
- */
-void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
-{
- void *ret = slab_alloc_node(cachep, NULL, flags, nodeid, cachep->object_size, _RET_IP_);
-
- trace_kmem_cache_alloc(_RET_IP_, ret, cachep, flags, nodeid);
-
- return ret;
-}
-EXPORT_SYMBOL(kmem_cache_alloc_node);
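A brief sketch of node-local allocation as described in the kernel-doc above, illustrative only; foo_cachep and struct foo are hypothetical and the target node is derived from the CPU that will touch the object:

static struct foo *foo_alloc_for_cpu(int cpu)
{
	int node = cpu_to_node(cpu);

	/* Falls back to other nodes unless __GFP_THISNODE is also passed. */
	return kmem_cache_alloc_node(foo_cachep, GFP_KERNEL, node);
}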
-
-void *__kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
- int nodeid, size_t orig_size,
- unsigned long caller)
-{
- return slab_alloc_node(cachep, NULL, flags, nodeid,
- orig_size, caller);
-}
-
-#ifdef CONFIG_PRINTK
-void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
-{
- struct kmem_cache *cachep;
- unsigned int objnr;
- void *objp;
-
- kpp->kp_ptr = object;
- kpp->kp_slab = slab;
- cachep = slab->slab_cache;
- kpp->kp_slab_cache = cachep;
- objp = object - obj_offset(cachep);
- kpp->kp_data_offset = obj_offset(cachep);
- slab = virt_to_slab(objp);
- objnr = obj_to_index(cachep, slab, objp);
- objp = index_to_obj(cachep, slab, objnr);
- kpp->kp_objp = objp;
- if (DEBUG && cachep->flags & SLAB_STORE_USER)
- kpp->kp_ret = *dbg_userword(cachep, objp);
-}
-#endif
-
-static __always_inline
-void __do_kmem_cache_free(struct kmem_cache *cachep, void *objp,
- unsigned long caller)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- debug_check_no_locks_freed(objp, cachep->object_size);
- if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
- debug_check_no_obj_freed(objp, cachep->object_size);
- __cache_free(cachep, objp, caller);
- local_irq_restore(flags);
-}
-
-void __kmem_cache_free(struct kmem_cache *cachep, void *objp,
- unsigned long caller)
-{
- __do_kmem_cache_free(cachep, objp, caller);
-}
-
-/**
- * kmem_cache_free - Deallocate an object
- * @cachep: The cache the allocation was from.
- * @objp: The previously allocated object.
- *
- * Free an object which was previously allocated from this
- * cache.
- */
-void kmem_cache_free(struct kmem_cache *cachep, void *objp)
-{
- cachep = cache_from_obj(cachep, objp);
- if (!cachep)
- return;
-
- trace_kmem_cache_free(_RET_IP_, objp, cachep);
- __do_kmem_cache_free(cachep, objp, _RET_IP_);
-}
-EXPORT_SYMBOL(kmem_cache_free);
-
-void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- for (int i = 0; i < size; i++) {
- void *objp = p[i];
- struct kmem_cache *s;
-
- if (!orig_s) {
- struct folio *folio = virt_to_folio(objp);
-
- /* called via kfree_bulk */
- if (!folio_test_slab(folio)) {
- local_irq_restore(flags);
- free_large_kmalloc(folio, objp);
- local_irq_save(flags);
- continue;
- }
- s = folio_slab(folio)->slab_cache;
- } else {
- s = cache_from_obj(orig_s, objp);
- }
-
- if (!s)
- continue;
-
- debug_check_no_locks_freed(objp, s->object_size);
- if (!(s->flags & SLAB_DEBUG_OBJECTS))
- debug_check_no_obj_freed(objp, s->object_size);
-
- __cache_free(s, objp, _RET_IP_);
- }
- local_irq_restore(flags);
-
- /* FIXME: add tracing */
-}
-EXPORT_SYMBOL(kmem_cache_free_bulk);
-
-/*
- * This initializes kmem_cache_node or resizes various caches for all nodes.
- */
-static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
-{
- int ret;
- int node;
- struct kmem_cache_node *n;
-
- for_each_online_node(node) {
- ret = setup_kmem_cache_node(cachep, node, gfp, true);
- if (ret)
- goto fail;
-
- }
-
- return 0;
-
-fail:
- if (!cachep->list.next) {
- /* Cache is not active yet. Roll back what we did */
- node--;
- while (node >= 0) {
- n = get_node(cachep, node);
- if (n) {
- kfree(n->shared);
- free_alien_cache(n->alien);
- kfree(n);
- cachep->node[node] = NULL;
- }
- node--;
- }
- }
- return -ENOMEM;
-}
-
-/* Always called with the slab_mutex held */
-static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
- int batchcount, int shared, gfp_t gfp)
-{
- struct array_cache __percpu *cpu_cache, *prev;
- int cpu;
-
- cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
- if (!cpu_cache)
- return -ENOMEM;
-
- prev = cachep->cpu_cache;
- cachep->cpu_cache = cpu_cache;
- /*
- * Without a previous cpu_cache there's no need to synchronize remote
- * cpus, so skip the IPIs.
- */
- if (prev)
- kick_all_cpus_sync();
-
- check_irq_on();
- cachep->batchcount = batchcount;
- cachep->limit = limit;
- cachep->shared = shared;
-
- if (!prev)
- goto setup_node;
-
- for_each_online_cpu(cpu) {
- LIST_HEAD(list);
- int node;
- struct kmem_cache_node *n;
- struct array_cache *ac = per_cpu_ptr(prev, cpu);
-
- node = cpu_to_mem(cpu);
- n = get_node(cachep, node);
- raw_spin_lock_irq(&n->list_lock);
- free_block(cachep, ac->entry, ac->avail, node, &list);
- raw_spin_unlock_irq(&n->list_lock);
- slabs_destroy(cachep, &list);
- }
- free_percpu(prev);
-
-setup_node:
- return setup_kmem_cache_nodes(cachep, gfp);
-}
-
-/* Called with slab_mutex held always */
-static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
-{
- int err;
- int limit = 0;
- int shared = 0;
- int batchcount = 0;
-
- err = cache_random_seq_create(cachep, cachep->num, gfp);
- if (err)
- goto end;
-
- /*
- * The head array serves three purposes:
- * - create a LIFO ordering, i.e. return objects that are cache-warm
- * - reduce the number of spinlock operations.
- * - reduce the number of linked list operations on the slab and
- * bufctl chains: array operations are cheaper.
- * The numbers are guessed; we should auto-tune as described by
- * Bonwick.
- */
- if (cachep->size > 131072)
- limit = 1;
- else if (cachep->size > PAGE_SIZE)
- limit = 8;
- else if (cachep->size > 1024)
- limit = 24;
- else if (cachep->size > 256)
- limit = 54;
- else
- limit = 120;
-
- /*
- * CPU bound tasks (e.g. network routing) can exhibit cpu bound
- * allocation behaviour: Most allocs on one cpu, most free operations
- * on another cpu. For these cases, an efficient object passing between
- * cpus is necessary. This is provided by a shared array. The array
- * replaces Bonwick's magazine layer.
- * On uniprocessor, it's functionally equivalent (but less efficient)
- * to a larger limit. Thus disabled by default.
- */
- shared = 0;
- if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
- shared = 8;
-
-#if DEBUG
- /*
- * With debugging enabled, a large batchcount leads to excessively long
- * periods with local interrupts disabled. Limit the batchcount.
- */
- if (limit > 32)
- limit = 32;
-#endif
- batchcount = (limit + 1) / 2;
- err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
-end:
- if (err)
- pr_err("enable_cpucache failed for %s, error %d\n",
- cachep->name, -err);
- return err;
-}
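For reference, the heuristic above maps the object size to a per-CPU limit as in the sketch below, with batchcount then set to (limit + 1) / 2; a 512-byte cache, for example, gets limit 54 and batchcount 27. Illustrative only; PAGE_SIZE is assumed to be 4 KiB here:

static int slab_limit_for(size_t size)
{
	if (size > 131072)
		return 1;
	if (size > 4096)	/* > PAGE_SIZE, assuming 4 KiB pages */
		return 8;
	if (size > 1024)
		return 24;
	if (size > 256)
		return 54;
	return 120;
}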
-
-/*
- * Drain an array if it contains any elements taking the node lock only if
- * necessary. Note that the node listlock also protects the array_cache
- * if drain_array() is used on the shared array.
- */
-static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
- struct array_cache *ac, int node)
-{
- LIST_HEAD(list);
-
- /* ac from n->shared can be freed if we don't hold the slab_mutex. */
- check_mutex_acquired();
-
- if (!ac || !ac->avail)
- return;
-
- if (ac->touched) {
- ac->touched = 0;
- return;
- }
-
- raw_spin_lock_irq(&n->list_lock);
- drain_array_locked(cachep, ac, node, false, &list);
- raw_spin_unlock_irq(&n->list_lock);
-
- slabs_destroy(cachep, &list);
-}
-
-/**
- * cache_reap - Reclaim memory from caches.
- * @w: work descriptor
- *
- * Called from workqueue/eventd every few seconds.
- * Purpose:
- * - clear the per-cpu caches for this CPU.
- * - return freeable pages to the main free memory pool.
- *
- * If we cannot acquire the cache chain mutex then just give up - we'll try
- * again on the next iteration.
- */
-static void cache_reap(struct work_struct *w)
-{
- struct kmem_cache *searchp;
- struct kmem_cache_node *n;
- int node = numa_mem_id();
- struct delayed_work *work = to_delayed_work(w);
-
- if (!mutex_trylock(&slab_mutex))
- /* Give up. Setup the next iteration. */
- goto out;
-
- list_for_each_entry(searchp, &slab_caches, list) {
- check_irq_on();
-
- /*
- * We only take the node lock if absolutely necessary and we
- * have established with reasonable certainty that
- * we can do some work if the lock was obtained.
- */
- n = get_node(searchp, node);
-
- reap_alien(searchp, n);
-
- drain_array(searchp, n, cpu_cache_get(searchp), node);
-
- /*
- * These are racy checks but it does not matter
- * if we skip one check or scan twice.
- */
- if (time_after(n->next_reap, jiffies))
- goto next;
-
- n->next_reap = jiffies + REAPTIMEOUT_NODE;
-
- drain_array(searchp, n, n->shared, node);
-
- if (n->free_touched)
- n->free_touched = 0;
- else {
- int freed;
-
- freed = drain_freelist(searchp, n, (n->free_limit +
- 5 * searchp->num - 1) / (5 * searchp->num));
- STATS_ADD_REAPED(searchp, freed);
- }
-next:
- cond_resched();
- }
- check_irq_on();
- mutex_unlock(&slab_mutex);
- next_reap_node();
-out:
- /* Set up the next iteration */
- schedule_delayed_work_on(smp_processor_id(), work,
- round_jiffies_relative(REAPTIMEOUT_AC));
-}
-
-void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
-{
- unsigned long active_objs, num_objs, active_slabs;
- unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
- unsigned long free_slabs = 0;
- int node;
- struct kmem_cache_node *n;
-
- for_each_kmem_cache_node(cachep, node, n) {
- check_irq_on();
- raw_spin_lock_irq(&n->list_lock);
-
- total_slabs += n->total_slabs;
- free_slabs += n->free_slabs;
- free_objs += n->free_objects;
-
- if (n->shared)
- shared_avail += n->shared->avail;
-
- raw_spin_unlock_irq(&n->list_lock);
- }
- num_objs = total_slabs * cachep->num;
- active_slabs = total_slabs - free_slabs;
- active_objs = num_objs - free_objs;
-
- sinfo->active_objs = active_objs;
- sinfo->num_objs = num_objs;
- sinfo->active_slabs = active_slabs;
- sinfo->num_slabs = total_slabs;
- sinfo->shared_avail = shared_avail;
- sinfo->limit = cachep->limit;
- sinfo->batchcount = cachep->batchcount;
- sinfo->shared = cachep->shared;
- sinfo->objects_per_slab = cachep->num;
- sinfo->cache_order = cachep->gfporder;
-}
-
-void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
-{
-#if STATS
- { /* node stats */
- unsigned long high = cachep->high_mark;
- unsigned long allocs = cachep->num_allocations;
- unsigned long grown = cachep->grown;
- unsigned long reaped = cachep->reaped;
- unsigned long errors = cachep->errors;
- unsigned long max_freeable = cachep->max_freeable;
- unsigned long node_allocs = cachep->node_allocs;
- unsigned long node_frees = cachep->node_frees;
- unsigned long overflows = cachep->node_overflow;
-
- seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
- allocs, high, grown,
- reaped, errors, max_freeable, node_allocs,
- node_frees, overflows);
- }
- /* cpu stats */
- {
- unsigned long allochit = atomic_read(&cachep->allochit);
- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
- unsigned long freehit = atomic_read(&cachep->freehit);
- unsigned long freemiss = atomic_read(&cachep->freemiss);
-
- seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
- allochit, allocmiss, freehit, freemiss);
- }
-#endif
-}
-
-#define MAX_SLABINFO_WRITE 128
-/**
- * slabinfo_write - Tuning for the slab allocator
- * @file: unused
- * @buffer: user buffer
- * @count: data length
- * @ppos: unused
- *
- * Return: %0 on success, negative error code otherwise.
- */
-ssize_t slabinfo_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
- int limit, batchcount, shared, res;
- struct kmem_cache *cachep;
-
- if (count > MAX_SLABINFO_WRITE)
- return -EINVAL;
- if (copy_from_user(&kbuf, buffer, count))
- return -EFAULT;
- kbuf[MAX_SLABINFO_WRITE] = '\0';
-
- tmp = strchr(kbuf, ' ');
- if (!tmp)
- return -EINVAL;
- *tmp = '\0';
- tmp++;
- if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
- return -EINVAL;
-
- /* Find the cache in the chain of caches. */
- mutex_lock(&slab_mutex);
- res = -EINVAL;
- list_for_each_entry(cachep, &slab_caches, list) {
- if (!strcmp(cachep->name, kbuf)) {
- if (limit < 1 || batchcount < 1 ||
- batchcount > limit || shared < 0) {
- res = 0;
- } else {
- res = do_tune_cpucache(cachep, limit,
- batchcount, shared,
- GFP_KERNEL);
- }
- break;
- }
- }
- mutex_unlock(&slab_mutex);
- if (res >= 0)
- res = count;
- return res;
-}
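The writer above expects a single line of the form "<name> <limit> <batchcount> <shared>". Below is a userspace sketch of tuning a cache through /proc/slabinfo, illustrative only; the cache name and values are examples:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int tune_slab(const char *line)	/* e.g. "dentry 120 60 8\n" */
{
	int fd = open("/proc/slabinfo", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, line, strlen(line));
	close(fd);
	return ret < 0 ? -1 : 0;
}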
-
-#ifdef CONFIG_HARDENED_USERCOPY
-/*
- * Rejects incorrectly sized objects and objects that are to be copied
- * to/from userspace but do not fall entirely within the containing slab
- * cache's usercopy region.
- *
- * Aborts via usercopy_abort(), naming the offending cache, if the
- * check fails.
- */
-void __check_heap_object(const void *ptr, unsigned long n,
- const struct slab *slab, bool to_user)
-{
- struct kmem_cache *cachep;
- unsigned int objnr;
- unsigned long offset;
-
- ptr = kasan_reset_tag(ptr);
-
- /* Find and validate object. */
- cachep = slab->slab_cache;
- objnr = obj_to_index(cachep, slab, (void *)ptr);
- BUG_ON(objnr >= cachep->num);
-
- /* Find offset within object. */
- if (is_kfence_address(ptr))
- offset = ptr - kfence_object_start(ptr);
- else
- offset = ptr - index_to_obj(cachep, slab, objnr) - obj_offset(cachep);
-
- /* Allow address range falling entirely within usercopy region. */
- if (offset >= cachep->useroffset &&
- offset - cachep->useroffset <= cachep->usersize &&
- n <= cachep->useroffset - offset + cachep->usersize)
- return;
-
- usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
-}
-#endif /* CONFIG_HARDENED_USERCOPY */
diff --git a/mm/slab.h b/mm/slab.h
index 3d07fb428393..54deeb0428c6 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -1,10 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
+
+#include <linux/reciprocal_div.h>
+#include <linux/list_lru.h>
+#include <linux/local_lock.h>
+#include <linux/random.h>
+#include <linux/kobject.h>
+#include <linux/sched/mm.h>
+#include <linux/memcontrol.h>
+#include <linux/kfence.h>
+#include <linux/kasan.h>
+
/*
* Internal slab definitions
*/
-void __init kmem_cache_init(void);
#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
@@ -42,21 +52,6 @@ typedef union {
struct slab {
unsigned long __page_flags;
-#if defined(CONFIG_SLAB)
-
- struct kmem_cache *slab_cache;
- union {
- struct {
- struct list_head slab_list;
- void *freelist; /* array of free object indexes */
- void *s_mem; /* first object */
- };
- struct rcu_head rcu_head;
- };
- unsigned int active;
-
-#elif defined(CONFIG_SLUB)
-
struct kmem_cache *slab_cache;
union {
struct {
@@ -91,10 +86,6 @@ struct slab {
};
unsigned int __unused;
-#else
-#error "Unexpected slab allocator configured"
-#endif
-
atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
unsigned long memcg_data;
@@ -111,7 +102,7 @@ SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
-#if defined(system_has_freelist_aba) && defined(CONFIG_SLUB)
+#if defined(system_has_freelist_aba)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif
@@ -228,21 +219,138 @@ static inline size_t slab_size(const struct slab *slab)
return PAGE_SIZE << slab_order(slab);
}
-#ifdef CONFIG_SLAB
-#include <linux/slab_def.h>
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+#define slub_percpu_partial(c) ((c)->partial)
+
+#define slub_set_percpu_partial(c, p) \
+({ \
+ slub_percpu_partial(c) = (p)->next; \
+})
+
+#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
+#else
+#define slub_percpu_partial(c) NULL
+
+#define slub_set_percpu_partial(c, p)
+
+#define slub_percpu_partial_read_once(c) NULL
+#endif // CONFIG_SLUB_CPU_PARTIAL
+
+/*
+ * Word size structure that can be atomically updated or read and that
+ * contains both the order and the number of objects that a slab of the
+ * given order would contain.
+ */
+struct kmem_cache_order_objects {
+ unsigned int x;
+};
+
+/*
+ * Slab cache management.
+ */
+struct kmem_cache {
+#ifndef CONFIG_SLUB_TINY
+ struct kmem_cache_cpu __percpu *cpu_slab;
+#endif
+ /* Used for retrieving partial slabs, etc. */
+ slab_flags_t flags;
+ unsigned long min_partial;
+ unsigned int size; /* Object size including metadata */
+ unsigned int object_size; /* Object size without metadata */
+ struct reciprocal_value reciprocal_size;
+ unsigned int offset; /* Free pointer offset */
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+ /* Number of per cpu partial objects to keep around */
+ unsigned int cpu_partial;
+ /* Number of per cpu partial slabs to keep around */
+ unsigned int cpu_partial_slabs;
+#endif
+ struct kmem_cache_order_objects oo;
+
+ /* Allocation and freeing of slabs */
+ struct kmem_cache_order_objects min;
+ gfp_t allocflags; /* gfp flags to use on each alloc */
+ int refcount; /* Refcount for slab cache destroy */
+ void (*ctor)(void *object); /* Object constructor */
+ unsigned int inuse; /* Offset to metadata */
+ unsigned int align; /* Alignment */
+ unsigned int red_left_pad; /* Left redzone padding size */
+ const char *name; /* Name (only for display!) */
+ struct list_head list; /* List of slab caches */
+#ifdef CONFIG_SYSFS
+ struct kobject kobj; /* For sysfs */
+#endif
+#ifdef CONFIG_SLAB_FREELIST_HARDENED
+ unsigned long random;
#endif
-#ifdef CONFIG_SLUB
-#include <linux/slub_def.h>
+#ifdef CONFIG_NUMA
+ /*
+ * Defragmentation by allocating from a remote node.
+ */
+ unsigned int remote_node_defrag_ratio;
#endif
-#include <linux/memcontrol.h>
-#include <linux/fault-inject.h>
-#include <linux/kasan.h>
-#include <linux/kmemleak.h>
-#include <linux/random.h>
-#include <linux/sched/mm.h>
-#include <linux/list_lru.h>
+#ifdef CONFIG_SLAB_FREELIST_RANDOM
+ unsigned int *random_seq;
+#endif
+
+#ifdef CONFIG_KASAN_GENERIC
+ struct kasan_cache kasan_info;
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+ unsigned int useroffset; /* Usercopy region offset */
+ unsigned int usersize; /* Usercopy region size */
+#endif
+
+ struct kmem_cache_node *node[MAX_NUMNODES];
+};
+
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
+#define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_unlink(struct kmem_cache *s);
+void sysfs_slab_release(struct kmem_cache *s);
+#else
+static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
+static inline void sysfs_slab_release(struct kmem_cache *s) { }
+#endif
+
+void *fixup_red_left(struct kmem_cache *s, void *p);
+
+static inline void *nearest_obj(struct kmem_cache *cache,
+ const struct slab *slab, void *x)
+{
+ void *object = x - (x - slab_address(slab)) % cache->size;
+ void *last_object = slab_address(slab) +
+ (slab->objects - 1) * cache->size;
+ void *result = (unlikely(object > last_object)) ? last_object : object;
+
+ result = fixup_red_left(cache, result);
+ return result;
+}
+
+/* Determine object index from a given position */
+static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
+ void *addr, void *obj)
+{
+ return reciprocal_divide(kasan_reset_tag(obj) - addr,
+ cache->reciprocal_size);
+}
+
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+ const struct slab *slab, void *obj)
+{
+ if (is_kfence_address(obj))
+ return 0;
+ return __obj_to_index(cache, slab_address(slab), obj);
+}
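
(Illustrative aside, not part of the patch: obj_to_index() maps an object address to its slot number by dividing its offset from the slab base by the object stride; cache->reciprocal_size is a precomputed reciprocal so reciprocal_divide() turns that division into a multiply and shift. A plain-division sketch of the same mapping, compilable in userspace:)

#include <stdint.h>
#include <stdio.h>

/* Sketch only: slot = (object address - slab base) / object stride. */
static unsigned int ex_obj_to_index(uintptr_t slab_base, unsigned int stride,
                                    uintptr_t obj)
{
        return (unsigned int)((obj - slab_base) / stride);
}

int main(void)
{
        uintptr_t base = 0x1000;
        unsigned int stride = 256;      /* plays the role of cache->size */

        printf("%u\n", ex_obj_to_index(base, stride, base + 3 * stride)); /* prints 3 */
        return 0;
}
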
+
+static inline int objs_per_slab(const struct kmem_cache *cache,
+ const struct slab *slab)
+{
+ return slab->objects;
+}
/*
* State of the slab allocator.
@@ -281,19 +389,39 @@ extern const struct kmalloc_info_struct {
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);
-/* Find the kmalloc slab corresponding for a certain size */
-struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller);
+extern u8 kmalloc_size_index[24];
+
+static inline unsigned int size_index_elem(unsigned int bytes)
+{
+ return (bytes - 1) / 8;
+}
+
+/*
+ * Find the kmem_cache structure that serves a given allocation size.
+ *
+ * This assumes size is larger than zero and not larger than
+ * KMALLOC_MAX_CACHE_SIZE; the caller must check that.
+ */
+static inline struct kmem_cache *
+kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
+{
+ unsigned int index;
+
+ if (size <= 192)
+ index = kmalloc_size_index[size_index_elem(size)];
+ else
+ index = fls(size - 1);
-void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t orig_size,
- unsigned long caller);
-void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
+ return kmalloc_caches[kmalloc_type(flags, caller)][index];
+}
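
(Illustrative aside, not part of the patch: the lookup above is a table walk in 8-byte steps for requests up to 192 bytes, and fls(size - 1), i.e. the next power of two, beyond that. A standalone sketch of the two paths, using __builtin_clz as a stand-in for the kernel's fls():)

#include <stdio.h>

static unsigned int ex_size_index_elem(unsigned int bytes)
{
        return (bytes - 1) / 8;                 /* table slot, 0..23 */
}

static unsigned int ex_fls(unsigned int x)      /* stand-in for fls() */
{
        return x ? 8 * sizeof(x) - __builtin_clz(x) : 0;
}

int main(void)
{
        /* 24-byte request: table slot (24 - 1) / 8 == 2, which holds index 5
         * (the kmalloc-32 cache) in the kmalloc_size_index table. */
        printf("slot for 24 bytes: %u\n", ex_size_index_elem(24));

        /* 1000-byte request: fls(999) == 10, i.e. the kmalloc-1024 cache. */
        printf("index for 1000 bytes: %u\n", ex_fls(1000 - 1));
        return 0;
}
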
gfp_t kmalloc_fix_flags(gfp_t flags);
/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
+void __init kmem_cache_init(void);
void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type,
slab_flags_t flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
@@ -320,26 +448,16 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s)
SLAB_CACHE_DMA32 | SLAB_PANIC | \
SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
-#if defined(CONFIG_DEBUG_SLAB)
-#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
-#elif defined(CONFIG_SLUB_DEBUG)
+#ifdef CONFIG_SLUB_DEBUG
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif
-#if defined(CONFIG_SLAB)
-#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
- SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
- SLAB_ACCOUNT | SLAB_NO_MERGE)
-#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
SLAB_TEMPORARY | SLAB_ACCOUNT | \
SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
-#else
-#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
-#endif
/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
@@ -387,12 +505,6 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos);
-static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
-{
- return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
- NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
-}
-
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
@@ -452,238 +564,32 @@ int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
enum node_stat_item idx, int nr);
-
-static inline void memcg_free_slab_cgroups(struct slab *slab)
-{
- kfree(slab_objcgs(slab));
- slab->memcg_data = 0;
-}
-
-static inline size_t obj_full_size(struct kmem_cache *s)
-{
- /*
- * For each accounted object there is an extra space which is used
- * to store obj_cgroup membership. Charge it too.
- */
- return s->size + sizeof(struct obj_cgroup *);
-}
-
-/*
- * Returns false if the allocation should fail.
- */
-static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
- struct list_lru *lru,
- struct obj_cgroup **objcgp,
- size_t objects, gfp_t flags)
-{
- struct obj_cgroup *objcg;
-
- if (!memcg_kmem_online())
- return true;
-
- if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
- return true;
-
- /*
- * The obtained objcg pointer is safe to use within the current scope,
- * defined by current task or set_active_memcg() pair.
- * obj_cgroup_get() is used to get a permanent reference.
- */
- objcg = current_obj_cgroup();
- if (!objcg)
- return true;
-
- if (lru) {
- int ret;
- struct mem_cgroup *memcg;
-
- memcg = get_mem_cgroup_from_objcg(objcg);
- ret = memcg_list_lru_alloc(memcg, lru, flags);
- css_put(&memcg->css);
-
- if (ret)
- return false;
- }
-
- if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
- return false;
-
- *objcgp = objcg;
- return true;
-}
-
-static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
- struct obj_cgroup *objcg,
- gfp_t flags, size_t size,
- void **p)
-{
- struct slab *slab;
- unsigned long off;
- size_t i;
-
- if (!memcg_kmem_online() || !objcg)
- return;
-
- for (i = 0; i < size; i++) {
- if (likely(p[i])) {
- slab = virt_to_slab(p[i]);
-
- if (!slab_objcgs(slab) &&
- memcg_alloc_slab_cgroups(slab, s, flags,
- false)) {
- obj_cgroup_uncharge(objcg, obj_full_size(s));
- continue;
- }
-
- off = obj_to_index(s, slab, p[i]);
- obj_cgroup_get(objcg);
- slab_objcgs(slab)[off] = objcg;
- mod_objcg_state(objcg, slab_pgdat(slab),
- cache_vmstat_idx(s), obj_full_size(s));
- } else {
- obj_cgroup_uncharge(objcg, obj_full_size(s));
- }
- }
-}
-
-static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
- void **p, int objects)
-{
- struct obj_cgroup **objcgs;
- int i;
-
- if (!memcg_kmem_online())
- return;
-
- objcgs = slab_objcgs(slab);
- if (!objcgs)
- return;
-
- for (i = 0; i < objects; i++) {
- struct obj_cgroup *objcg;
- unsigned int off;
-
- off = obj_to_index(s, slab, p[i]);
- objcg = objcgs[off];
- if (!objcg)
- continue;
-
- objcgs[off] = NULL;
- obj_cgroup_uncharge(objcg, obj_full_size(s));
- mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
- -obj_full_size(s));
- obj_cgroup_put(objcg);
- }
-}
-
#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
return NULL;
}
-static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
-{
- return NULL;
-}
-
static inline int memcg_alloc_slab_cgroups(struct slab *slab,
struct kmem_cache *s, gfp_t gfp,
bool new_slab)
{
return 0;
}
-
-static inline void memcg_free_slab_cgroups(struct slab *slab)
-{
-}
-
-static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
- struct list_lru *lru,
- struct obj_cgroup **objcgp,
- size_t objects, gfp_t flags)
-{
- return true;
-}
-
-static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
- struct obj_cgroup *objcg,
- gfp_t flags, size_t size,
- void **p)
-{
-}
-
-static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
- void **p, int objects)
-{
-}
#endif /* CONFIG_MEMCG_KMEM */
-static inline struct kmem_cache *virt_to_cache(const void *obj)
-{
- struct slab *slab;
-
- slab = virt_to_slab(obj);
- if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
- __func__))
- return NULL;
- return slab->slab_cache;
-}
-
-static __always_inline void account_slab(struct slab *slab, int order,
- struct kmem_cache *s, gfp_t gfp)
-{
- if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
- memcg_alloc_slab_cgroups(slab, s, gfp, true);
-
- mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
- PAGE_SIZE << order);
-}
-
-static __always_inline void unaccount_slab(struct slab *slab, int order,
- struct kmem_cache *s)
-{
- if (memcg_kmem_online())
- memcg_free_slab_cgroups(slab);
-
- mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
- -(PAGE_SIZE << order));
-}
-
-static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
-{
- struct kmem_cache *cachep;
-
- if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
- !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
- return s;
-
- cachep = virt_to_cache(x);
- if (WARN(cachep && cachep != s,
- "%s: Wrong slab cache. %s but object is from %s\n",
- __func__, s->name, cachep->name))
- print_tracking(cachep, x);
- return cachep;
-}
-
-void free_large_kmalloc(struct folio *folio, void *object);
-
size_t __ksize(const void *objp);
static inline size_t slab_ksize(const struct kmem_cache *s)
{
-#ifndef CONFIG_SLUB
- return s->object_size;
-
-#else /* CONFIG_SLUB */
-# ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_SLUB_DEBUG
/*
* Debugging requires use of the padding between object
* and whatever may come after it.
*/
if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
return s->object_size;
-# endif
+#endif
if (s->flags & SLAB_KASAN)
return s->object_size;
/*
@@ -697,128 +603,9 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
* Else we can use all the padding etc for the allocation
*/
return s->size;
-#endif
-}
-
-static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
- struct list_lru *lru,
- struct obj_cgroup **objcgp,
- size_t size, gfp_t flags)
-{
- flags &= gfp_allowed_mask;
-
- might_alloc(flags);
-
- if (should_failslab(s, flags))
- return NULL;
-
- if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
- return NULL;
-
- return s;
-}
-
-static inline void slab_post_alloc_hook(struct kmem_cache *s,
- struct obj_cgroup *objcg, gfp_t flags,
- size_t size, void **p, bool init,
- unsigned int orig_size)
-{
- unsigned int zero_size = s->object_size;
- bool kasan_init = init;
- size_t i;
-
- flags &= gfp_allowed_mask;
-
- /*
- * For kmalloc object, the allocated memory size(object_size) is likely
- * larger than the requested size(orig_size). If redzone check is
- * enabled for the extra space, don't zero it, as it will be redzoned
- * soon. The redzone operation for this extra space could be seen as a
- * replacement of current poisoning under certain debug option, and
- * won't break other sanity checks.
- */
- if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
- (s->flags & SLAB_KMALLOC))
- zero_size = orig_size;
-
- /*
- * When slub_debug is enabled, avoid memory initialization integrated
- * into KASAN and instead zero out the memory via the memset below with
- * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
- * cause false-positive reports. This does not lead to a performance
- * penalty on production builds, as slub_debug is not intended to be
- * enabled there.
- */
- if (__slub_debug_enabled())
- kasan_init = false;
-
- /*
- * As memory initialization might be integrated into KASAN,
- * kasan_slab_alloc and initialization memset must be
- * kept together to avoid discrepancies in behavior.
- *
- * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
- */
- for (i = 0; i < size; i++) {
- p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
- if (p[i] && init && (!kasan_init || !kasan_has_integrated_init()))
- memset(p[i], 0, zero_size);
- kmemleak_alloc_recursive(p[i], s->object_size, 1,
- s->flags, flags);
- kmsan_slab_alloc(s, p[i], flags);
- }
-
- memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
-/*
- * The slab lists for all objects.
- */
-struct kmem_cache_node {
-#ifdef CONFIG_SLAB
- raw_spinlock_t list_lock;
- struct list_head slabs_partial; /* partial list first, better asm code */
- struct list_head slabs_full;
- struct list_head slabs_free;
- unsigned long total_slabs; /* length of all slab lists */
- unsigned long free_slabs; /* length of free slab list only */
- unsigned long free_objects;
- unsigned int free_limit;
- unsigned int colour_next; /* Per-node cache coloring */
- struct array_cache *shared; /* shared per node */
- struct alien_cache **alien; /* on other nodes */
- unsigned long next_reap; /* updated without locking */
- int free_touched; /* updated without locking */
-#endif
-
-#ifdef CONFIG_SLUB
- spinlock_t list_lock;
- unsigned long nr_partial;
- struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
- atomic_long_t nr_slabs;
- atomic_long_t total_objects;
- struct list_head full;
-#endif
-#endif
-
-};
-
-static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
-{
- return s->node[node];
-}
-
-/*
- * Iterator over all nodes. The body will be executed for each node that has
- * a kmem_cache_node structure allocated (which is true for all online nodes)
- */
-#define for_each_kmem_cache_node(__s, __node, __n) \
- for (__node = 0; __node < nr_node_ids; __node++) \
- if ((__n = get_node(__s, __node)))
-
-
-#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8d431193c273..238293b1dbe1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -21,6 +21,7 @@
#include <linux/swiotlb.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
+#include <linux/kmemleak.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
@@ -71,10 +72,8 @@ static int __init setup_slab_merge(char *str)
return 1;
}
-#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
-#endif
__setup("slab_nomerge", setup_slab_nomerge);
__setup("slab_merge", setup_slab_merge);
@@ -197,10 +196,6 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
if (s->size - size >= sizeof(void *))
continue;
- if (IS_ENABLED(CONFIG_SLAB) && align &&
- (align > s->align || s->align % align))
- continue;
-
return s;
}
return NULL;
@@ -670,7 +665,7 @@ EXPORT_SYMBOL(random_kmalloc_seed);
* of two cache sizes there. The size of larger slabs can be determined using
* fls.
*/
-static u8 size_index[24] __ro_after_init = {
+u8 kmalloc_size_index[24] __ro_after_init = {
3, /* 8 */
4, /* 16 */
5, /* 24 */
@@ -697,33 +692,6 @@ static u8 size_index[24] __ro_after_init = {
2 /* 192 */
};
-static inline unsigned int size_index_elem(unsigned int bytes)
-{
- return (bytes - 1) / 8;
-}
-
-/*
- * Find the kmem_cache structure that serves a given size of
- * allocation
- */
-struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
-{
- unsigned int index;
-
- if (size <= 192) {
- if (!size)
- return ZERO_SIZE_PTR;
-
- index = size_index[size_index_elem(size)];
- } else {
- if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
- return NULL;
- index = fls(size - 1);
- }
-
- return kmalloc_caches[kmalloc_type(flags, caller)][index];
-}
-
size_t kmalloc_size_roundup(size_t size)
{
if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
@@ -848,9 +816,9 @@ void __init setup_kmalloc_cache_index_table(void)
for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
unsigned int elem = size_index_elem(i);
- if (elem >= ARRAY_SIZE(size_index))
+ if (elem >= ARRAY_SIZE(kmalloc_size_index))
break;
- size_index[elem] = KMALLOC_SHIFT_LOW;
+ kmalloc_size_index[elem] = KMALLOC_SHIFT_LOW;
}
if (KMALLOC_MIN_SIZE >= 64) {
@@ -859,7 +827,7 @@ void __init setup_kmalloc_cache_index_table(void)
* is 64 byte.
*/
for (i = 64 + 8; i <= 96; i += 8)
- size_index[size_index_elem(i)] = 7;
+ kmalloc_size_index[size_index_elem(i)] = 7;
}
@@ -870,7 +838,7 @@ void __init setup_kmalloc_cache_index_table(void)
* instead.
*/
for (i = 128 + 8; i <= 192; i += 8)
- size_index[size_index_elem(i)] = 8;
+ kmalloc_size_index[size_index_elem(i)] = 8;
}
}
@@ -968,95 +936,6 @@ void __init create_kmalloc_caches(slab_flags_t flags)
slab_state = UP;
}
-void free_large_kmalloc(struct folio *folio, void *object)
-{
- unsigned int order = folio_order(folio);
-
- if (WARN_ON_ONCE(order == 0))
- pr_warn_once("object pointer: 0x%p\n", object);
-
- kmemleak_free(object);
- kasan_kfree_large(object);
- kmsan_kfree_large(object);
-
- mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
- -(PAGE_SIZE << order));
- __free_pages(folio_page(folio, 0), order);
-}
-
-static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);
-static __always_inline
-void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
-{
- struct kmem_cache *s;
- void *ret;
-
- if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
- ret = __kmalloc_large_node(size, flags, node);
- trace_kmalloc(caller, ret, size,
- PAGE_SIZE << get_order(size), flags, node);
- return ret;
- }
-
- s = kmalloc_slab(size, flags, caller);
-
- if (unlikely(ZERO_OR_NULL_PTR(s)))
- return s;
-
- ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
- ret = kasan_kmalloc(s, ret, size, flags);
- trace_kmalloc(caller, ret, size, s->size, flags, node);
- return ret;
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __do_kmalloc_node(size, flags, node, _RET_IP_);
-}
-EXPORT_SYMBOL(__kmalloc_node);
-
-void *__kmalloc(size_t size, gfp_t flags)
-{
- return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
-}
-EXPORT_SYMBOL(__kmalloc);
-
-void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
- int node, unsigned long caller)
-{
- return __do_kmalloc_node(size, flags, node, caller);
-}
-EXPORT_SYMBOL(__kmalloc_node_track_caller);
-
-/**
- * kfree - free previously allocated memory
- * @object: pointer returned by kmalloc() or kmem_cache_alloc()
- *
- * If @object is NULL, no operation is performed.
- */
-void kfree(const void *object)
-{
- struct folio *folio;
- struct slab *slab;
- struct kmem_cache *s;
-
- trace_kfree(_RET_IP_, object);
-
- if (unlikely(ZERO_OR_NULL_PTR(object)))
- return;
-
- folio = virt_to_folio(object);
- if (unlikely(!folio_test_slab(folio))) {
- free_large_kmalloc(folio, (void *)object);
- return;
- }
-
- slab = folio_slab(folio);
- s = slab->slab_cache;
- __kmem_cache_free(s, (void *)object, _RET_IP_);
-}
-EXPORT_SYMBOL(kfree);
-
/**
* __ksize -- Report full size of underlying allocation
* @object: pointer to the object
@@ -1093,30 +972,6 @@ size_t __ksize(const void *object)
return slab_ksize(folio_slab(folio)->slab_cache);
}
-void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
- void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
- size, _RET_IP_);
-
- trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
-
- ret = kasan_kmalloc(s, ret, size, gfpflags);
- return ret;
-}
-EXPORT_SYMBOL(kmalloc_trace);
-
-void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t size)
-{
- void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);
-
- trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
-
- ret = kasan_kmalloc(s, ret, size, gfpflags);
- return ret;
-}
-EXPORT_SYMBOL(kmalloc_node_trace);
-
gfp_t kmalloc_fix_flags(gfp_t flags)
{
gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
@@ -1129,57 +984,6 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
return flags;
}
-/*
- * To avoid unnecessary overhead, we pass through large allocation requests
- * directly to the page allocator. We use __GFP_COMP, because we will need to
- * know the allocation order to free the pages properly in kfree.
- */
-
-static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
-{
- struct page *page;
- void *ptr = NULL;
- unsigned int order = get_order(size);
-
- if (unlikely(flags & GFP_SLAB_BUG_MASK))
- flags = kmalloc_fix_flags(flags);
-
- flags |= __GFP_COMP;
- page = alloc_pages_node(node, flags, order);
- if (page) {
- ptr = page_address(page);
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
- PAGE_SIZE << order);
- }
-
- ptr = kasan_kmalloc_large(ptr, size, flags);
- /* As ptr might get tagged, call kmemleak hook after KASAN. */
- kmemleak_alloc(ptr, size, 1, flags);
- kmsan_kmalloc_large(ptr, size, flags);
-
- return ptr;
-}
-
-void *kmalloc_large(size_t size, gfp_t flags)
-{
- void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
-
- trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
- flags, NUMA_NO_NODE);
- return ret;
-}
-EXPORT_SYMBOL(kmalloc_large);
-
-void *kmalloc_large_node(size_t size, gfp_t flags, int node)
-{
- void *ret = __kmalloc_large_node(size, flags, node);
-
- trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
- flags, node);
- return ret;
-}
-EXPORT_SYMBOL(kmalloc_large_node);
-
#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(unsigned int *list,
@@ -1222,12 +1026,8 @@ void cache_random_seq_destroy(struct kmem_cache *cachep)
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
-#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
-#ifdef CONFIG_SLAB
-#define SLABINFO_RIGHTS (0600)
-#else
+#ifdef CONFIG_SLUB_DEBUG
#define SLABINFO_RIGHTS (0400)
-#endif
static void print_slabinfo_header(struct seq_file *m)
{
@@ -1235,18 +1035,10 @@ static void print_slabinfo_header(struct seq_file *m)
* Output format version, so at least we can change it
* without _too_ many complaints.
*/
-#ifdef CONFIG_DEBUG_SLAB
- seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
-#else
seq_puts(m, "slabinfo - version: 2.1\n");
-#endif
seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
-#ifdef CONFIG_DEBUG_SLAB
- seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
- seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
-#endif
seq_putc(m, '\n');
}
@@ -1370,7 +1162,7 @@ static int __init slab_proc_init(void)
}
module_init(slab_proc_init);
-#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
+#endif /* CONFIG_SLUB_DEBUG */
static __always_inline __realloc_size(2) void *
__do_krealloc(const void *p, size_t new_size, gfp_t flags)
@@ -1488,10 +1280,3 @@ EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
-int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
-{
- if (__should_failslab(s, gfpflags))
- return -ENOMEM;
- return 0;
-}
-ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
diff --git a/mm/slub.c b/mm/slub.c
index 63d281dfacdb..2ef88bbf56a3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -34,6 +34,7 @@
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
+#include <linux/kmemleak.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
@@ -76,13 +77,28 @@
*
* Frozen slabs
*
- * If a slab is frozen then it is exempt from list management. It is not
- * on any list except per cpu partial list. The processor that froze the
+ * If a slab is frozen then it is exempt from list management. It is
+ * the cpu slab that the processor which froze it actively allocates
+ * from, and it is not on any list. The processor that froze the
* slab is the one who can perform list operations on the slab. Other
* processors may put objects onto the freelist but the processor that
* froze the slab is the only one that can retrieve the objects from the
* slab's freelist.
*
+ * CPU partial slabs
+ *
+ * The partially empty slabs cached on the CPU partial list are used
+ * for performance reasons, which speeds up the allocation process.
+ * These slabs are not frozen, but are also exempt from list management,
+ * by clearing the PG_workingset flag when moving out of the node
+ * partial list. Please see __slab_free() for more details.
+ *
+ * To sum up, the current scheme is:
+ * - node partial slab: PG_workingset && !frozen
+ * - cpu partial slab: !PG_workingset && !frozen
+ * - cpu slab: !PG_workingset && frozen
+ * - full slab: !PG_workingset && !frozen
+ *
* list_lock
*
* The list_lock protects the partial and full list on each node and
@@ -204,9 +220,9 @@ DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
/* Structure holding parameters for get_partial() call chain */
struct partial_context {
- struct slab **slab;
gfp_t flags;
unsigned int orig_size;
+ void *object;
};
static inline bool kmem_cache_debug(struct kmem_cache *s)
@@ -330,6 +346,60 @@ static void debugfs_slab_add(struct kmem_cache *);
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif
+enum stat_item {
+ ALLOC_FASTPATH, /* Allocation from cpu slab */
+ ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
+ FREE_FASTPATH, /* Free to cpu slab */
+ FREE_SLOWPATH, /* Freeing not to cpu slab */
+ FREE_FROZEN, /* Freeing to frozen slab */
+ FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
+ FREE_REMOVE_PARTIAL, /* Freeing removes last object */
+ ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */
+ ALLOC_SLAB, /* Cpu slab acquired from page allocator */
+ ALLOC_REFILL, /* Refill cpu slab from slab freelist */
+ ALLOC_NODE_MISMATCH, /* Switching cpu slab */
+ FREE_SLAB, /* Slab freed to the page allocator */
+ CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
+ DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
+ DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */
+ DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
+ DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
+ DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+ DEACTIVATE_BYPASS, /* Implicit deactivation */
+ ORDER_FALLBACK, /* Number of times fallback was necessary */
+ CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
+ CMPXCHG_DOUBLE_FAIL, /* Failures of slab freelist update */
+ CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */
+ CPU_PARTIAL_FREE, /* Refill cpu partial on free */
+ CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */
+ CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */
+ NR_SLUB_STAT_ITEMS
+};
+
+#ifndef CONFIG_SLUB_TINY
+/*
+ * When changing the layout, make sure freelist and tid are still compatible
+ * with this_cpu_cmpxchg_double() alignment requirements.
+ */
+struct kmem_cache_cpu {
+ union {
+ struct {
+ void **freelist; /* Pointer to next available object */
+ unsigned long tid; /* Globally unique transaction id */
+ };
+ freelist_aba_t freelist_tid;
+ };
+ struct slab *slab; /* The slab from which we are allocating */
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+ struct slab *partial; /* Partially allocated frozen slabs */
+#endif
+ local_lock_t lock; /* Protects the fields above */
+#ifdef CONFIG_SLUB_STATS
+ unsigned int stat[NR_SLUB_STAT_ITEMS];
+#endif
+};
+#endif /* CONFIG_SLUB_TINY */
+
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
@@ -341,6 +411,41 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
#endif
}
+static inline
+void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
+{
+#ifdef CONFIG_SLUB_STATS
+ raw_cpu_add(s->cpu_slab->stat[si], v);
+#endif
+}
+
+/*
+ * The slab lists for all objects.
+ */
+struct kmem_cache_node {
+ spinlock_t list_lock;
+ unsigned long nr_partial;
+ struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
+ atomic_long_t nr_slabs;
+ atomic_long_t total_objects;
+ struct list_head full;
+#endif
+};
+
+static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
+{
+ return s->node[node];
+}
+
+/*
+ * Iterator over all nodes. The body will be executed for each node that has
+ * a kmem_cache_node structure allocated (which is true for all online nodes)
+ */
+#define for_each_kmem_cache_node(__s, __node, __n) \
+ for (__node = 0; __node < nr_node_ids; __node++) \
+ if ((__n = get_node(__s, __node)))
+
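
(Illustrative aside, not part of the patch: a typical consumer of this iterator walks every allocated kmem_cache_node of a cache. The helper name below is hypothetical, and the unlocked nr_partial read is only suitable for statistics, much like the other unlocked nr_partial checks elsewhere in this file.)

/* Hypothetical helper, for illustration only. */
static unsigned long example_nr_partial_slabs(struct kmem_cache *s)
{
        struct kmem_cache_node *n;
        unsigned long total = 0;
        int node;

        for_each_kmem_cache_node(s, node, n)
                total += n->nr_partial;         /* racy, statistics only */

        return total;
}
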
/*
* Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
* Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
@@ -522,7 +627,7 @@ static __always_inline void slab_unlock(struct slab *slab)
struct page *page = slab_page(slab);
VM_BUG_ON_PAGE(PageTail(page), page);
- __bit_spin_unlock(PG_locked, &page->flags);
+ bit_spin_unlock(PG_locked, &page->flags);
}
static inline bool
@@ -870,20 +975,20 @@ static inline void set_orig_size(struct kmem_cache *s,
void *object, unsigned int orig_size)
{
void *p = kasan_reset_tag(object);
+ unsigned int kasan_meta_size;
if (!slub_debug_orig_size(s))
return;
-#ifdef CONFIG_KASAN_GENERIC
/*
- * KASAN could save its free meta data in object's data area at
- * offset 0, if the size is larger than 'orig_size', it will
- * overlap the data redzone in [orig_size+1, object_size], and
- * the check should be skipped.
+ * KASAN can save its free meta data inside of the object at offset 0.
+ * If this meta data size is larger than 'orig_size', it will overlap
+ * the data redzone in [orig_size+1, object_size]. Thus, we adjust
+ * 'orig_size' to be at least as big as KASAN's meta data.
*/
- if (kasan_metadata_size(s, true) > orig_size)
- orig_size = s->object_size;
-#endif
+ kasan_meta_size = kasan_metadata_size(s, true);
+ if (kasan_meta_size > orig_size)
+ orig_size = kasan_meta_size;
p += get_info_end(s);
p += sizeof(struct track) * 2;
@@ -1192,7 +1297,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
{
u8 *p = object;
u8 *endobject = object + s->object_size;
- unsigned int orig_size;
+ unsigned int orig_size, kasan_meta_size;
if (s->flags & SLAB_RED_ZONE) {
if (!check_bytes_and_report(s, slab, object, "Left Redzone",
@@ -1222,12 +1327,23 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
}
if (s->flags & SLAB_POISON) {
- if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
- (!check_bytes_and_report(s, slab, p, "Poison", p,
- POISON_FREE, s->object_size - 1) ||
- !check_bytes_and_report(s, slab, p, "End Poison",
- p + s->object_size - 1, POISON_END, 1)))
- return 0;
+ if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
+ /*
+ * KASAN can save its free meta data inside of the
+ * object at offset 0. Thus, skip checking the part of
+ * the redzone that overlaps with the meta data.
+ */
+ kasan_meta_size = kasan_metadata_size(s, true);
+ if (kasan_meta_size < s->object_size - 1 &&
+ !check_bytes_and_report(s, slab, p, "Poison",
+ p + kasan_meta_size, POISON_FREE,
+ s->object_size - kasan_meta_size - 1))
+ return 0;
+ if (kasan_meta_size < s->object_size &&
+ !check_bytes_and_report(s, slab, p, "End Poison",
+ p + s->object_size - 1, POISON_END, 1))
+ return 0;
+ }
/*
* check_pad_bytes cleans up on its own.
*/
@@ -1759,12 +1875,214 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
#endif
#endif /* CONFIG_SLUB_DEBUG */
+static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
+{
+ return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+ NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
+}
+
+#ifdef CONFIG_MEMCG_KMEM
+static inline void memcg_free_slab_cgroups(struct slab *slab)
+{
+ kfree(slab_objcgs(slab));
+ slab->memcg_data = 0;
+}
+
+static inline size_t obj_full_size(struct kmem_cache *s)
+{
+ /*
+ * For each accounted object there is an extra space which is used
+ * to store obj_cgroup membership. Charge it too.
+ */
+ return s->size + sizeof(struct obj_cgroup *);
+}
+
+/*
+ * Returns false if the allocation should fail.
+ */
+static bool __memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+ struct list_lru *lru,
+ struct obj_cgroup **objcgp,
+ size_t objects, gfp_t flags)
+{
+ /*
+ * The obtained objcg pointer is safe to use within the current scope,
+ * defined by current task or set_active_memcg() pair.
+ * obj_cgroup_get() is used to get a permanent reference.
+ */
+ struct obj_cgroup *objcg = current_obj_cgroup();
+ if (!objcg)
+ return true;
+
+ if (lru) {
+ int ret;
+ struct mem_cgroup *memcg;
+
+ memcg = get_mem_cgroup_from_objcg(objcg);
+ ret = memcg_list_lru_alloc(memcg, lru, flags);
+ css_put(&memcg->css);
+
+ if (ret)
+ return false;
+ }
+
+ if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
+ return false;
+
+ *objcgp = objcg;
+ return true;
+}
+
+/*
+ * Returns false if the allocation should fail.
+ */
+static __fastpath_inline
+bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
+ struct obj_cgroup **objcgp, size_t objects,
+ gfp_t flags)
+{
+ if (!memcg_kmem_online())
+ return true;
+
+ if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)))
+ return true;
+
+ return likely(__memcg_slab_pre_alloc_hook(s, lru, objcgp, objects,
+ flags));
+}
+
+static void __memcg_slab_post_alloc_hook(struct kmem_cache *s,
+ struct obj_cgroup *objcg,
+ gfp_t flags, size_t size,
+ void **p)
+{
+ struct slab *slab;
+ unsigned long off;
+ size_t i;
+
+ flags &= gfp_allowed_mask;
+
+ for (i = 0; i < size; i++) {
+ if (likely(p[i])) {
+ slab = virt_to_slab(p[i]);
+
+ if (!slab_objcgs(slab) &&
+ memcg_alloc_slab_cgroups(slab, s, flags, false)) {
+ obj_cgroup_uncharge(objcg, obj_full_size(s));
+ continue;
+ }
+
+ off = obj_to_index(s, slab, p[i]);
+ obj_cgroup_get(objcg);
+ slab_objcgs(slab)[off] = objcg;
+ mod_objcg_state(objcg, slab_pgdat(slab),
+ cache_vmstat_idx(s), obj_full_size(s));
+ } else {
+ obj_cgroup_uncharge(objcg, obj_full_size(s));
+ }
+ }
+}
+
+static __fastpath_inline
+void memcg_slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
+ gfp_t flags, size_t size, void **p)
+{
+ if (likely(!memcg_kmem_online() || !objcg))
+ return;
+
+ return __memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
+}
+
+static void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
+ void **p, int objects,
+ struct obj_cgroup **objcgs)
+{
+ for (int i = 0; i < objects; i++) {
+ struct obj_cgroup *objcg;
+ unsigned int off;
+
+ off = obj_to_index(s, slab, p[i]);
+ objcg = objcgs[off];
+ if (!objcg)
+ continue;
+
+ objcgs[off] = NULL;
+ obj_cgroup_uncharge(objcg, obj_full_size(s));
+ mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
+ -obj_full_size(s));
+ obj_cgroup_put(objcg);
+ }
+}
+
+static __fastpath_inline
+void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
+ int objects)
+{
+ struct obj_cgroup **objcgs;
+
+ if (!memcg_kmem_online())
+ return;
+
+ objcgs = slab_objcgs(slab);
+ if (likely(!objcgs))
+ return;
+
+ __memcg_slab_free_hook(s, slab, p, objects, objcgs);
+}
+
+static inline
+void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
+ struct obj_cgroup *objcg)
+{
+ if (objcg)
+ obj_cgroup_uncharge(objcg, objects * obj_full_size(s));
+}
+#else /* CONFIG_MEMCG_KMEM */
+static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
+{
+ return NULL;
+}
+
+static inline void memcg_free_slab_cgroups(struct slab *slab)
+{
+}
+
+static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+ struct list_lru *lru,
+ struct obj_cgroup **objcgp,
+ size_t objects, gfp_t flags)
+{
+ return true;
+}
+
+static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+ struct obj_cgroup *objcg,
+ gfp_t flags, size_t size,
+ void **p)
+{
+}
+
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
+ void **p, int objects)
+{
+}
+
+static inline
+void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
+ struct obj_cgroup *objcg)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
+
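
(Illustrative aside, not part of the patch: the hooks above are meant to bracket an allocation, charging the objcg before any objects are handed out and recording per-object ownership afterwards. The function below is hypothetical; the real callers are slab_pre_alloc_hook() and slab_post_alloc_hook() further down in this file.)

/* Hypothetical caller, for illustration only. */
static int example_charge_and_commit(struct kmem_cache *s, gfp_t flags,
                                     size_t size, void **p)
{
        struct obj_cgroup *objcg = NULL;

        if (!memcg_slab_pre_alloc_hook(s, NULL, &objcg, size, flags))
                return -ENOMEM;         /* charging failed, fail the allocation */

        /* ... p[0..size-1] would be filled in by the allocator here ... */

        memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
        return 0;
}
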
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
+ *
+ * Returns true if freeing of the object can proceed, false if its reuse
+ * was delayed by KASAN quarantine, or it was returned to KFENCE.
*/
-static __always_inline bool slab_free_hook(struct kmem_cache *s,
- void *x, bool init)
+static __always_inline
+bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
{
kmemleak_free_recursive(x, s->flags);
kmsan_slab_free(s, x);
@@ -1779,6 +2097,9 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
__kcsan_check_access(x, s->object_size,
KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
+ if (kfence_free(x))
+ return false;
+
/*
* As memory initialization might be integrated into KASAN,
* kasan_slab_free and initialization memset's must be
@@ -1787,7 +2108,7 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
* The initialization memset's clear the object and the metadata,
* but don't touch the SLAB redzone.
*/
- if (init) {
+ if (unlikely(init)) {
int rsize;
if (!kasan_has_integrated_init())
@@ -1797,7 +2118,7 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
s->size - s->inuse - rsize);
}
/* KASAN might put x into memory quarantine, delaying its reuse. */
- return kasan_slab_free(s, x, init);
+ return !kasan_slab_free(s, x, init);
}
static inline bool slab_free_freelist_hook(struct kmem_cache *s,
@@ -1807,23 +2128,26 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
void *object;
void *next = *head;
- void *old_tail = *tail ? *tail : *head;
+ void *old_tail = *tail;
+ bool init;
if (is_kfence_address(next)) {
slab_free_hook(s, next, false);
- return true;
+ return false;
}
/* Head and tail of the reconstructed freelist */
*head = NULL;
*tail = NULL;
+ init = slab_want_init_on_free(s);
+
do {
object = next;
next = get_freepointer(s, object);
/* If object's reuse doesn't have to be delayed */
- if (!slab_free_hook(s, object, slab_want_init_on_free(s))) {
+ if (likely(slab_free_hook(s, object, init))) {
/* Move object to the new freelist */
set_freepointer(s, object, *head);
*head = object;
@@ -1838,9 +2162,6 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
}
} while (object != old_tail);
- if (*head == *tail)
- *tail = NULL;
-
return *head != NULL;
}
@@ -1849,9 +2170,9 @@ static void *setup_object(struct kmem_cache *s, void *object)
setup_object_debug(s, object);
object = kasan_init_slab_obj(s, object);
if (unlikely(s->ctor)) {
- kasan_unpoison_object_data(s, object);
+ kasan_unpoison_new_object(s, object);
s->ctor(object);
- kasan_poison_object_data(s, object);
+ kasan_poison_new_object(s, object);
}
return object;
}
@@ -1866,11 +2187,7 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
struct slab *slab;
unsigned int order = oo_order(oo);
- if (node == NUMA_NO_NODE)
- folio = (struct folio *)alloc_pages(flags, order);
- else
- folio = (struct folio *)__alloc_pages_node(node, flags, order);
-
+ folio = (struct folio *)alloc_pages_node(node, flags, order);
if (!folio)
return NULL;
@@ -1993,6 +2310,26 @@ static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
+static __always_inline void account_slab(struct slab *slab, int order,
+ struct kmem_cache *s, gfp_t gfp)
+{
+ if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
+ memcg_alloc_slab_cgroups(slab, s, gfp, true);
+
+ mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
+ PAGE_SIZE << order);
+}
+
+static __always_inline void unaccount_slab(struct slab *slab, int order,
+ struct kmem_cache *s)
+{
+ if (memcg_kmem_online())
+ memcg_free_slab_cgroups(slab);
+
+ mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
+ -(PAGE_SIZE << order));
+}
+
static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
struct slab *slab;
@@ -2117,6 +2454,25 @@ static void discard_slab(struct kmem_cache *s, struct slab *slab)
}
/*
+ * SLUB reuses the PG_workingset bit to keep track of whether a slab is on
+ * the per-node partial list.
+ */
+static inline bool slab_test_node_partial(const struct slab *slab)
+{
+ return folio_test_workingset((struct folio *)slab_folio(slab));
+}
+
+static inline void slab_set_node_partial(struct slab *slab)
+{
+ set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
+}
+
+static inline void slab_clear_node_partial(struct slab *slab)
+{
+ clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
+}
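
(Illustrative aside, not part of the patch: combining these helpers with the frozen bit gives back the state table from the comment at the top of the file. The function is hypothetical, its unlocked reads are racy, and a cpu partial slab cannot be told apart from a full one this way, since both have neither bit set.)

/* Hypothetical, for illustration only. */
static const char *example_slab_state(const struct slab *slab)
{
        if (slab_test_node_partial(slab))
                return "node partial";          /* PG_workingset && !frozen */
        if (slab->frozen)
                return "cpu slab";              /* frozen, owned by one CPU */
        return "cpu partial or full";           /* neither flag set */
}
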
+
+/*
* Management of partially allocated slabs.
*/
static inline void
@@ -2127,6 +2483,7 @@ __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
list_add_tail(&slab->slab_list, &n->partial);
else
list_add(&slab->slab_list, &n->partial);
+ slab_set_node_partial(slab);
}
static inline void add_partial(struct kmem_cache_node *n,
@@ -2141,11 +2498,12 @@ static inline void remove_partial(struct kmem_cache_node *n,
{
lockdep_assert_held(&n->list_lock);
list_del(&slab->slab_list);
+ slab_clear_node_partial(slab);
n->nr_partial--;
}
/*
- * Called only for kmem_cache_debug() caches instead of acquire_slab(), with a
+ * Called only for kmem_cache_debug() caches instead of remove_partial(), with a
* slab from the n->partial list. Remove only a single object from the slab, do
* the alloc_debug_processing() checks and leave the slab on the list, or move
* it to full list if it was the last free object.
@@ -2213,51 +2571,6 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s,
return object;
}
-/*
- * Remove slab from the partial list, freeze it and
- * return the pointer to the freelist.
- *
- * Returns a list of objects or NULL if it fails.
- */
-static inline void *acquire_slab(struct kmem_cache *s,
- struct kmem_cache_node *n, struct slab *slab,
- int mode)
-{
- void *freelist;
- unsigned long counters;
- struct slab new;
-
- lockdep_assert_held(&n->list_lock);
-
- /*
- * Zap the freelist and set the frozen bit.
- * The old freelist is the list of objects for the
- * per cpu allocation list.
- */
- freelist = slab->freelist;
- counters = slab->counters;
- new.counters = counters;
- if (mode) {
- new.inuse = slab->objects;
- new.freelist = NULL;
- } else {
- new.freelist = freelist;
- }
-
- VM_BUG_ON(new.frozen);
- new.frozen = 1;
-
- if (!__slab_update_freelist(s, slab,
- freelist, counters,
- new.freelist, new.counters,
- "acquire_slab"))
- return NULL;
-
- remove_partial(n, slab);
- WARN_ON(!freelist);
- return freelist;
-}
-
#ifdef CONFIG_SLUB_CPU_PARTIAL
static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
#else
@@ -2269,11 +2582,11 @@ static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
/*
* Try to allocate a partial slab from a specific node.
*/
-static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
- struct partial_context *pc)
+static struct slab *get_partial_node(struct kmem_cache *s,
+ struct kmem_cache_node *n,
+ struct partial_context *pc)
{
- struct slab *slab, *slab2;
- void *object = NULL;
+ struct slab *slab, *slab2, *partial = NULL;
unsigned long flags;
unsigned int partial_slabs = 0;
@@ -2288,27 +2601,25 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
- void *t;
-
if (!pfmemalloc_match(slab, pc->flags))
continue;
if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
- object = alloc_single_from_partial(s, n, slab,
+ void *object = alloc_single_from_partial(s, n, slab,
pc->orig_size);
- if (object)
+ if (object) {
+ partial = slab;
+ pc->object = object;
break;
+ }
continue;
}
- t = acquire_slab(s, n, slab, object == NULL);
- if (!t)
- break;
+ remove_partial(n, slab);
- if (!object) {
- *pc->slab = slab;
+ if (!partial) {
+ partial = slab;
stat(s, ALLOC_FROM_PARTIAL);
- object = t;
} else {
put_cpu_partial(s, slab, 0);
stat(s, CPU_PARTIAL_NODE);
@@ -2324,20 +2635,21 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
}
spin_unlock_irqrestore(&n->list_lock, flags);
- return object;
+ return partial;
}
/*
* Get a slab from somewhere. Search in increasing NUMA distances.
*/
-static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
+static struct slab *get_any_partial(struct kmem_cache *s,
+ struct partial_context *pc)
{
#ifdef CONFIG_NUMA
struct zonelist *zonelist;
struct zoneref *z;
struct zone *zone;
enum zone_type highest_zoneidx = gfp_zone(pc->flags);
- void *object;
+ struct slab *slab;
unsigned int cpuset_mems_cookie;
/*
@@ -2372,8 +2684,8 @@ static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
if (n && cpuset_zone_allowed(zone, pc->flags) &&
n->nr_partial > s->min_partial) {
- object = get_partial_node(s, n, pc);
- if (object) {
+ slab = get_partial_node(s, n, pc);
+ if (slab) {
/*
* Don't check read_mems_allowed_retry()
* here - if mems_allowed was updated in
@@ -2381,7 +2693,7 @@ static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
* between allocation and the cpuset
* update
*/
- return object;
+ return slab;
}
}
}
@@ -2393,17 +2705,18 @@ static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
/*
* Get a partial slab, lock it and return it.
*/
-static void *get_partial(struct kmem_cache *s, int node, struct partial_context *pc)
+static struct slab *get_partial(struct kmem_cache *s, int node,
+ struct partial_context *pc)
{
- void *object;
+ struct slab *slab;
int searchnode = node;
if (node == NUMA_NO_NODE)
searchnode = numa_mem_id();
- object = get_partial_node(s, get_node(s, searchnode), pc);
- if (object || node != NUMA_NO_NODE)
- return object;
+ slab = get_partial_node(s, get_node(s, searchnode), pc);
+ if (slab || node != NUMA_NO_NODE)
+ return slab;
return get_any_partial(s, pc);
}
@@ -2492,10 +2805,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
void *freelist)
{
- enum slab_modes { M_NONE, M_PARTIAL, M_FREE, M_FULL_NOLIST };
struct kmem_cache_node *n = get_node(s, slab_nid(slab));
int free_delta = 0;
- enum slab_modes mode = M_NONE;
void *nextfree, *freelist_iter, *freelist_tail;
int tail = DEACTIVATE_TO_HEAD;
unsigned long flags = 0;
@@ -2533,80 +2844,52 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
/*
* Stage two: Unfreeze the slab while splicing the per-cpu
* freelist to the head of slab's freelist.
- *
- * Ensure that the slab is unfrozen while the list presence
- * reflects the actual number of objects during unfreeze.
- *
- * We first perform cmpxchg holding lock and insert to list
- * when it succeed. If there is mismatch then the slab is not
- * unfrozen and number of objects in the slab may have changed.
- * Then release lock and retry cmpxchg again.
*/
-redo:
-
- old.freelist = READ_ONCE(slab->freelist);
- old.counters = READ_ONCE(slab->counters);
- VM_BUG_ON(!old.frozen);
-
- /* Determine target state of the slab */
- new.counters = old.counters;
- if (freelist_tail) {
- new.inuse -= free_delta;
- set_freepointer(s, freelist_tail, old.freelist);
- new.freelist = freelist;
- } else
- new.freelist = old.freelist;
-
- new.frozen = 0;
+ do {
+ old.freelist = READ_ONCE(slab->freelist);
+ old.counters = READ_ONCE(slab->counters);
+ VM_BUG_ON(!old.frozen);
+
+ /* Determine target state of the slab */
+ new.counters = old.counters;
+ new.frozen = 0;
+ if (freelist_tail) {
+ new.inuse -= free_delta;
+ set_freepointer(s, freelist_tail, old.freelist);
+ new.freelist = freelist;
+ } else {
+ new.freelist = old.freelist;
+ }
+ } while (!slab_update_freelist(s, slab,
+ old.freelist, old.counters,
+ new.freelist, new.counters,
+ "unfreezing slab"));
+ /*
+ * Stage three: Manipulate the slab list based on the updated state.
+ */
if (!new.inuse && n->nr_partial >= s->min_partial) {
- mode = M_FREE;
+ stat(s, DEACTIVATE_EMPTY);
+ discard_slab(s, slab);
+ stat(s, FREE_SLAB);
} else if (new.freelist) {
- mode = M_PARTIAL;
- /*
- * Taking the spinlock removes the possibility that
- * acquire_slab() will see a slab that is frozen
- */
spin_lock_irqsave(&n->list_lock, flags);
- } else {
- mode = M_FULL_NOLIST;
- }
-
-
- if (!slab_update_freelist(s, slab,
- old.freelist, old.counters,
- new.freelist, new.counters,
- "unfreezing slab")) {
- if (mode == M_PARTIAL)
- spin_unlock_irqrestore(&n->list_lock, flags);
- goto redo;
- }
-
-
- if (mode == M_PARTIAL) {
add_partial(n, slab, tail);
spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, tail);
- } else if (mode == M_FREE) {
- stat(s, DEACTIVATE_EMPTY);
- discard_slab(s, slab);
- stat(s, FREE_SLAB);
- } else if (mode == M_FULL_NOLIST) {
+ } else {
stat(s, DEACTIVATE_FULL);
}
}
#ifdef CONFIG_SLUB_CPU_PARTIAL
-static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
+static void __put_partials(struct kmem_cache *s, struct slab *partial_slab)
{
struct kmem_cache_node *n = NULL, *n2 = NULL;
struct slab *slab, *slab_to_discard = NULL;
unsigned long flags = 0;
while (partial_slab) {
- struct slab new;
- struct slab old;
-
slab = partial_slab;
partial_slab = slab->next;
@@ -2619,23 +2902,7 @@ static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
spin_lock_irqsave(&n->list_lock, flags);
}
- do {
-
- old.freelist = slab->freelist;
- old.counters = slab->counters;
- VM_BUG_ON(!old.frozen);
-
- new.counters = old.counters;
- new.freelist = old.freelist;
-
- new.frozen = 0;
-
- } while (!__slab_update_freelist(s, slab,
- old.freelist, old.counters,
- new.freelist, new.counters,
- "unfreezing slab"));
-
- if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
+ if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) {
slab->next = slab_to_discard;
slab_to_discard = slab;
} else {
@@ -2658,9 +2925,9 @@ static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
}
/*
- * Unfreeze all the cpu partial slabs.
+ * Put all the cpu partial slabs onto the node partial list.
*/
-static void unfreeze_partials(struct kmem_cache *s)
+static void put_partials(struct kmem_cache *s)
{
struct slab *partial_slab;
unsigned long flags;
@@ -2671,11 +2938,11 @@ static void unfreeze_partials(struct kmem_cache *s)
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
if (partial_slab)
- __unfreeze_partials(s, partial_slab);
+ __put_partials(s, partial_slab);
}
-static void unfreeze_partials_cpu(struct kmem_cache *s,
- struct kmem_cache_cpu *c)
+static void put_partials_cpu(struct kmem_cache *s,
+ struct kmem_cache_cpu *c)
{
struct slab *partial_slab;
@@ -2683,12 +2950,11 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
c->partial = NULL;
if (partial_slab)
- __unfreeze_partials(s, partial_slab);
+ __put_partials(s, partial_slab);
}
/*
- * Put a slab that was just frozen (in __slab_free|get_partial_node) into a
- * partial slab slot if available.
+ * Put a slab into a partial slab slot if available.
*
* If we did not find a slot then simply move all the partials to the
* per node partial list.
@@ -2696,7 +2962,7 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
{
struct slab *oldslab;
- struct slab *slab_to_unfreeze = NULL;
+ struct slab *slab_to_put = NULL;
unsigned long flags;
int slabs = 0;
@@ -2711,7 +2977,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
* per node partial list. Postpone the actual unfreezing
* outside of the critical section.
*/
- slab_to_unfreeze = oldslab;
+ slab_to_put = oldslab;
oldslab = NULL;
} else {
slabs = oldslab->slabs;
@@ -2727,17 +2993,17 @@ static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
- if (slab_to_unfreeze) {
- __unfreeze_partials(s, slab_to_unfreeze);
+ if (slab_to_put) {
+ __put_partials(s, slab_to_put);
stat(s, CPU_PARTIAL_DRAIN);
}
}
#else /* CONFIG_SLUB_CPU_PARTIAL */
-static inline void unfreeze_partials(struct kmem_cache *s) { }
-static inline void unfreeze_partials_cpu(struct kmem_cache *s,
- struct kmem_cache_cpu *c) { }
+static inline void put_partials(struct kmem_cache *s) { }
+static inline void put_partials_cpu(struct kmem_cache *s,
+ struct kmem_cache_cpu *c) { }
#endif /* CONFIG_SLUB_CPU_PARTIAL */
@@ -2779,7 +3045,7 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
stat(s, CPUSLAB_FLUSH);
}
- unfreeze_partials_cpu(s, c);
+ put_partials_cpu(s, c);
}
struct slub_flush_work {
@@ -2807,7 +3073,7 @@ static void flush_cpu_slab(struct work_struct *w)
if (c->slab)
flush_slab(s, c);
- unfreeze_partials(s);
+ put_partials(s);
}
static bool has_cpu_slab(int cpu, struct kmem_cache *s)
@@ -3074,6 +3340,33 @@ static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
}
/*
+ * Freeze the partial slab and return the pointer to the freelist.
+ */
+static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab)
+{
+ struct slab new;
+ unsigned long counters;
+ void *freelist;
+
+ do {
+ freelist = slab->freelist;
+ counters = slab->counters;
+
+ new.counters = counters;
+ VM_BUG_ON(new.frozen);
+
+ new.inuse = slab->objects;
+ new.frozen = 1;
+
+ } while (!slab_update_freelist(s, slab,
+ freelist, counters,
+ NULL, new.counters,
+ "freeze_slab"));
+
+ return freelist;
+}
+
+/*
* Slow path. The lockless freelist is empty or we need to perform
* debugging duties.
*
@@ -3115,7 +3408,6 @@ reread_slab:
node = NUMA_NO_NODE;
goto new_slab;
}
-redo:
if (unlikely(!node_match(slab, node))) {
/*
@@ -3191,7 +3483,8 @@ deactivate_slab:
new_slab:
- if (slub_percpu_partial(c)) {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+ while (slub_percpu_partial(c)) {
local_lock_irqsave(&s->cpu_slab->lock, flags);
if (unlikely(c->slab)) {
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
@@ -3203,21 +3496,45 @@ new_slab:
goto new_objects;
}
- slab = c->slab = slub_percpu_partial(c);
+ slab = slub_percpu_partial(c);
slub_set_percpu_partial(c, slab);
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
stat(s, CPU_PARTIAL_ALLOC);
- goto redo;
+
+ if (unlikely(!node_match(slab, node) ||
+ !pfmemalloc_match(slab, gfpflags))) {
+ slab->next = NULL;
+ __put_partials(s, slab);
+ continue;
+ }
+
+ freelist = freeze_slab(s, slab);
+ goto retry_load_slab;
}
+#endif
new_objects:
pc.flags = gfpflags;
- pc.slab = &slab;
pc.orig_size = orig_size;
- freelist = get_partial(s, node, &pc);
- if (freelist)
- goto check_new_slab;
+ slab = get_partial(s, node, &pc);
+ if (slab) {
+ if (kmem_cache_debug(s)) {
+ freelist = pc.object;
+ /*
+ * For debug caches here we had to go through
+ * alloc_single_from_partial() so just store the
+ * tracking info and return the object.
+ */
+ if (s->flags & SLAB_STORE_USER)
+ set_track(s, freelist, TRACK_ALLOC, addr);
+
+ return freelist;
+ }
+
+ freelist = freeze_slab(s, slab);
+ goto retry_load_slab;
+ }
slub_put_cpu_ptr(s->cpu_slab);
slab = new_slab(s, gfpflags, node);
@@ -3253,20 +3570,6 @@ new_objects:
inc_slabs_node(s, slab_nid(slab), slab->objects);
-check_new_slab:
-
- if (kmem_cache_debug(s)) {
- /*
- * For debug caches here we had to go through
- * alloc_single_from_partial() so just store the tracking info
- * and return the object
- */
- if (s->flags & SLAB_STORE_USER)
- set_track(s, freelist, TRACK_ALLOC, addr);
-
- return freelist;
- }
-
if (unlikely(!pfmemalloc_match(slab, gfpflags))) {
/*
* For !pfmemalloc_match() case we don't load freelist so that
@@ -3409,12 +3712,11 @@ static void *__slab_alloc_node(struct kmem_cache *s,
void *object;
pc.flags = gfpflags;
- pc.slab = &slab;
pc.orig_size = orig_size;
- object = get_partial(s, node, &pc);
+ slab = get_partial(s, node, &pc);
- if (object)
- return object;
+ if (slab)
+ return pc.object;
slab = new_slab(s, gfpflags, node);
if (unlikely(!slab)) {
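With this series get_partial() no longer returns a freelist: it returns the slab it acquired and, for paths that want a single object (debug caches and the CONFIG_SLUB_TINY allocator), stores that object in pc.object. The calling convention used by __slab_alloc_node() above, condensed for reference (a restatement of the hunk, not new API):

	struct partial_context pc;
	struct slab *slab;

	pc.flags = gfpflags;
	pc.orig_size = orig_size;

	slab = get_partial(s, node, &pc);
	if (slab)
		return pc.object;	/* object already carved out of the partial slab */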
@@ -3440,6 +3742,86 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
0, sizeof(void *));
}
+noinline int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
+{
+ if (__should_failslab(s, gfpflags))
+ return -ENOMEM;
+ return 0;
+}
+ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
+
+static __fastpath_inline
+struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
+ struct list_lru *lru,
+ struct obj_cgroup **objcgp,
+ size_t size, gfp_t flags)
+{
+ flags &= gfp_allowed_mask;
+
+ might_alloc(flags);
+
+ if (unlikely(should_failslab(s, flags)))
+ return NULL;
+
+ if (unlikely(!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags)))
+ return NULL;
+
+ return s;
+}
+
+static __fastpath_inline
+void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
+ gfp_t flags, size_t size, void **p, bool init,
+ unsigned int orig_size)
+{
+ unsigned int zero_size = s->object_size;
+ bool kasan_init = init;
+ size_t i;
+ gfp_t init_flags = flags & gfp_allowed_mask;
+
+ /*
+ * For kmalloc object, the allocated memory size(object_size) is likely
+ * larger than the requested size(orig_size). If redzone check is
+ * enabled for the extra space, don't zero it, as it will be redzoned
+ * soon. The redzone operation for this extra space could be seen as a
+ * replacement of current poisoning under certain debug option, and
+ * won't break other sanity checks.
+ */
+ if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
+ (s->flags & SLAB_KMALLOC))
+ zero_size = orig_size;
+
+ /*
+ * When slub_debug is enabled, avoid memory initialization integrated
+ * into KASAN and instead zero out the memory via the memset below with
+ * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
+ * cause false-positive reports. This does not lead to a performance
+ * penalty on production builds, as slub_debug is not intended to be
+ * enabled there.
+ */
+ if (__slub_debug_enabled())
+ kasan_init = false;
+
+ /*
+ * As memory initialization might be integrated into KASAN,
+ * kasan_slab_alloc and initialization memset must be
+ * kept together to avoid discrepancies in behavior.
+ *
+ * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
+ */
+ for (i = 0; i < size; i++) {
+ p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init);
+ if (p[i] && init && (!kasan_init ||
+ !kasan_has_integrated_init()))
+ memset(p[i], 0, zero_size);
+ kmemleak_alloc_recursive(p[i], s->object_size, 1,
+ s->flags, init_flags);
+ kmsan_slab_alloc(s, p[i], init_flags);
+ }
+
+ memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
+}
+
/*
* Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
* have the fastpath folded into their functions. So no function call
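One detail of slab_post_alloc_hook() worth spelling out is zero_size: for kmalloc caches with SLAB_STORE_USER/SLAB_RED_ZONE debugging enabled, only the bytes the caller actually asked for are zeroed, leaving the tail of the object to the redzone check. A worked (hypothetical) example:

	/* kmalloc(20, GFP_KERNEL) served from the kmalloc-32 cache with redzoning on */
	zero_size = s->object_size;			/* 32 by default */
	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
	    (s->flags & SLAB_KMALLOC))
		zero_size = orig_size;			/* 20: bytes 20..31 stay redzoned */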
@@ -3458,7 +3840,7 @@ static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list
bool init = false;
s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
- if (!s)
+ if (unlikely(!s))
return NULL;
object = kfence_alloc(s, orig_size, gfpflags);
@@ -3480,53 +3862,169 @@ out:
return object;
}
-static __fastpath_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru,
- gfp_t gfpflags, unsigned long addr, size_t orig_size)
+void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
- return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size);
+ void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
+ s->object_size);
+
+ trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
+
+ return ret;
}
+EXPORT_SYMBOL(kmem_cache_alloc);
-static __fastpath_inline
-void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
- gfp_t gfpflags)
+void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
+ gfp_t gfpflags)
{
- void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);
+ void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
+ s->object_size);
trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
return ret;
}
+EXPORT_SYMBOL(kmem_cache_alloc_lru);
-void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @s: The cache to allocate from.
+ * @gfpflags: See kmalloc().
+ * @node: node number of the target node.
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ *
+ * Return: pointer to the new object or %NULL in case of error
+ */
+void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
- return __kmem_cache_alloc_lru(s, NULL, gfpflags);
+ void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
+
+ trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
+
+ return ret;
}
-EXPORT_SYMBOL(kmem_cache_alloc);
+EXPORT_SYMBOL(kmem_cache_alloc_node);
-void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
- gfp_t gfpflags)
+/*
+ * To avoid unnecessary overhead, we pass through large allocation requests
+ * directly to the page allocator. We use __GFP_COMP, because we will need to
+ * know the allocation order to free the pages properly in kfree.
+ */
+static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
{
- return __kmem_cache_alloc_lru(s, lru, gfpflags);
+ struct folio *folio;
+ void *ptr = NULL;
+ unsigned int order = get_order(size);
+
+ if (unlikely(flags & GFP_SLAB_BUG_MASK))
+ flags = kmalloc_fix_flags(flags);
+
+ flags |= __GFP_COMP;
+ folio = (struct folio *)alloc_pages_node(node, flags, order);
+ if (folio) {
+ ptr = folio_address(folio);
+ lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
+ PAGE_SIZE << order);
+ }
+
+ ptr = kasan_kmalloc_large(ptr, size, flags);
+ /* As ptr might get tagged, call kmemleak hook after KASAN. */
+ kmemleak_alloc(ptr, size, 1, flags);
+ kmsan_kmalloc_large(ptr, size, flags);
+
+ return ptr;
}
-EXPORT_SYMBOL(kmem_cache_alloc_lru);
-void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t orig_size,
- unsigned long caller)
+void *kmalloc_large(size_t size, gfp_t flags)
{
- return slab_alloc_node(s, NULL, gfpflags, node,
- caller, orig_size);
+ void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
+
+ trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
+ flags, NUMA_NO_NODE);
+ return ret;
}
+EXPORT_SYMBOL(kmalloc_large);
-void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
+void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
- void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
+ void *ret = __kmalloc_large_node(size, flags, node);
- trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
+ trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
+ flags, node);
+ return ret;
+}
+EXPORT_SYMBOL(kmalloc_large_node);
+static __always_inline
+void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
+ unsigned long caller)
+{
+ struct kmem_cache *s;
+ void *ret;
+
+ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
+ ret = __kmalloc_large_node(size, flags, node);
+ trace_kmalloc(caller, ret, size,
+ PAGE_SIZE << get_order(size), flags, node);
+ return ret;
+ }
+
+ if (unlikely(!size))
+ return ZERO_SIZE_PTR;
+
+ s = kmalloc_slab(size, flags, caller);
+
+ ret = slab_alloc_node(s, NULL, flags, node, caller, size);
+ ret = kasan_kmalloc(s, ret, size, flags);
+ trace_kmalloc(caller, ret, size, s->size, flags, node);
return ret;
}
-EXPORT_SYMBOL(kmem_cache_alloc_node);
+
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ return __do_kmalloc_node(size, flags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+
+void *__kmalloc(size_t size, gfp_t flags)
+{
+ return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc);
+
+void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
+ int node, unsigned long caller)
+{
+ return __do_kmalloc_node(size, flags, node, caller);
+}
+EXPORT_SYMBOL(__kmalloc_node_track_caller);
+
+void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+{
+ void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
+ _RET_IP_, size);
+
+ trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
+
+ ret = kasan_kmalloc(s, ret, size, gfpflags);
+ return ret;
+}
+EXPORT_SYMBOL(kmalloc_trace);
+
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size)
+{
+ void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
+
+ trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
+
+ ret = kasan_kmalloc(s, ret, size, gfpflags);
+ return ret;
+}
+EXPORT_SYMBOL(kmalloc_node_trace);
static noinline void free_to_partial_list(
struct kmem_cache *s, struct slab *slab,
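The functions consolidated into slub.c here keep the long-standing size dispatch: __do_kmalloc_node() sends anything above KMALLOC_MAX_CACHE_SIZE straight to the page allocator with __GFP_COMP, while smaller requests are served from a kmalloc cache chosen by kmalloc_slab(). From the caller's side nothing changes; a hedged sketch with illustrative sizes (the exact threshold depends on the configuration):

	void *small = kmalloc(192, GFP_KERNEL);		/* kmalloc-192 slab cache */
	void *big   = kmalloc(2 << 20, GFP_KERNEL);	/* > KMALLOC_MAX_CACHE_SIZE:
							 * __kmalloc_large_node(), __GFP_COMP */
	kfree(small);
	kfree(big);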
@@ -3608,12 +4106,10 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
unsigned long counters;
struct kmem_cache_node *n = NULL;
unsigned long flags;
+ bool on_node_partial;
stat(s, FREE_SLOWPATH);
- if (kfence_free(head))
- return;
-
if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
free_to_partial_list(s, slab, head, tail, cnt, addr);
return;
@@ -3631,18 +4127,8 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
was_frozen = new.frozen;
new.inuse -= cnt;
if ((!new.inuse || !prior) && !was_frozen) {
-
- if (kmem_cache_has_cpu_partial(s) && !prior) {
-
- /*
- * Slab was on no list before and will be
- * partially empty
- * We can defer the list move and instead
- * freeze it.
- */
- new.frozen = 1;
-
- } else { /* Needs to be taken off a list */
+ /* Needs to be taken off a list */
+ if (!kmem_cache_has_cpu_partial(s) || prior) {
n = get_node(s, slab_nid(slab));
/*
@@ -3655,6 +4141,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
*/
spin_lock_irqsave(&n->list_lock, flags);
+ on_node_partial = slab_test_node_partial(slab);
}
}
@@ -3671,9 +4158,9 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
* activity can be necessary.
*/
stat(s, FREE_FROZEN);
- } else if (new.frozen) {
+ } else if (kmem_cache_has_cpu_partial(s) && !prior) {
/*
- * If we just froze the slab then put it onto the
+ * If we started with a full slab then put it onto the
* per cpu partial list.
*/
put_cpu_partial(s, slab, 1);
@@ -3683,6 +4170,15 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
return;
}
+ /*
+ * This slab was partially empty but not on the per-node partial list,
+ * in which case we shouldn't manipulate its list, just return.
+ */
+ if (prior && !on_node_partial) {
+ spin_unlock_irqrestore(&n->list_lock, flags);
+ return;
+ }
+
if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
goto slab_empty;
@@ -3735,7 +4231,6 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
struct slab *slab, void *head, void *tail,
int cnt, unsigned long addr)
{
- void *tail_obj = tail ? : head;
struct kmem_cache_cpu *c;
unsigned long tid;
void **freelist;
@@ -3754,14 +4249,14 @@ redo:
barrier();
if (unlikely(slab != c->slab)) {
- __slab_free(s, slab, head, tail_obj, cnt, addr);
+ __slab_free(s, slab, head, tail, cnt, addr);
return;
}
if (USE_LOCKLESS_FAST_PATH()) {
freelist = READ_ONCE(c->freelist);
- set_freepointer(s, tail_obj, freelist);
+ set_freepointer(s, tail, freelist);
if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) {
note_cmpxchg_failure("slab_free", s, tid);
@@ -3778,60 +4273,143 @@ redo:
tid = c->tid;
freelist = c->freelist;
- set_freepointer(s, tail_obj, freelist);
+ set_freepointer(s, tail, freelist);
c->freelist = head;
c->tid = next_tid(tid);
local_unlock(&s->cpu_slab->lock);
}
- stat(s, FREE_FASTPATH);
+ stat_add(s, FREE_FASTPATH, cnt);
}
#else /* CONFIG_SLUB_TINY */
static void do_slab_free(struct kmem_cache *s,
struct slab *slab, void *head, void *tail,
int cnt, unsigned long addr)
{
- void *tail_obj = tail ? : head;
-
- __slab_free(s, slab, head, tail_obj, cnt, addr);
+ __slab_free(s, slab, head, tail, cnt, addr);
}
#endif /* CONFIG_SLUB_TINY */
-static __fastpath_inline void slab_free(struct kmem_cache *s, struct slab *slab,
- void *head, void *tail, void **p, int cnt,
- unsigned long addr)
+static __fastpath_inline
+void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
+ unsigned long addr)
+{
+ memcg_slab_free_hook(s, slab, &object, 1);
+
+ if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
+ do_slab_free(s, slab, object, object, 1, addr);
+}
+
+static __fastpath_inline
+void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
+ void *tail, void **p, int cnt, unsigned long addr)
{
memcg_slab_free_hook(s, slab, p, cnt);
/*
* With KASAN enabled slab_free_freelist_hook modifies the freelist
* to remove objects, whose reuse must be delayed.
*/
- if (slab_free_freelist_hook(s, &head, &tail, &cnt))
+ if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt)))
do_slab_free(s, slab, head, tail, cnt, addr);
}
#ifdef CONFIG_KASAN_GENERIC
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
{
- do_slab_free(cache, virt_to_slab(x), x, NULL, 1, addr);
+ do_slab_free(cache, virt_to_slab(x), x, x, 1, addr);
}
#endif
-void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller)
+static inline struct kmem_cache *virt_to_cache(const void *obj)
{
- slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller);
+ struct slab *slab;
+
+ slab = virt_to_slab(obj);
+ if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__))
+ return NULL;
+ return slab->slab_cache;
}
+static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
+{
+ struct kmem_cache *cachep;
+
+ if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
+ !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
+ return s;
+
+ cachep = virt_to_cache(x);
+ if (WARN(cachep && cachep != s,
+ "%s: Wrong slab cache. %s but object is from %s\n",
+ __func__, s->name, cachep->name))
+ print_tracking(cachep, x);
+ return cachep;
+}
+
+/**
+ * kmem_cache_free - Deallocate an object
+ * @s: The cache the allocation was from.
+ * @x: The previously allocated object.
+ *
+ * Free an object which was previously allocated from this
+ * cache.
+ */
void kmem_cache_free(struct kmem_cache *s, void *x)
{
s = cache_from_obj(s, x);
if (!s)
return;
trace_kmem_cache_free(_RET_IP_, x, s);
- slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_);
+ slab_free(s, virt_to_slab(x), x, _RET_IP_);
}
EXPORT_SYMBOL(kmem_cache_free);
+static void free_large_kmalloc(struct folio *folio, void *object)
+{
+ unsigned int order = folio_order(folio);
+
+ if (WARN_ON_ONCE(order == 0))
+ pr_warn_once("object pointer: 0x%p\n", object);
+
+ kmemleak_free(object);
+ kasan_kfree_large(object);
+ kmsan_kfree_large(object);
+
+ lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
+ -(PAGE_SIZE << order));
+ folio_put(folio);
+}
+
+/**
+ * kfree - free previously allocated memory
+ * @object: pointer returned by kmalloc() or kmem_cache_alloc()
+ *
+ * If @object is NULL, no operation is performed.
+ */
+void kfree(const void *object)
+{
+ struct folio *folio;
+ struct slab *slab;
+ struct kmem_cache *s;
+ void *x = (void *)object;
+
+ trace_kfree(_RET_IP_, object);
+
+ if (unlikely(ZERO_OR_NULL_PTR(object)))
+ return;
+
+ folio = virt_to_folio(object);
+ if (unlikely(!folio_test_slab(folio))) {
+ free_large_kmalloc(folio, (void *)object);
+ return;
+ }
+
+ slab = folio_slab(folio);
+ s = slab->slab_cache;
+ slab_free(s, slab, x, _RET_IP_);
+}
+EXPORT_SYMBOL(kfree);
+
struct detached_freelist {
struct slab *slab;
void *tail;
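cache_from_obj() (also moved here) is the safety net for kmem_cache_free(): with CONFIG_SLAB_FREELIST_HARDENED or SLAB_CONSISTENCY_CHECKS enabled it looks up the object's real cache from its slab, warns on a mismatch, and frees into the cache it found rather than the one passed in. A hedged example of the misuse it catches (cache_a and cache_b are hypothetical):

	void *obj = kmem_cache_alloc(cache_a, GFP_KERNEL);

	kmem_cache_free(cache_b, obj);
	/* -> WARN: "kmem_cache_free: Wrong slab cache. cache_b but object is
	 *    from cache_a"; the object is still freed into cache_a. */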
@@ -3911,6 +4489,27 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
return same;
}
+/*
+ * Internal bulk free of objects that were not initialised by the post alloc
+ * hooks and thus should not be processed by the free hooks
+ */
+static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+{
+ if (!size)
+ return;
+
+ do {
+ struct detached_freelist df;
+
+ size = build_detached_freelist(s, size, p, &df);
+ if (!df.slab)
+ continue;
+
+ do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
+ _RET_IP_);
+ } while (likely(size));
+}
+
/* Note that interrupts must be enabled when calling this function. */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
@@ -3924,15 +4523,16 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
if (!df.slab)
continue;
- slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt,
- _RET_IP_);
+ slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size],
+ df.cnt, _RET_IP_);
} while (likely(size));
}
EXPORT_SYMBOL(kmem_cache_free_bulk);
#ifndef CONFIG_SLUB_TINY
-static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
- size_t size, void **p, struct obj_cgroup *objcg)
+static inline
+int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+ void **p)
{
struct kmem_cache_cpu *c;
unsigned long irqflags;
@@ -3986,6 +4586,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
c->freelist = get_freepointer(s, object);
p[i] = object;
maybe_wipe_obj_freeptr(s, p[i]);
+ stat(s, ALLOC_FASTPATH);
}
c->tid = next_tid(c->tid);
local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
@@ -3995,14 +4596,13 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
error:
slub_put_cpu_ptr(s->cpu_slab);
- slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
- kmem_cache_free_bulk(s, i, p);
+ __kmem_cache_free_bulk(s, i, p);
return 0;
}
#else /* CONFIG_SLUB_TINY */
static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
- size_t size, void **p, struct obj_cgroup *objcg)
+ size_t size, void **p)
{
int i;
@@ -4025,8 +4625,7 @@ static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
return i;
error:
- slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
- kmem_cache_free_bulk(s, i, p);
+ __kmem_cache_free_bulk(s, i, p);
return 0;
}
#endif /* CONFIG_SLUB_TINY */
@@ -4046,15 +4645,19 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
if (unlikely(!s))
return 0;
- i = __kmem_cache_alloc_bulk(s, flags, size, p, objcg);
+ i = __kmem_cache_alloc_bulk(s, flags, size, p);
/*
* memcg and kmem_cache debug support and memory initialization.
* Done outside of the IRQ disabled fastpath loop.
*/
- if (i != 0)
+ if (likely(i != 0)) {
slab_post_alloc_hook(s, objcg, flags, size, p,
slab_want_init_on_alloc(flags, s), s->object_size);
+ } else {
+ memcg_slab_alloc_error_hook(s, size, objcg);
+ }
+
return i;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
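The bulk-allocation error path also changed: a failed kmem_cache_alloc_bulk() now releases the partially allocated objects through the internal __kmem_cache_free_bulk() (which skips the free hooks, since the post-alloc hooks never ran on those objects) and reports the failure to memcg. The caller-visible contract is unchanged; a hedged usage sketch:

	void *objs[16];
	int n;

	n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
	if (!n)
		return -ENOMEM;		/* 0 on failure, the full count on success */

	/* ... use objs[0..n-1] ... */
	kmem_cache_free_bulk(cache, n, objs);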
@@ -4187,7 +4790,7 @@ static inline int calculate_order(unsigned int size)
* Doh this slab cannot be placed using slub_max_order.
*/
order = get_order(size);
- if (order <= MAX_ORDER)
+ if (order <= MAX_PAGE_ORDER)
return order;
return -ENOSYS;
}
@@ -4715,7 +5318,7 @@ __setup("slub_min_order=", setup_slub_min_order);
static int __init setup_slub_max_order(char *str)
{
get_option(&str, (int *)&slub_max_order);
- slub_max_order = min_t(unsigned int, slub_max_order, MAX_ORDER);
+ slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER);
if (slub_min_order > slub_max_order)
slub_min_order = slub_max_order;
@@ -4831,6 +5434,7 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
if (free == slab->objects) {
list_move(&slab->slab_list, &discard);
+ slab_clear_node_partial(slab);
n->nr_partial--;
dec_slabs_node(s, node, slab->objects);
} else if (free <= SHRINK_PROMOTE_MAX)
diff --git a/mm/sparse.c b/mm/sparse.c
index 77d91e565045..338cf946dee8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -792,6 +792,13 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
unsigned long section_nr = pfn_to_section_nr(pfn);
/*
+ * Mark the section invalid so that valid_section()
+	 * returns false. This prevents code from dereferencing

+ * ms->usage array.
+ */
+ ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
+
+ /*
* When removing an early section, the usage map is kept (as the
* usage maps of other sections fall into the same page). It
* will be re-used when re-adding the section - which is then no
@@ -799,16 +806,10 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
* was allocated during boot.
*/
if (!PageReserved(virt_to_page(ms->usage))) {
- kfree(ms->usage);
- ms->usage = NULL;
+ kfree_rcu(ms->usage, rcu);
+ WRITE_ONCE(ms->usage, NULL);
}
memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
- /*
- * Mark the section invalid so that valid_section()
- * return false. This prevents code from dereferencing
- * ms->usage array.
- */
- ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
}
/*
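The sparse.c change is a standard RCU-deferred teardown: the section is unpublished first (clearing SECTION_HAS_MEM_MAP so valid_section() fails), and ms->usage is then released with kfree_rcu() instead of kfree(), so a reader that passed the validity check just before it was cleared can still dereference the old pointer until its grace period expires. The pattern in isolation (struct and field names illustrative, except that kfree_rcu() genuinely takes the name of an embedded struct rcu_head):

	struct usage {
		unsigned long data;
		struct rcu_head rcu;		/* required by kfree_rcu(ptr, rcu) */
	};

	/* teardown: make readers fail their check, then defer the actual free */
	section->flags &= ~SECTION_VALID;
	kfree_rcu(section->usage, rcu);		/* freed only after a grace period */
	WRITE_ONCE(section->usage, NULL);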
diff --git a/mm/swap.h b/mm/swap.h
index 73c332ee4d91..758c46ca671e 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -10,7 +10,8 @@ struct mempolicy;
/* linux/mm/page_io.c */
int sio_pool_init(void);
struct swap_iocb;
-void swap_readpage(struct page *page, bool do_poll, struct swap_iocb **plug);
+void swap_read_folio(struct folio *folio, bool do_poll,
+ struct swap_iocb **plug);
void __swap_read_unplug(struct swap_iocb *plug);
static inline void swap_read_unplug(struct swap_iocb *plug)
{
@@ -19,7 +20,7 @@ static inline void swap_read_unplug(struct swap_iocb *plug)
}
void swap_write_unplug(struct swap_iocb *sio);
int swap_writepage(struct page *page, struct writeback_control *wbc);
-void __swap_writepage(struct page *page, struct writeback_control *wbc);
+void __swap_writepage(struct folio *folio, struct writeback_control *wbc);
/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
@@ -45,25 +46,24 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
struct folio *filemap_get_incore_folio(struct address_space *mapping,
pgoff_t index);
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
- struct vm_area_struct *vma,
- unsigned long addr,
- struct swap_iocb **plug);
-struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
- struct mempolicy *mpol, pgoff_t ilx,
- bool *new_page_allocated);
-struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
- struct mempolicy *mpol, pgoff_t ilx);
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ struct vm_area_struct *vma, unsigned long addr,
+ struct swap_iocb **plug);
+struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
+ struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
+ bool skip_if_exists);
+struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
+ struct mempolicy *mpol, pgoff_t ilx);
struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
struct vm_fault *vmf);
static inline unsigned int folio_swap_flags(struct folio *folio)
{
- return page_swap_info(&folio->page)->flags;
+ return swp_swap_info(folio->swap)->flags;
}
#else /* CONFIG_SWAP */
struct swap_iocb;
-static inline void swap_readpage(struct page *page, bool do_poll,
+static inline void swap_read_folio(struct folio *folio, bool do_poll,
struct swap_iocb **plug)
{
}
@@ -80,7 +80,7 @@ static inline void show_swap_cache_info(void)
{
}
-static inline struct page *swap_cluster_readahead(swp_entry_t entry,
+static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)
{
return NULL;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 85d9e5806a6a..e671266ad772 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -410,13 +410,12 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping,
return folio;
}
-struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
- struct mempolicy *mpol, pgoff_t ilx,
- bool *new_page_allocated)
+struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
+ bool skip_if_exists)
{
struct swap_info_struct *si;
struct folio *folio;
- struct page *page;
void *shadow = NULL;
*new_page_allocated = false;
@@ -433,10 +432,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
*/
folio = filemap_get_folio(swap_address_space(entry),
swp_offset(entry));
- if (!IS_ERR(folio)) {
- page = folio_file_page(folio, swp_offset(entry));
- goto got_page;
- }
+ if (!IS_ERR(folio))
+ goto got_folio;
/*
* Just skip read ahead for unused swap slot.
@@ -450,7 +447,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
goto fail_put_swap;
/*
- * Get a new page to read into from swap. Allocate it now,
+ * Get a new folio to read into from swap. Allocate it now,
* before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
* cause any racers to loop around until we add it to cache.
*/
@@ -471,17 +468,28 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
goto fail_put_swap;
/*
+ * Protect against a recursive call to __read_swap_cache_async()
+ * on the same entry waiting forever here because SWAP_HAS_CACHE
+ * is set but the folio is not the swap cache yet. This can
+ * happen today if mem_cgroup_swapin_charge_folio() below
+ * triggers reclaim through zswap, which may call
+ * __read_swap_cache_async() in the writeback path.
+ */
+ if (skip_if_exists)
+ goto fail_put_swap;
+
+ /*
* We might race against __delete_from_swap_cache(), and
* stumble across a swap_map entry whose SWAP_HAS_CACHE
* has not yet been cleared. Or race against another
* __read_swap_cache_async(), which has set SWAP_HAS_CACHE
- * in swap_map, but not yet added its page to swap cache.
+ * in swap_map, but not yet added its folio to swap cache.
*/
schedule_timeout_uninterruptible(1);
}
/*
- * The swap entry is ours to swap in. Prepare the new page.
+ * The swap entry is ours to swap in. Prepare the new folio.
*/
__folio_set_locked(folio);
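The new skip_if_exists argument exists only to break the livelock described above: if allocation-side reclaim (for example zswap writeback) re-enters __read_swap_cache_async() for an entry whose SWAP_HAS_CACHE bit is set but whose folio has not reached the swap cache yet, waiting would spin forever. A hedged sketch of a writeback-side caller (argument values illustrative):

	/* don't wait out a racing swapin; give up on this entry instead */
	folio = __read_swap_cache_async(entry, GFP_KERNEL, mpol, ilx,
					&page_was_allocated, true);
	if (!folio)
		return -EEXIST;		/* somebody else owns the swapcache slot */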
@@ -502,10 +510,9 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
/* Caller will initiate read into locked folio */
folio_add_lru(folio);
*new_page_allocated = true;
- page = &folio->page;
-got_page:
+got_folio:
put_swap_device(si);
- return page;
+ return folio;
fail_unlock:
put_swap_folio(folio, entry);
@@ -523,26 +530,26 @@ fail_put_swap:
* the swap entry is no longer in use.
*
* get/put_swap_device() aren't needed to call this function, because
- * __read_swap_cache_async() call them and swap_readpage() holds the
+ * __read_swap_cache_async() call them and swap_read_folio() holds the
* swap cache folio lock.
*/
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
- struct vm_area_struct *vma,
- unsigned long addr, struct swap_iocb **plug)
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ struct vm_area_struct *vma, unsigned long addr,
+ struct swap_iocb **plug)
{
bool page_allocated;
struct mempolicy *mpol;
pgoff_t ilx;
- struct page *page;
+ struct folio *folio;
mpol = get_vma_policy(vma, addr, 0, &ilx);
- page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
- &page_allocated);
+ folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+ &page_allocated, false);
mpol_cond_put(mpol);
if (page_allocated)
- swap_readpage(page, false, plug);
- return page;
+ swap_read_folio(folio, false, plug);
+ return folio;
}
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
@@ -613,7 +620,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
* @mpol: NUMA memory allocation policy to be applied
* @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
*
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
*
* Primitive swap readahead code. We simply read an aligned block of
* (1 << page_cluster) entries in the swap area. This method is chosen
@@ -624,10 +631,10 @@ static unsigned long swapin_nr_pages(unsigned long offset)
* are used for every page of the readahead: neighbouring pages on swap
* are fairly likely to have been swapped out from the same node.
*/
-struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
+struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct mempolicy *mpol, pgoff_t ilx)
{
- struct page *page;
+ struct folio *folio;
unsigned long entry_offset = swp_offset(entry);
unsigned long offset = entry_offset;
unsigned long start_offset, end_offset;
@@ -652,30 +659,31 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
blk_start_plug(&plug);
for (offset = start_offset; offset <= end_offset ; offset++) {
/* Ok, do the async read-ahead now */
- page = __read_swap_cache_async(
+ folio = __read_swap_cache_async(
swp_entry(swp_type(entry), offset),
- gfp_mask, mpol, ilx, &page_allocated);
- if (!page)
+ gfp_mask, mpol, ilx, &page_allocated, false);
+ if (!folio)
continue;
if (page_allocated) {
- swap_readpage(page, false, &splug);
+ swap_read_folio(folio, false, &splug);
if (offset != entry_offset) {
- SetPageReadahead(page);
+ folio_set_readahead(folio);
count_vm_event(SWAP_RA);
}
}
- put_page(page);
+ folio_put(folio);
}
blk_finish_plug(&plug);
swap_read_unplug(splug);
lru_add_drain(); /* Push any new pages onto the LRU now */
skip:
/* The page was likely read above, so no need for plugging here */
- page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
- &page_allocated);
+ folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+ &page_allocated, false);
if (unlikely(page_allocated))
- swap_readpage(page, false, NULL);
- return page;
+ swap_read_folio(folio, false, NULL);
+ zswap_folio_swapin(folio);
+ return folio;
}
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -779,7 +787,7 @@ static void swap_ra_info(struct vm_fault *vmf,
* @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
* @vmf: fault information
*
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
*
* Primitive swap readahead code. We simply read in a few pages whose
* virtual addresses are around the fault address in the same vma.
@@ -787,13 +795,12 @@ static void swap_ra_info(struct vm_fault *vmf,
* Caller must hold read mmap_lock if vmf->vma is not NULL.
*
*/
-static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
- struct mempolicy *mpol, pgoff_t targ_ilx,
- struct vm_fault *vmf)
+static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
+ struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
{
struct blk_plug plug;
struct swap_iocb *splug = NULL;
- struct page *page;
+ struct folio *folio;
pte_t *pte = NULL, pentry;
unsigned long addr;
swp_entry_t entry;
@@ -826,18 +833,18 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
continue;
pte_unmap(pte);
pte = NULL;
- page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
- &page_allocated);
- if (!page)
+ folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+ &page_allocated, false);
+ if (!folio)
continue;
if (page_allocated) {
- swap_readpage(page, false, &splug);
+ swap_read_folio(folio, false, &splug);
if (i != ra_info.offset) {
- SetPageReadahead(page);
+ folio_set_readahead(folio);
count_vm_event(SWAP_RA);
}
}
- put_page(page);
+ folio_put(folio);
}
if (pte)
pte_unmap(pte);
@@ -845,12 +852,13 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
swap_read_unplug(splug);
lru_add_drain();
skip:
- /* The page was likely read above, so no need for plugging here */
- page = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
- &page_allocated);
+ /* The folio was likely read above, so no need for plugging here */
+ folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
+ &page_allocated, false);
if (unlikely(page_allocated))
- swap_readpage(page, false, NULL);
- return page;
+ swap_read_folio(folio, false, NULL);
+ zswap_folio_swapin(folio);
+ return folio;
}
/**
@@ -870,14 +878,17 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
{
struct mempolicy *mpol;
pgoff_t ilx;
- struct page *page;
+ struct folio *folio;
mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
- page = swap_use_vma_readahead() ?
+ folio = swap_use_vma_readahead() ?
swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
mpol_cond_put(mpol);
- return page;
+
+ if (!folio)
+ return NULL;
+ return folio_file_page(folio, swp_offset(entry));
}
#ifdef CONFIG_SYSFS
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4bc70f459164..3eec686484ef 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -227,14 +227,14 @@ offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
BUG();
}
-sector_t swap_page_sector(struct page *page)
+sector_t swap_folio_sector(struct folio *folio)
{
- struct swap_info_struct *sis = page_swap_info(page);
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
struct swap_extent *se;
sector_t sector;
pgoff_t offset;
- offset = __page_file_index(page);
+ offset = swp_offset(folio->swap);
se = offset_to_swap_extent(sis, offset);
sector = se->start_block + (offset - se->start_page);
return sector << (PAGE_SHIFT - 9);
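swap_folio_sector() keeps the arithmetic of the old swap_page_sector(): locate the swap extent covering the entry's offset, add the offset within the extent to the extent's starting block, then convert page-sized blocks to 512-byte sectors. Worked through for one hypothetical extent on a 4K-page system:

	/* extent: start_page = 100, start_block = 5000; folio offset = 103 */
	sector = 5000 + (103 - 100);		/* = 5003 page-sized blocks */
	sector <<= (PAGE_SHIFT - 9);		/* x8 on 4K pages -> 40024 sectors */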
@@ -1495,9 +1495,9 @@ int swp_swapcount(swp_entry_t entry)
do {
page = list_next_entry(page, lru);
- map = kmap_atomic(page);
+ map = kmap_local_page(page);
tmp_count = map[offset];
- kunmap_atomic(map);
+ kunmap_local(map);
count += (tmp_count & ~COUNT_CONTINUED) * n;
n *= (SWAP_CONT_MAX + 1);
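The kmap_atomic()/kunmap_atomic() pairs in this file are converted to kmap_local_page()/kunmap_local(). The local variants still provide a temporary, CPU-local mapping on HIGHMEM configurations, but they do not disable pagefaults or preemption, so the mapped sections may now be preempted; the only ordering rule is that nested mappings are undone in reverse order. The resulting idiom:

	unsigned char *map;

	map = kmap_local_page(page) + offset;	/* temporary thread-local mapping */
	count = *map;
	kunmap_local(map);			/* accepts an address inside the page */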
@@ -1741,18 +1741,24 @@ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, swp_entry_t entry, struct folio *folio)
{
- struct page *page = folio_file_page(folio, swp_offset(entry));
- struct page *swapcache;
+ struct page *page;
+ struct folio *swapcache;
spinlock_t *ptl;
pte_t *pte, new_pte, old_pte;
- bool hwpoisoned = PageHWPoison(page);
+ bool hwpoisoned = false;
int ret = 1;
- swapcache = page;
- page = ksm_might_need_to_copy(page, vma, addr);
- if (unlikely(!page))
+ swapcache = folio;
+ folio = ksm_might_need_to_copy(folio, vma, addr);
+ if (unlikely(!folio))
return -ENOMEM;
- else if (unlikely(PTR_ERR(page) == -EHWPOISON))
+ else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
+ hwpoisoned = true;
+ folio = swapcache;
+ }
+
+ page = folio_file_page(folio, swp_offset(entry));
+ if (PageHWPoison(page))
hwpoisoned = true;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ -1764,13 +1770,12 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
old_pte = ptep_get(pte);
- if (unlikely(hwpoisoned || !PageUptodate(page))) {
+ if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
swp_entry_t swp_entry;
dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
if (hwpoisoned) {
- swp_entry = make_hwpoison_entry(swapcache);
- page = swapcache;
+ swp_entry = make_hwpoison_entry(page);
} else {
swp_entry = make_poisoned_swp_entry();
}
@@ -1784,31 +1789,27 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
* when reading from swap. This metadata may be indexed by swap entry
* so this must be called before swap_free().
*/
- arch_swap_restore(entry, page_folio(page));
-
- /* See do_swap_page() */
- BUG_ON(!PageAnon(page) && PageMappedToDisk(page));
- BUG_ON(PageAnon(page) && PageAnonExclusive(page));
+ arch_swap_restore(entry, folio);
dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
- get_page(page);
- if (page == swapcache) {
+ folio_get(folio);
+ if (folio == swapcache) {
rmap_t rmap_flags = RMAP_NONE;
/*
- * See do_swap_page(): PageWriteback() would be problematic.
- * However, we do a wait_on_page_writeback() just before this
- * call and have the page locked.
+ * See do_swap_page(): writeback would be problematic.
+ * However, we do a folio_wait_writeback() just before this
+ * call and have the folio locked.
*/
- VM_BUG_ON_PAGE(PageWriteback(page), page);
+ VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
if (pte_swp_exclusive(old_pte))
rmap_flags |= RMAP_EXCLUSIVE;
- page_add_anon_rmap(page, vma, addr, rmap_flags);
+ folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
} else { /* ksm created a completely new copy */
- page_add_new_anon_rmap(page, vma, addr);
- lru_cache_add_inactive_or_unevictable(page, vma);
+ folio_add_new_anon_rmap(folio, vma, addr);
+ folio_add_lru_vma(folio, vma);
}
new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
if (pte_swp_soft_dirty(old_pte))
@@ -1821,9 +1822,9 @@ setpte:
out:
if (pte)
pte_unmap_unlock(pte, ptl);
- if (page != swapcache) {
- unlock_page(page);
- put_page(page);
+ if (folio != swapcache) {
+ folio_unlock(folio);
+ folio_put(folio);
}
return ret;
}
@@ -2224,7 +2225,7 @@ EXPORT_SYMBOL_GPL(add_swap_extent);
/*
* A `swap extent' is a simple thing which maps a contiguous range of pages
* onto a contiguous range of disk blocks. A rbtree of swap extents is
- * built at swapon time and is then used at swap_writepage/swap_readpage
+ * built at swapon time and is then used at swap_writepage/swap_read_folio
* time for locating where on disk a page belongs.
*
* If the swapfile is an S_ISBLK block device, a single extent is installed.
@@ -3368,18 +3369,12 @@ struct swap_info_struct *swp_swap_info(swp_entry_t entry)
return swap_type_to_swap_info(swp_type(entry));
}
-struct swap_info_struct *page_swap_info(struct page *page)
-{
- swp_entry_t entry = page_swap_entry(page);
- return swp_swap_info(entry);
-}
-
/*
* out-of-line methods to avoid include hell.
*/
struct address_space *swapcache_mapping(struct folio *folio)
{
- return page_swap_info(&folio->page)->swap_file->f_mapping;
+ return swp_swap_info(folio->swap)->swap_file->f_mapping;
}
EXPORT_SYMBOL_GPL(swapcache_mapping);
@@ -3477,9 +3472,9 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
if (!(count & COUNT_CONTINUED))
goto out_unlock_cont;
- map = kmap_atomic(list_page) + offset;
+ map = kmap_local_page(list_page) + offset;
count = *map;
- kunmap_atomic(map);
+ kunmap_local(map);
/*
* If this continuation count now has some space in it,
@@ -3529,7 +3524,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
spin_lock(&si->cont_lock);
offset &= ~PAGE_MASK;
page = list_next_entry(head, lru);
- map = kmap_atomic(page) + offset;
+ map = kmap_local_page(page) + offset;
if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
goto init_map; /* jump over SWAP_CONT_MAX checks */
@@ -3539,27 +3534,27 @@ static bool swap_count_continued(struct swap_info_struct *si,
* Think of how you add 1 to 999
*/
while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
- kunmap_atomic(map);
+ kunmap_local(map);
page = list_next_entry(page, lru);
BUG_ON(page == head);
- map = kmap_atomic(page) + offset;
+ map = kmap_local_page(page) + offset;
}
if (*map == SWAP_CONT_MAX) {
- kunmap_atomic(map);
+ kunmap_local(map);
page = list_next_entry(page, lru);
if (page == head) {
ret = false; /* add count continuation */
goto out;
}
- map = kmap_atomic(page) + offset;
+ map = kmap_local_page(page) + offset;
init_map: *map = 0; /* we didn't zero the page */
}
*map += 1;
- kunmap_atomic(map);
+ kunmap_local(map);
while ((page = list_prev_entry(page, lru)) != head) {
- map = kmap_atomic(page) + offset;
+ map = kmap_local_page(page) + offset;
*map = COUNT_CONTINUED;
- kunmap_atomic(map);
+ kunmap_local(map);
}
ret = true; /* incremented */
@@ -3569,21 +3564,21 @@ init_map: *map = 0; /* we didn't zero the page */
*/
BUG_ON(count != COUNT_CONTINUED);
while (*map == COUNT_CONTINUED) {
- kunmap_atomic(map);
+ kunmap_local(map);
page = list_next_entry(page, lru);
BUG_ON(page == head);
- map = kmap_atomic(page) + offset;
+ map = kmap_local_page(page) + offset;
}
BUG_ON(*map == 0);
*map -= 1;
if (*map == 0)
count = 0;
- kunmap_atomic(map);
+ kunmap_local(map);
while ((page = list_prev_entry(page, lru)) != head) {
- map = kmap_atomic(page) + offset;
+ map = kmap_local_page(page) + offset;
*map = SWAP_CONT_MAX | count;
count = COUNT_CONTINUED;
- kunmap_atomic(map);
+ kunmap_local(map);
}
ret = count == COUNT_CONTINUED;
}
diff --git a/mm/truncate.c b/mm/truncate.c
index 8e3aa9e8618e..725b150e47ac 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -250,10 +250,9 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
/*
* Used to get rid of pages on hardware memory corruption.
*/
-int generic_error_remove_page(struct address_space *mapping, struct page *page)
+int generic_error_remove_folio(struct address_space *mapping,
+ struct folio *folio)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
-
if (!mapping)
return -EINVAL;
/*
@@ -262,13 +261,26 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page)
*/
if (!S_ISREG(mapping->host->i_mode))
return -EIO;
- return truncate_inode_folio(mapping, page_folio(page));
+ return truncate_inode_folio(mapping, folio);
}
-EXPORT_SYMBOL(generic_error_remove_page);
+EXPORT_SYMBOL(generic_error_remove_folio);
-static long mapping_evict_folio(struct address_space *mapping,
- struct folio *folio)
+/**
+ * mapping_evict_folio() - Remove an unused folio from the page-cache.
+ * @mapping: The mapping this folio belongs to.
+ * @folio: The folio to remove.
+ *
+ * Safely remove one folio from the page cache.
+ * It only drops clean, unused folios.
+ *
+ * Context: Folio must be locked.
+ * Return: The number of pages successfully removed.
+ */
+long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
{
+ /* The page may have been truncated before it was locked */
+ if (!mapping)
+ return 0;
if (folio_test_dirty(folio) || folio_test_writeback(folio))
return 0;
/* The refcount will be elevated if any page in the folio is mapped */
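generic_error_remove_page() becomes generic_error_remove_folio(), and mapping_evict_folio() absorbs the NULL-mapping check so it can replace the removed invalidate_inode_page() as an exported helper. Filesystems that used the old helper for hwpoison handling are expected to wire up the folio version in their address_space_operations; a hedged sketch (the callback name follows the folio conversion in this series and should be verified against the final headers):

	static const struct address_space_operations foo_aops = {
		/* memory-failure: drop the clean folio from the page cache */
		.error_remove_folio	= generic_error_remove_folio,
		/* ... */
	};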
@@ -282,27 +294,6 @@ static long mapping_evict_folio(struct address_space *mapping,
}
/**
- * invalidate_inode_page() - Remove an unused page from the pagecache.
- * @page: The page to remove.
- *
- * Safely invalidate one page from its pagecache mapping.
- * It only drops clean, unused pages.
- *
- * Context: Page must be locked.
- * Return: The number of pages successfully removed.
- */
-long invalidate_inode_page(struct page *page)
-{
- struct folio *folio = page_folio(page);
- struct address_space *mapping = folio_mapping(folio);
-
- /* The page may have been truncated before it was locked */
- if (!mapping)
- return 0;
- return mapping_evict_folio(mapping, folio);
-}
-
-/**
* truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
* @mapping: mapping to truncate
* @lstart: offset from which to truncate
@@ -560,9 +551,9 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
EXPORT_SYMBOL(invalidate_mapping_pages);
/*
- * This is like invalidate_inode_page(), except it ignores the page's
+ * This is like mapping_evict_folio(), except it ignores the folio's
* refcount. We do this because invalidate_inode_pages2() needs stronger
- * invalidation guarantees, and cannot afford to leave pages behind because
+ * invalidation guarantees, and cannot afford to leave folios behind because
* shrink_page_list() has a temp ref on them, or because they're transiently
* sitting in the folio_add_lru() caches.
*/
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 0b6ca553bebe..216ab4c8621f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -114,9 +114,9 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
/* Usually, cache pages are already added to LRU */
if (newly_allocated)
folio_add_lru(folio);
- page_add_file_rmap(page, dst_vma, false);
+ folio_add_file_rmap_pte(folio, page, dst_vma);
} else {
- page_add_new_anon_rmap(page, dst_vma, dst_addr);
+ folio_add_new_anon_rmap(folio, dst_vma, dst_addr);
folio_add_lru_vma(folio, dst_vma);
}
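The large hunk below adds the kernel side of UFFDIO_MOVE: move_pages() remaps existing anonymous pages from a source range into the faulting destination range by rewriting page tables, rather than copying data as UFFDIO_COPY does. A hedged userspace sketch of driving it from a fault-handling thread (field names per the uffdio_move ABI introduced alongside this code; handle_partial_move() is a placeholder and error handling is trimmed):

	struct uffdio_move mv = {
		.dst  = fault_addr & ~(page_size - 1),
		.src  = (unsigned long)staging_page,
		.len  = page_size,
		.mode = 0,		/* or UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES */
	};

	if (ioctl(uffd, UFFDIO_MOVE, &mv) < 0 || mv.move != mv.len)
		handle_partial_move(&mv);	/* retry with src+mv.move, dst+mv.move */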
@@ -842,3 +842,626 @@ out_unlock:
mmap_read_unlock(dst_mm);
return err;
}
+
+
+void double_pt_lock(spinlock_t *ptl1,
+ spinlock_t *ptl2)
+ __acquires(ptl1)
+ __acquires(ptl2)
+{
+ spinlock_t *ptl_tmp;
+
+ if (ptl1 > ptl2) {
+ /* exchange ptl1 and ptl2 */
+ ptl_tmp = ptl1;
+ ptl1 = ptl2;
+ ptl2 = ptl_tmp;
+ }
+ /* lock in virtual address order to avoid lock inversion */
+ spin_lock(ptl1);
+ if (ptl1 != ptl2)
+ spin_lock_nested(ptl2, SINGLE_DEPTH_NESTING);
+ else
+ __acquire(ptl2);
+}
+
+void double_pt_unlock(spinlock_t *ptl1,
+ spinlock_t *ptl2)
+ __releases(ptl1)
+ __releases(ptl2)
+{
+ spin_unlock(ptl1);
+ if (ptl1 != ptl2)
+ spin_unlock(ptl2);
+ else
+ __release(ptl2);
+}
+
+
+static int move_present_pte(struct mm_struct *mm,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma,
+ unsigned long dst_addr, unsigned long src_addr,
+ pte_t *dst_pte, pte_t *src_pte,
+ pte_t orig_dst_pte, pte_t orig_src_pte,
+ spinlock_t *dst_ptl, spinlock_t *src_ptl,
+ struct folio *src_folio)
+{
+ int err = 0;
+
+ double_pt_lock(dst_ptl, src_ptl);
+
+ if (!pte_same(*src_pte, orig_src_pte) ||
+ !pte_same(*dst_pte, orig_dst_pte)) {
+ err = -EAGAIN;
+ goto out;
+ }
+ if (folio_test_large(src_folio) ||
+ folio_maybe_dma_pinned(src_folio) ||
+ !PageAnonExclusive(&src_folio->page)) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ folio_move_anon_rmap(src_folio, dst_vma);
+ WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
+
+ orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte);
+ /* Folio got pinned from under us. Put it back and fail the move. */
+ if (folio_maybe_dma_pinned(src_folio)) {
+ set_pte_at(mm, src_addr, src_pte, orig_src_pte);
+ err = -EBUSY;
+ goto out;
+ }
+
+ orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
+ /* Follow mremap() behavior and treat the entry dirty after the move */
+ orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma);
+
+ set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte);
+out:
+ double_pt_unlock(dst_ptl, src_ptl);
+ return err;
+}
+
+static int move_swap_pte(struct mm_struct *mm,
+ unsigned long dst_addr, unsigned long src_addr,
+ pte_t *dst_pte, pte_t *src_pte,
+ pte_t orig_dst_pte, pte_t orig_src_pte,
+ spinlock_t *dst_ptl, spinlock_t *src_ptl)
+{
+ if (!pte_swp_exclusive(orig_src_pte))
+ return -EBUSY;
+
+ double_pt_lock(dst_ptl, src_ptl);
+
+ if (!pte_same(*src_pte, orig_src_pte) ||
+ !pte_same(*dst_pte, orig_dst_pte)) {
+ double_pt_unlock(dst_ptl, src_ptl);
+ return -EAGAIN;
+ }
+
+ orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
+ set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
+ double_pt_unlock(dst_ptl, src_ptl);
+
+ return 0;
+}
+
+/*
+ * The mmap_lock for reading is held by the caller. Just move the page
+ * from src_pmd to dst_pmd if possible, and return 0 if the page was
+ * moved, or a negative error code otherwise.
+ */
+static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma,
+ unsigned long dst_addr, unsigned long src_addr,
+ __u64 mode)
+{
+ swp_entry_t entry;
+ pte_t orig_src_pte, orig_dst_pte;
+ pte_t src_folio_pte;
+ spinlock_t *src_ptl, *dst_ptl;
+ pte_t *src_pte = NULL;
+ pte_t *dst_pte = NULL;
+
+ struct folio *src_folio = NULL;
+ struct anon_vma *src_anon_vma = NULL;
+ struct mmu_notifier_range range;
+ int err = 0;
+
+ flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE);
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
+ src_addr, src_addr + PAGE_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
+retry:
+ dst_pte = pte_offset_map_nolock(mm, dst_pmd, dst_addr, &dst_ptl);
+
+ /* Retry if a huge pmd materialized from under us */
+ if (unlikely(!dst_pte)) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ src_pte = pte_offset_map_nolock(mm, src_pmd, src_addr, &src_ptl);
+
+ /*
+ * We held the mmap_lock for reading so MADV_DONTNEED
+ * can zap transparent huge pages under us, or the
+ * transparent huge page fault can establish new
+ * transparent huge pages under us.
+ */
+ if (unlikely(!src_pte)) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ /* Sanity checks before the operation */
+ if (WARN_ON_ONCE(pmd_none(*dst_pmd)) || WARN_ON_ONCE(pmd_none(*src_pmd)) ||
+ WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ spin_lock(dst_ptl);
+ orig_dst_pte = *dst_pte;
+ spin_unlock(dst_ptl);
+ if (!pte_none(orig_dst_pte)) {
+ err = -EEXIST;
+ goto out;
+ }
+
+ spin_lock(src_ptl);
+ orig_src_pte = *src_pte;
+ spin_unlock(src_ptl);
+ if (pte_none(orig_src_pte)) {
+ if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES))
+ err = -ENOENT;
+ else /* nothing to do to move a hole */
+ err = 0;
+ goto out;
+ }
+
+	/* If PTE changed after we locked the folio then start over */
+ if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ if (pte_present(orig_src_pte)) {
+ /*
+ * Pin and lock both source folio and anon_vma. Since we are in
+		 * an RCU read section, we can't block, so on contention we have to
+ * unmap the ptes, obtain the lock and retry.
+ */
+ if (!src_folio) {
+ struct folio *folio;
+
+ /*
+ * Pin the page while holding the lock to be sure the
+ * page isn't freed under us
+ */
+ spin_lock(src_ptl);
+ if (!pte_same(orig_src_pte, *src_pte)) {
+ spin_unlock(src_ptl);
+ err = -EAGAIN;
+ goto out;
+ }
+
+ folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
+ if (!folio || !PageAnonExclusive(&folio->page)) {
+ spin_unlock(src_ptl);
+ err = -EBUSY;
+ goto out;
+ }
+
+ folio_get(folio);
+ src_folio = folio;
+ src_folio_pte = orig_src_pte;
+ spin_unlock(src_ptl);
+
+ if (!folio_trylock(src_folio)) {
+ pte_unmap(&orig_src_pte);
+ pte_unmap(&orig_dst_pte);
+ src_pte = dst_pte = NULL;
+ /* now we can block and wait */
+ folio_lock(src_folio);
+ goto retry;
+ }
+
+ if (WARN_ON_ONCE(!folio_test_anon(src_folio))) {
+ err = -EBUSY;
+ goto out;
+ }
+ }
+
+ /* at this point we have src_folio locked */
+ if (folio_test_large(src_folio)) {
+ /* split_folio() can block */
+ pte_unmap(&orig_src_pte);
+ pte_unmap(&orig_dst_pte);
+ src_pte = dst_pte = NULL;
+ err = split_folio(src_folio);
+ if (err)
+ goto out;
+ /* have to reacquire the folio after it got split */
+ folio_unlock(src_folio);
+ folio_put(src_folio);
+ src_folio = NULL;
+ goto retry;
+ }
+
+ if (!src_anon_vma) {
+ /*
+ * folio_referenced walks the anon_vma chain
+ * without the folio lock. Serialize against it with
+ * the anon_vma lock, the folio lock is not enough.
+ */
+ src_anon_vma = folio_get_anon_vma(src_folio);
+ if (!src_anon_vma) {
+ /* page was unmapped from under us */
+ err = -EAGAIN;
+ goto out;
+ }
+ if (!anon_vma_trylock_write(src_anon_vma)) {
+ pte_unmap(&orig_src_pte);
+ pte_unmap(&orig_dst_pte);
+ src_pte = dst_pte = NULL;
+ /* now we can block and wait */
+ anon_vma_lock_write(src_anon_vma);
+ goto retry;
+ }
+ }
+
+ err = move_present_pte(mm, dst_vma, src_vma,
+ dst_addr, src_addr, dst_pte, src_pte,
+ orig_dst_pte, orig_src_pte,
+ dst_ptl, src_ptl, src_folio);
+ } else {
+ entry = pte_to_swp_entry(orig_src_pte);
+ if (non_swap_entry(entry)) {
+ if (is_migration_entry(entry)) {
+ pte_unmap(&orig_src_pte);
+ pte_unmap(&orig_dst_pte);
+ src_pte = dst_pte = NULL;
+ migration_entry_wait(mm, src_pmd, src_addr);
+ err = -EAGAIN;
+ } else
+ err = -EFAULT;
+ goto out;
+ }
+
+ err = move_swap_pte(mm, dst_addr, src_addr,
+ dst_pte, src_pte,
+ orig_dst_pte, orig_src_pte,
+ dst_ptl, src_ptl);
+ }
+
+out:
+ if (src_anon_vma) {
+ anon_vma_unlock_write(src_anon_vma);
+ put_anon_vma(src_anon_vma);
+ }
+ if (src_folio) {
+ folio_unlock(src_folio);
+ folio_put(src_folio);
+ }
+ if (dst_pte)
+ pte_unmap(dst_pte);
+ if (src_pte)
+ pte_unmap(src_pte);
+ mmu_notifier_invalidate_range_end(&range);
+
+ return err;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline bool move_splits_huge_pmd(unsigned long dst_addr,
+ unsigned long src_addr,
+ unsigned long src_end)
+{
+ return (src_addr & ~HPAGE_PMD_MASK) || (dst_addr & ~HPAGE_PMD_MASK) ||
+ src_end - src_addr < HPAGE_PMD_SIZE;
+}
+#else
+static inline bool move_splits_huge_pmd(unsigned long dst_addr,
+ unsigned long src_addr,
+ unsigned long src_end)
+{
+ /* This is unreachable anyway, just to avoid warnings when HPAGE_PMD_SIZE==0 */
+ return false;
+}
+#endif
+
+static inline bool vma_move_compatible(struct vm_area_struct *vma)
+{
+ return !(vma->vm_flags & (VM_PFNMAP | VM_IO | VM_HUGETLB |
+ VM_MIXEDMAP | VM_SHADOW_STACK));
+}
+
+static int validate_move_areas(struct userfaultfd_ctx *ctx,
+ struct vm_area_struct *src_vma,
+ struct vm_area_struct *dst_vma)
+{
+ /* Only allow moving if both have the same access and protection */
+ if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||
+ pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot))
+ return -EINVAL;
+
+ /* Only allow moving if both are mlocked or both aren't */
+ if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))
+ return -EINVAL;
+
+ /*
+ * For now, we keep it simple and only move between writable VMAs.
+	 * Access flags are equal, therefore checking only the source is enough.
+ */
+ if (!(src_vma->vm_flags & VM_WRITE))
+ return -EINVAL;
+
+ /* Check if vma flags indicate content which can be moved */
+ if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma))
+ return -EINVAL;
+
+ /* Ensure dst_vma is registered in uffd we are operating on */
+ if (!dst_vma->vm_userfaultfd_ctx.ctx ||
+ dst_vma->vm_userfaultfd_ctx.ctx != ctx)
+ return -EINVAL;
+
+ /* Only allow moving across anonymous vmas */
+ if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
+ return -EINVAL;
+
+ /*
+	 * Ensure the dst_vma has an anon_vma or this page
+ * would get a NULL anon_vma when moved in the
+ * dst_vma.
+ */
+ if (unlikely(anon_vma_prepare(dst_vma)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * move_pages - move arbitrary anonymous pages of an existing vma
+ * @ctx: pointer to the userfaultfd context
+ * @mm: the address space to move pages
+ * @dst_start: start of the destination virtual memory range
+ * @src_start: start of the source virtual memory range
+ * @len: length of the virtual memory range
+ * @mode: flags from uffdio_move.mode
+ *
+ * Must be called with mmap_lock held for read.
+ *
+ * move_pages() remaps arbitrary anonymous pages atomically in zero
+ * copy. It only works on non shared anonymous pages because those can
+ * be relocated without generating non linear anon_vmas in the rmap
+ * code.
+ *
+ * It provides a zero copy mechanism to handle userspace page faults.
+ * The source vma pages should have mapcount == 1, which can be
+ * enforced by using madvise(MADV_DONTFORK) on src vma.
+ *
+ * The thread receiving the page during the userland page fault
+ * will receive the faulting page in the source vma through the network,
+ * storage or any other I/O device (MADV_DONTFORK in the source vma
+ * keeps move_pages() from failing with -EBUSY if the process forks before
+ * move_pages() is called), then it will call move_pages() to map the
+ * page in the faulting address in the destination vma.
+ *
+ * This userfaultfd command works purely via pagetables, so it's the
+ * most efficient way to move physical non shared anonymous pages
+ * across different virtual addresses. Unlike mremap()/mmap()/munmap()
+ * it does not create any new vmas. The mapping in the destination
+ * address is atomic.
+ *
+ * It only works if the vma protection bits are identical from the
+ * source and destination vma.
+ *
+ * It can remap non shared anonymous pages within the same vma too.
+ *
+ * If the source virtual memory range has any unmapped holes, or if
+ * the destination virtual memory range is not a whole unmapped hole,
+ * move_pages() will fail respectively with -ENOENT or -EEXIST. This
+ * provides a very strict behavior to avoid any chance of memory
+ * corruption going unnoticed if there are userland race conditions.
+ * Only one thread should resolve the userland page fault at any given
+ * time for any given faulting address. This means that if two threads
+ * try to both call move_pages() on the same destination address at the
+ * same time, the second thread will get an explicit error from this
+ * command.
+ *
+ * The command retval will be "len" if successful. The command
+ * however can be interrupted by fatal signals or errors. If
+ * interrupted it will return the number of bytes successfully
+ * remapped before the interruption if any, or the negative error if
+ * none. It will never return zero. Either it will return an error or
+ * the number of bytes successfully moved. If the retval reports a
+ * "short" remap, the move_pages() command should be repeated by
+ * userland with src+retval, dst+retval, len-retval if it wants to know
+ * about the error that interrupted it.
+ *
+ * The UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag can be specified to
+ * prevent -ENOENT errors from materializing if there are holes in the
+ * source virtual range that is being remapped. The holes will be
+ * accounted as successfully remapped in the retval of the
+ * command. This is mostly useful to remap naturally hugepage-aligned
+ * virtual regions without knowing if there are transparent hugepages
+ * in the regions or not, while avoiding the risk of having to split
+ * the hugepmd during the remap.
+ *
+ * If there's any rmap walk that is taking the anon_vma locks without
+ * first obtaining the folio lock (the only current instance is
+ * folio_referenced), it will have to verify if the folio->mapping
+ * has changed after taking the anon_vma lock. If it changed, it
+ * should release the lock and retry obtaining a new anon_vma, because
+ * it means the anon_vma was changed by move_pages() before the lock
+ * could be obtained. This is the only additional complexity added to
+ * the rmap code to provide this anonymous page remapping functionality.
+ */
+ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
+ unsigned long dst_start, unsigned long src_start,
+ unsigned long len, __u64 mode)
+{
+ struct vm_area_struct *src_vma, *dst_vma;
+ unsigned long src_addr, dst_addr;
+ pmd_t *src_pmd, *dst_pmd;
+ long err = -EINVAL;
+ ssize_t moved = 0;
+
+ /* Sanitize the command parameters. */
+ if (WARN_ON_ONCE(src_start & ~PAGE_MASK) ||
+ WARN_ON_ONCE(dst_start & ~PAGE_MASK) ||
+ WARN_ON_ONCE(len & ~PAGE_MASK))
+ goto out;
+
+ /* Does the address range wrap, or is the span zero-sized? */
+ if (WARN_ON_ONCE(src_start + len <= src_start) ||
+ WARN_ON_ONCE(dst_start + len <= dst_start))
+ goto out;
+
+ /*
+ * Make sure the vma is not shared, that the src and dst remap
+ * ranges are both valid and fully within a single existing
+ * vma.
+ */
+ src_vma = find_vma(mm, src_start);
+ if (!src_vma || (src_vma->vm_flags & VM_SHARED))
+ goto out;
+ if (src_start < src_vma->vm_start ||
+ src_start + len > src_vma->vm_end)
+ goto out;
+
+ dst_vma = find_vma(mm, dst_start);
+ if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
+ goto out;
+ if (dst_start < dst_vma->vm_start ||
+ dst_start + len > dst_vma->vm_end)
+ goto out;
+
+ err = validate_move_areas(ctx, src_vma, dst_vma);
+ if (err)
+ goto out;
+
+ for (src_addr = src_start, dst_addr = dst_start;
+ src_addr < src_start + len;) {
+ spinlock_t *ptl;
+ pmd_t dst_pmdval;
+ unsigned long step_size;
+
+ /*
+ * Below works because an anonymous area would not have a
+ * transparent huge PUD. If file-backed support is added,
+ * that case would need to be handled here.
+ */
+ src_pmd = mm_find_pmd(mm, src_addr);
+ if (unlikely(!src_pmd)) {
+ if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
+ err = -ENOENT;
+ break;
+ }
+ src_pmd = mm_alloc_pmd(mm, src_addr);
+ if (unlikely(!src_pmd)) {
+ err = -ENOMEM;
+ break;
+ }
+ }
+ dst_pmd = mm_alloc_pmd(mm, dst_addr);
+ if (unlikely(!dst_pmd)) {
+ err = -ENOMEM;
+ break;
+ }
+
+ dst_pmdval = pmdp_get_lockless(dst_pmd);
+ /*
+ * If the dst_pmd is mapped as a THP, don't override it and just
+ * be strict. If dst_pmd changes into a THP after this check,
+ * move_pages_huge_pmd() will detect the change and retry,
+ * while move_pages_pte() will detect the change and fail.
+ */
+ if (unlikely(pmd_trans_huge(dst_pmdval))) {
+ err = -EEXIST;
+ break;
+ }
+
+ ptl = pmd_trans_huge_lock(src_pmd, src_vma);
+ if (ptl) {
+ if (pmd_devmap(*src_pmd)) {
+ spin_unlock(ptl);
+ err = -ENOENT;
+ break;
+ }
+
+ /* Check if we can move the pmd without splitting it. */
+ if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
+ !pmd_none(dst_pmdval)) {
+ struct folio *folio = pfn_folio(pmd_pfn(*src_pmd));
+
+ if (!folio || !PageAnonExclusive(&folio->page)) {
+ spin_unlock(ptl);
+ err = -EBUSY;
+ break;
+ }
+
+ spin_unlock(ptl);
+ split_huge_pmd(src_vma, src_pmd, src_addr);
+ /* The folio will be split by move_pages_pte() */
+ continue;
+ }
+
+ err = move_pages_huge_pmd(mm, dst_pmd, src_pmd,
+ dst_pmdval, dst_vma, src_vma,
+ dst_addr, src_addr);
+ step_size = HPAGE_PMD_SIZE;
+ } else {
+ if (pmd_none(*src_pmd)) {
+ if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
+ err = -ENOENT;
+ break;
+ }
+ if (unlikely(__pte_alloc(mm, src_pmd))) {
+ err = -ENOMEM;
+ break;
+ }
+ }
+
+ if (unlikely(pte_alloc(mm, dst_pmd))) {
+ err = -ENOMEM;
+ break;
+ }
+
+ err = move_pages_pte(mm, dst_pmd, src_pmd,
+ dst_vma, src_vma,
+ dst_addr, src_addr, mode);
+ step_size = PAGE_SIZE;
+ }
+
+ cond_resched();
+
+ if (fatal_signal_pending(current)) {
+ /* Do not override an error */
+ if (!err || err == -EAGAIN)
+ err = -EINTR;
+ break;
+ }
+
+ if (err) {
+ if (err == -EAGAIN)
+ continue;
+ break;
+ }
+
+ /* Proceed to the next page */
+ dst_addr += step_size;
+ src_addr += step_size;
+ moved += step_size;
+ }
+
+out:
+ VM_WARN_ON(moved < 0);
+ VM_WARN_ON(err > 0);
+ VM_WARN_ON(!moved && !err);
+ return moved ? moved : err;
+}
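The retry contract documented above (repeat with src+retval, dst+retval, len-retval after a "short" move) is easiest to see from userspace. The following is a minimal, hedged sketch and not part of this patch: it assumes the uffdio_move layout introduced by this series (dst, src, len, mode, plus the kernel-written move field) and the usual userfaultfd convention that the write-back field holds either the bytes processed or a negative error.

#include <stdint.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/* Hypothetical helper, illustrative only: move len bytes from src to dst. */
static int uffd_move_all(int uffd, uint64_t dst, uint64_t src, uint64_t len)
{
	uint64_t done = 0;

	while (done < len) {
		struct uffdio_move mv = {
			.dst  = dst + done,
			.src  = src + done,
			.len  = len - done,
			.mode = 0,	/* or UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES */
		};

		ioctl(uffd, UFFDIO_MOVE, &mv);
		if (mv.move < 0)
			return (int)mv.move;	/* nothing moved: negative error */
		if (!mv.move)
			return -EINVAL;		/* defensive: retval is never zero */
		done += (uint64_t)mv.move;	/* short move: retry the remainder */
	}
	return 0;
}

The exact mode bits and the errno reported alongside a partial move should be checked against the final uapi header; the loop only illustrates the retval handling described in the comment above.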
diff --git a/mm/util.c b/mm/util.c
index 744b4d7e3fae..5a6a9802583b 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -1047,11 +1047,11 @@ int __weak memcmp_pages(struct page *page1, struct page *page2)
char *addr1, *addr2;
int ret;
- addr1 = kmap_atomic(page1);
- addr2 = kmap_atomic(page2);
+ addr1 = kmap_local_page(page1);
+ addr2 = kmap_local_page(page2);
ret = memcmp(addr1, addr2, PAGE_SIZE);
- kunmap_atomic(addr2);
- kunmap_atomic(addr1);
+ kunmap_local(addr2);
+ kunmap_local(addr1);
return ret;
}
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 22c6689d9302..bd5183dfd879 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -169,7 +169,7 @@ static bool vmpressure_event(struct vmpressure *vmpr,
continue;
if (level < ev->level)
continue;
- eventfd_signal(ev->efd, 1);
+ eventfd_signal(ev->efd);
ret = true;
}
mutex_unlock(&vmpr->events_lock);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9dd8977de5a2..4f9c854ce6cc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -411,10 +411,10 @@ static int reclaimer_offset(void)
{
BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD);
- BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
- PGSCAN_DIRECT - PGSCAN_KSWAPD);
BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD);
+ BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
+ PGSCAN_DIRECT - PGSCAN_KSWAPD);
BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD);
@@ -977,7 +977,8 @@ static unsigned int demote_folio_list(struct list_head *demote_folios,
(unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
&nr_succeeded);
- __count_vm_events(PGDEMOTE_KSWAPD + reclaimer_offset(), nr_succeeded);
+ mod_node_page_state(pgdat, PGDEMOTE_KSWAPD + reclaimer_offset(),
+ nr_succeeded);
return nr_succeeded;
}
@@ -2222,7 +2223,7 @@ static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc)
* Flush the memory cgroup stats, so that we read accurate per-memcg
* lruvec stats for heuristics.
*/
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats(sc->target_mem_cgroup);
/*
* Determine the scan balance between anon and file LRUs.
@@ -2667,13 +2668,14 @@ static void get_item_key(void *item, int *key)
key[1] = hash >> BLOOM_FILTER_SHIFT;
}
-static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
+static bool test_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq,
+ void *item)
{
int key[2];
unsigned long *filter;
int gen = filter_gen_from_seq(seq);
- filter = READ_ONCE(lruvec->mm_state.filters[gen]);
+ filter = READ_ONCE(mm_state->filters[gen]);
if (!filter)
return true;
@@ -2682,13 +2684,14 @@ static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *it
return test_bit(key[0], filter) && test_bit(key[1], filter);
}
-static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
+static void update_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq,
+ void *item)
{
int key[2];
unsigned long *filter;
int gen = filter_gen_from_seq(seq);
- filter = READ_ONCE(lruvec->mm_state.filters[gen]);
+ filter = READ_ONCE(mm_state->filters[gen]);
if (!filter)
return;
@@ -2700,12 +2703,12 @@ static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *
set_bit(key[1], filter);
}
-static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq)
+static void reset_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq)
{
unsigned long *filter;
int gen = filter_gen_from_seq(seq);
- filter = lruvec->mm_state.filters[gen];
+ filter = mm_state->filters[gen];
if (filter) {
bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT));
return;
@@ -2713,13 +2716,15 @@ static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq)
filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT),
__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
- WRITE_ONCE(lruvec->mm_state.filters[gen], filter);
+ WRITE_ONCE(mm_state->filters[gen], filter);
}
/******************************************************************************
* mm_struct list
******************************************************************************/
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
+
static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
{
static struct lru_gen_mm_list mm_list = {
@@ -2736,6 +2741,29 @@ static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
return &mm_list;
}
+static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec)
+{
+ return &lruvec->mm_state;
+}
+
+static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)
+{
+ int key;
+ struct mm_struct *mm;
+ struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
+ struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec);
+
+ mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
+ key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap);
+
+ if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap))
+ return NULL;
+
+ clear_bit(key, &mm->lru_gen.bitmap);
+
+ return mmget_not_zero(mm) ? mm : NULL;
+}
+
void lru_gen_add_mm(struct mm_struct *mm)
{
int nid;
@@ -2751,10 +2779,11 @@ void lru_gen_add_mm(struct mm_struct *mm)
for_each_node_state(nid, N_MEMORY) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
/* the first addition since the last iteration */
- if (lruvec->mm_state.tail == &mm_list->fifo)
- lruvec->mm_state.tail = &mm->lru_gen.list;
+ if (mm_state->tail == &mm_list->fifo)
+ mm_state->tail = &mm->lru_gen.list;
}
list_add_tail(&mm->lru_gen.list, &mm_list->fifo);
@@ -2780,14 +2809,15 @@ void lru_gen_del_mm(struct mm_struct *mm)
for_each_node(nid) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
/* where the current iteration continues after */
- if (lruvec->mm_state.head == &mm->lru_gen.list)
- lruvec->mm_state.head = lruvec->mm_state.head->prev;
+ if (mm_state->head == &mm->lru_gen.list)
+ mm_state->head = mm_state->head->prev;
/* where the last iteration ended before */
- if (lruvec->mm_state.tail == &mm->lru_gen.list)
- lruvec->mm_state.tail = lruvec->mm_state.tail->next;
+ if (mm_state->tail == &mm->lru_gen.list)
+ mm_state->tail = mm_state->tail->next;
}
list_del_init(&mm->lru_gen.list);
@@ -2830,10 +2860,30 @@ void lru_gen_migrate_mm(struct mm_struct *mm)
}
#endif
+#else /* !CONFIG_LRU_GEN_WALKS_MMU */
+
+static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
+{
+ return NULL;
+}
+
+static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec)
+{
+ return NULL;
+}
+
+static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)
+{
+ return NULL;
+}
+
+#endif
+
static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last)
{
int i;
int hist;
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
@@ -2841,44 +2891,20 @@ static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
hist = lru_hist_from_seq(walk->max_seq);
for (i = 0; i < NR_MM_STATS; i++) {
- WRITE_ONCE(lruvec->mm_state.stats[hist][i],
- lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]);
+ WRITE_ONCE(mm_state->stats[hist][i],
+ mm_state->stats[hist][i] + walk->mm_stats[i]);
walk->mm_stats[i] = 0;
}
}
if (NR_HIST_GENS > 1 && last) {
- hist = lru_hist_from_seq(lruvec->mm_state.seq + 1);
+ hist = lru_hist_from_seq(mm_state->seq + 1);
for (i = 0; i < NR_MM_STATS; i++)
- WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0);
+ WRITE_ONCE(mm_state->stats[hist][i], 0);
}
}
-static bool should_skip_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
-{
- int type;
- unsigned long size = 0;
- struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
- int key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap);
-
- if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap))
- return true;
-
- clear_bit(key, &mm->lru_gen.bitmap);
-
- for (type = !walk->can_swap; type < ANON_AND_FILE; type++) {
- size += type ? get_mm_counter(mm, MM_FILEPAGES) :
- get_mm_counter(mm, MM_ANONPAGES) +
- get_mm_counter(mm, MM_SHMEMPAGES);
- }
-
- if (size < MIN_LRU_BATCH)
- return true;
-
- return !mmget_not_zero(mm);
-}
-
static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
struct mm_struct **iter)
{
@@ -2887,7 +2913,7 @@ static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
struct mm_struct *mm = NULL;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
- struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
/*
* mm_state->seq is incremented after each iteration of mm_list. There
@@ -2925,11 +2951,7 @@ static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
mm_state->tail = mm_state->head->next;
walk->force_scan = true;
}
-
- mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
- if (should_skip_mm(mm, walk))
- mm = NULL;
- } while (!mm);
+ } while (!(mm = get_next_mm(walk)));
done:
if (*iter || last)
reset_mm_stats(lruvec, walk, last);
@@ -2937,7 +2959,7 @@ done:
spin_unlock(&mm_list->lock);
if (mm && first)
- reset_bloom_filter(lruvec, walk->max_seq + 1);
+ reset_bloom_filter(mm_state, walk->max_seq + 1);
if (*iter)
mmput_async(*iter);
@@ -2952,7 +2974,7 @@ static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq)
bool success = false;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
- struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
spin_lock(&mm_list->lock);
@@ -3248,7 +3270,6 @@ static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned
return pfn;
}
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr)
{
unsigned long pfn = pmd_pfn(pmd);
@@ -3266,7 +3287,6 @@ static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned
return pfn;
}
-#endif
static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
struct pglist_data *pgdat, bool can_swap)
@@ -3369,7 +3389,6 @@ restart:
return suitable_to_scan(total, young);
}
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma,
struct mm_walk *args, unsigned long *bitmap, unsigned long *first)
{
@@ -3447,12 +3466,6 @@ next:
done:
*first = -1;
}
-#else
-static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma,
- struct mm_walk *args, unsigned long *bitmap, unsigned long *first)
-{
-}
-#endif
static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
struct mm_walk *args)
@@ -3465,6 +3478,7 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
DECLARE_BITMAP(bitmap, MIN_LRU_BATCH);
unsigned long first = -1;
struct lru_gen_mm_walk *walk = args->private;
+ struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec);
VM_WARN_ON_ONCE(pud_leaf(*pud));
@@ -3487,7 +3501,6 @@ restart:
continue;
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (pmd_trans_huge(val)) {
unsigned long pfn = pmd_pfn(val);
struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
@@ -3506,7 +3519,7 @@ restart:
walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
continue;
}
-#endif
+
walk->mm_stats[MM_NONLEAF_TOTAL]++;
if (should_clear_pmd_young()) {
@@ -3516,7 +3529,7 @@ restart:
walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
}
- if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i))
+ if (!walk->force_scan && !test_bloom_filter(mm_state, walk->max_seq, pmd + i))
continue;
walk->mm_stats[MM_NONLEAF_FOUND]++;
@@ -3527,7 +3540,7 @@ restart:
walk->mm_stats[MM_NONLEAF_ADDED]++;
/* carry over to the next generation */
- update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i);
+ update_bloom_filter(mm_state, walk->max_seq + 1, pmd + i);
}
walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first);
@@ -3734,16 +3747,25 @@ next:
return success;
}
-static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
+static bool inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
+ bool can_swap, bool force_scan)
{
+ bool success;
int prev, next;
int type, zone;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
restart:
+ if (max_seq < READ_ONCE(lrugen->max_seq))
+ return false;
+
spin_lock_irq(&lruvec->lru_lock);
VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
+ success = max_seq == lrugen->max_seq;
+ if (!success)
+ goto unlock;
+
for (type = ANON_AND_FILE - 1; type >= 0; type--) {
if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
continue;
@@ -3787,8 +3809,10 @@ restart:
WRITE_ONCE(lrugen->timestamps[next], jiffies);
/* make sure preceding modifications appear */
smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
-
+unlock:
spin_unlock_irq(&lruvec->lru_lock);
+
+ return success;
}
static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
@@ -3798,14 +3822,16 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
struct lru_gen_mm_walk *walk;
struct mm_struct *mm = NULL;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
+ if (!mm_state)
+ return inc_max_seq(lruvec, max_seq, can_swap, force_scan);
+
/* see the comment in iterate_mm_list() */
- if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) {
- success = false;
- goto done;
- }
+ if (max_seq <= READ_ONCE(mm_state->seq))
+ return false;
/*
* If the hardware doesn't automatically set the accessed bit, fallback
@@ -3835,8 +3861,10 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
walk_mm(lruvec, mm, walk);
} while (mm);
done:
- if (success)
- inc_max_seq(lruvec, can_swap, force_scan);
+ if (success) {
+ success = inc_max_seq(lruvec, max_seq, can_swap, force_scan);
+ WARN_ON_ONCE(!success);
+ }
return success;
}
@@ -3955,11 +3983,13 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
int young = 0;
pte_t *pte = pvmw->pte;
unsigned long addr = pvmw->address;
+ struct vm_area_struct *vma = pvmw->vma;
struct folio *folio = pfn_folio(pvmw->pfn);
bool can_swap = !folio_is_file_lru(folio);
struct mem_cgroup *memcg = folio_memcg(folio);
struct pglist_data *pgdat = folio_pgdat(folio);
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
DEFINE_MAX_SEQ(lruvec);
int old_gen, new_gen = lru_gen_from_seq(max_seq);
@@ -3969,11 +3999,15 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
if (spin_is_contended(pvmw->ptl))
return;
+ /* exclude special VMAs containing anon pages from COW */
+ if (vma->vm_flags & VM_SPECIAL)
+ return;
+
/* avoid taking the LRU lock under the PTL when possible */
walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
- start = max(addr & PMD_MASK, pvmw->vma->vm_start);
- end = min(addr | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
+ start = max(addr & PMD_MASK, vma->vm_start);
+ end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1;
if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
@@ -3998,7 +4032,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
unsigned long pfn;
pte_t ptent = ptep_get(pte + i);
- pfn = get_pte_pfn(ptent, pvmw->vma, addr);
+ pfn = get_pte_pfn(ptent, vma, addr);
if (pfn == -1)
continue;
@@ -4009,7 +4043,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
if (!folio)
continue;
- if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))
+ if (!ptep_test_and_clear_young(vma, addr, pte + i))
VM_WARN_ON_ONCE(true);
young++;
@@ -4038,8 +4072,8 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
mem_cgroup_unlock_pages();
/* feedback from rmap walkers to page table walkers */
- if (suitable_to_scan(i, young))
- update_bloom_filter(lruvec, max_seq, pvmw->pmd);
+ if (mm_state && suitable_to_scan(i, young))
+ update_bloom_filter(mm_state, max_seq, pvmw->pmd);
}
/******************************************************************************
@@ -4055,13 +4089,6 @@ enum {
MEMCG_LRU_YOUNG,
};
-#ifdef CONFIG_MEMCG
-
-static int lru_gen_memcg_seg(struct lruvec *lruvec)
-{
- return READ_ONCE(lruvec->lrugen.seg);
-}
-
static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
{
int seg;
@@ -4108,6 +4135,8 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
}
+#ifdef CONFIG_MEMCG
+
void lru_gen_online_memcg(struct mem_cgroup *memcg)
{
int gen;
@@ -4175,18 +4204,11 @@ void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
struct lruvec *lruvec = get_lruvec(memcg, nid);
/* see the comment on MEMCG_NR_GENS */
- if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD)
+ if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD)
lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);
}
-#else /* !CONFIG_MEMCG */
-
-static int lru_gen_memcg_seg(struct lruvec *lruvec)
-{
- return 0;
-}
-
-#endif
+#endif /* CONFIG_MEMCG */
/******************************************************************************
* the eviction
@@ -4734,7 +4756,7 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
if (mem_cgroup_below_low(NULL, memcg)) {
/* see the comment on MEMCG_NR_GENS */
- if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL)
+ if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL)
return MEMCG_LRU_TAIL;
memcg_memory_event(memcg, MEMCG_LOW);
@@ -4757,12 +4779,10 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
return 0;
/* one retry if offlined or too small */
- return lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL ?
+ return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ?
MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
}
-#ifdef CONFIG_MEMCG
-
static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
{
int op;
@@ -4854,20 +4874,6 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
blk_finish_plug(&plug);
}
-#else /* !CONFIG_MEMCG */
-
-static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
-{
- BUILD_BUG();
-}
-
-static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
-{
- BUILD_BUG();
-}
-
-#endif
-
static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc)
{
int priority;
@@ -5215,6 +5221,7 @@ static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
int type, tier;
int hist = lru_hist_from_seq(seq);
struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
for (tier = 0; tier < MAX_NR_TIERS; tier++) {
seq_printf(m, " %10d", tier);
@@ -5240,6 +5247,9 @@ static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
seq_putc(m, '\n');
}
+ if (!mm_state)
+ return;
+
seq_puts(m, " ");
for (i = 0; i < NR_MM_STATS; i++) {
const char *s = " ";
@@ -5247,10 +5257,10 @@ static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
if (seq == max_seq && NR_HIST_GENS == 1) {
s = "LOYNFA";
- n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
+ n = READ_ONCE(mm_state->stats[hist][i]);
} else if (seq != max_seq && NR_HIST_GENS > 1) {
s = "loynfa";
- n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
+ n = READ_ONCE(mm_state->stats[hist][i]);
}
seq_printf(m, " %10lu%c", n, s[i]);
@@ -5514,11 +5524,24 @@ static const struct file_operations lru_gen_ro_fops = {
* initialization
******************************************************************************/
+void lru_gen_init_pgdat(struct pglist_data *pgdat)
+{
+ int i, j;
+
+ spin_lock_init(&pgdat->memcg_lru.lock);
+
+ for (i = 0; i < MEMCG_NR_GENS; i++) {
+ for (j = 0; j < MEMCG_NR_BINS; j++)
+ INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
+ }
+}
+
void lru_gen_init_lruvec(struct lruvec *lruvec)
{
int i;
int gen, type, zone;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
lrugen->max_seq = MIN_NR_GENS + 1;
lrugen->enabled = lru_gen_enabled();
@@ -5529,47 +5552,46 @@ void lru_gen_init_lruvec(struct lruvec *lruvec)
for_each_gen_type_zone(gen, type, zone)
INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]);
- lruvec->mm_state.seq = MIN_NR_GENS;
+ if (mm_state)
+ mm_state->seq = MIN_NR_GENS;
}
#ifdef CONFIG_MEMCG
-void lru_gen_init_pgdat(struct pglist_data *pgdat)
+void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
- int i, j;
+ struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
- spin_lock_init(&pgdat->memcg_lru.lock);
+ if (!mm_list)
+ return;
- for (i = 0; i < MEMCG_NR_GENS; i++) {
- for (j = 0; j < MEMCG_NR_BINS; j++)
- INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
- }
-}
-
-void lru_gen_init_memcg(struct mem_cgroup *memcg)
-{
- INIT_LIST_HEAD(&memcg->mm_list.fifo);
- spin_lock_init(&memcg->mm_list.lock);
+ INIT_LIST_HEAD(&mm_list->fifo);
+ spin_lock_init(&mm_list->lock);
}
void lru_gen_exit_memcg(struct mem_cgroup *memcg)
{
int i;
int nid;
+ struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
- VM_WARN_ON_ONCE(!list_empty(&memcg->mm_list.fifo));
+ VM_WARN_ON_ONCE(mm_list && !list_empty(&mm_list->fifo));
for_each_node(nid) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0,
sizeof(lruvec->lrugen.nr_pages)));
lruvec->lrugen.list.next = LIST_POISON1;
+ if (!mm_state)
+ continue;
+
for (i = 0; i < NR_BLOOM_FILTERS; i++) {
- bitmap_free(lruvec->mm_state.filters[i]);
- lruvec->mm_state.filters[i] = NULL;
+ bitmap_free(mm_state->filters[i]);
+ mm_state->filters[i] = NULL;
}
}
}
@@ -5595,14 +5617,17 @@ late_initcall(init_lru_gen);
static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
{
+ BUILD_BUG();
}
static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
+ BUILD_BUG();
}
static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
{
+ BUILD_BUG();
}
#endif /* CONFIG_LRU_GEN */
@@ -6395,7 +6420,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
* scan_control uses s8 fields for order, priority, and reclaim_idx.
* Confirm they are large enough for max values.
*/
- BUILD_BUG_ON(MAX_ORDER >= S8_MAX);
+ BUILD_BUG_ON(MAX_PAGE_ORDER >= S8_MAX);
BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 359460deb377..db79935e4a54 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1059,7 +1059,7 @@ static void fill_contig_page_info(struct zone *zone,
info->free_blocks_total = 0;
info->free_blocks_suitable = 0;
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
unsigned long blocks;
/*
@@ -1092,7 +1092,7 @@ static int __fragmentation_index(unsigned int order, struct contig_page_info *in
{
unsigned long requested = 1UL << order;
- if (WARN_ON_ONCE(order > MAX_ORDER))
+ if (WARN_ON_ONCE(order > MAX_PAGE_ORDER))
return 0;
if (!info->free_blocks_total)
@@ -1249,6 +1249,9 @@ const char * const vmstat_text[] = {
"pgpromote_success",
"pgpromote_candidate",
#endif
+ "pgdemote_kswapd",
+ "pgdemote_direct",
+ "pgdemote_khugepaged",
/* enum writeback_stat_item counters */
"nr_dirty_threshold",
@@ -1279,9 +1282,6 @@ const char * const vmstat_text[] = {
"pgsteal_kswapd",
"pgsteal_direct",
"pgsteal_khugepaged",
- "pgdemote_kswapd",
- "pgdemote_direct",
- "pgdemote_khugepaged",
"pgscan_kswapd",
"pgscan_direct",
"pgscan_khugepaged",
@@ -1401,6 +1401,7 @@ const char * const vmstat_text[] = {
#ifdef CONFIG_ZSWAP
"zswpin",
"zswpout",
+ "zswpwb",
#endif
#ifdef CONFIG_X86
"direct_map_level2_splits",
@@ -1475,7 +1476,7 @@ static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
int order;
seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
- for (order = 0; order <= MAX_ORDER; ++order)
+ for (order = 0; order < NR_PAGE_ORDERS; ++order)
/*
* Access to nr_free is lockless as nr_free is used only for
* printing purposes. Use data_race to avoid KCSAN warning.
@@ -1504,7 +1505,7 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
pgdat->node_id,
zone->name,
migratetype_names[mtype]);
- for (order = 0; order <= MAX_ORDER; ++order) {
+ for (order = 0; order < NR_PAGE_ORDERS; ++order) {
unsigned long freecount = 0;
struct free_area *area;
struct list_head *curr;
@@ -1544,7 +1545,7 @@ static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
/* Print header */
seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
- for (order = 0; order <= MAX_ORDER; ++order)
+ for (order = 0; order < NR_PAGE_ORDERS; ++order)
seq_printf(m, "%6d ", order);
seq_putc(m, '\n');
@@ -2180,7 +2181,7 @@ static void unusable_show_print(struct seq_file *m,
seq_printf(m, "Node %d, zone %8s ",
pgdat->node_id,
zone->name);
- for (order = 0; order <= MAX_ORDER; ++order) {
+ for (order = 0; order < NR_PAGE_ORDERS; ++order) {
fill_contig_page_info(zone, order, &info);
index = unusable_free_index(order, &info);
seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
@@ -2232,7 +2233,7 @@ static void extfrag_show_print(struct seq_file *m,
seq_printf(m, "Node %d, zone %8s ",
pgdat->node_id,
zone->name);
- for (order = 0; order <= MAX_ORDER; ++order) {
+ for (order = 0; order < NR_PAGE_ORDERS; ++order) {
fill_contig_page_info(zone, order, &info);
index = __fragmentation_index(order, &info);
seq_printf(m, "%2d.%03d ", index / 1000, index % 1000);
diff --git a/mm/workingset.c b/mm/workingset.c
index 33baad203277..226012974328 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -425,8 +425,16 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset)
struct pglist_data *pgdat;
unsigned long eviction;
- if (lru_gen_enabled())
- return lru_gen_test_recent(shadow, file, &eviction_lruvec, &eviction, workingset);
+ rcu_read_lock();
+
+ if (lru_gen_enabled()) {
+ bool recent = lru_gen_test_recent(shadow, file,
+ &eviction_lruvec, &eviction, workingset);
+
+ rcu_read_unlock();
+ return recent;
+ }
+
unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
eviction <<= bucket_order;
@@ -448,8 +456,20 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset)
* configurations instead.
*/
eviction_memcg = mem_cgroup_from_id(memcgid);
- if (!mem_cgroup_disabled() && !eviction_memcg)
+ if (!mem_cgroup_disabled() &&
+ (!eviction_memcg || !mem_cgroup_tryget(eviction_memcg))) {
+ rcu_read_unlock();
return false;
+ }
+
+ rcu_read_unlock();
+
+ /*
+ * Flush stats (and potentially sleep) outside the RCU read section.
+ * XXX: With per-memcg flushing and thresholding, is ratelimiting
+ * still needed here?
+ */
+ mem_cgroup_flush_stats_ratelimited(eviction_memcg);
eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
refault = atomic_long_read(&eviction_lruvec->nonresident_age);
@@ -493,6 +513,7 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset)
}
}
+ mem_cgroup_put(eviction_memcg);
return refault_distance <= workingset_size;
}
@@ -519,19 +540,16 @@ void workingset_refault(struct folio *folio, void *shadow)
return;
}
- /* Flush stats (and potentially sleep) before holding RCU read lock */
- mem_cgroup_flush_stats_ratelimited();
-
- rcu_read_lock();
-
/*
* The activation decision for this folio is made at the level
* where the eviction occurred, as that is where the LRU order
* during folio reclaim is being determined.
*
* However, the cgroup that will own the folio is the one that
- * is actually experiencing the refault event.
+ * is actually experiencing the refault event. Make sure the folio is
+ * locked to guarantee folio_memcg() stability throughout.
*/
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
nr = folio_nr_pages(folio);
memcg = folio_memcg(folio);
pgdat = folio_pgdat(folio);
@@ -540,7 +558,7 @@ void workingset_refault(struct folio *folio, void *shadow)
mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);
if (!workingset_test_recent(shadow, file, &workingset))
- goto out;
+ return;
folio_set_active(folio);
workingset_age_nonresident(lruvec, nr);
@@ -556,8 +574,6 @@ void workingset_refault(struct folio *folio, void *shadow)
lru_note_cost_refault(folio);
mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
}
-out:
- rcu_read_unlock();
}
/**
@@ -615,12 +631,12 @@ void workingset_update_node(struct xa_node *node)
if (node->count && node->count == node->nr_values) {
if (list_empty(&node->private_list)) {
- list_lru_add(&shadow_nodes, &node->private_list);
+ list_lru_add_obj(&shadow_nodes, &node->private_list);
__inc_lruvec_kmem_state(node, WORKINGSET_NODES);
}
} else {
if (!list_empty(&node->private_list)) {
- list_lru_del(&shadow_nodes, &node->private_list);
+ list_lru_del_obj(&shadow_nodes, &node->private_list);
__dec_lruvec_kmem_state(node, WORKINGSET_NODES);
}
}
@@ -664,7 +680,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
struct lruvec *lruvec;
int i;
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats_ratelimited(sc->memcg);
lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
pages += lruvec_page_state_local(lruvec,
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b1c0dad7f4cf..c937635e0ad1 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1364,9 +1364,12 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
int newfg;
struct zspage *zspage;
- if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
+ if (unlikely(!size))
return (unsigned long)ERR_PTR(-EINVAL);
+ if (unlikely(size > ZS_MAX_ALLOC_SIZE))
+ return (unsigned long)ERR_PTR(-ENOSPC);
+
handle = cache_alloc_handle(pool, gfp);
if (!handle)
return (unsigned long)ERR_PTR(-ENOMEM);
diff --git a/mm/zswap.c b/mm/zswap.c
index 74411dfdad92..ca25b676048e 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -35,6 +35,7 @@
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
+#include <linux/list_lru.h>
#include "swap.h"
#include "internal.h"
@@ -147,6 +148,16 @@ module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
/* Number of zpools in zswap_pool (empirically determined for scalability) */
#define ZSWAP_NR_ZPOOLS 32
+/* Enable/disable memory pressure-based shrinker. */
+static bool zswap_shrinker_enabled = IS_ENABLED(
+ CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
+module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
+
+bool is_zswap_enabled(void)
+{
+ return zswap_enabled;
+}
+
/*********************************
* data structures
**********************************/
@@ -155,8 +166,8 @@ struct crypto_acomp_ctx {
struct crypto_acomp *acomp;
struct acomp_req *req;
struct crypto_wait wait;
- u8 *dstmem;
- struct mutex *mutex;
+ u8 *buffer;
+ struct mutex mutex;
};
/*
@@ -174,8 +185,10 @@ struct zswap_pool {
struct work_struct shrink_work;
struct hlist_node node;
char tfm_name[CRYPTO_MAX_ALG_NAME];
- struct list_head lru;
- spinlock_t lru_lock;
+ struct list_lru list_lru;
+ struct mem_cgroup *next_shrink;
+ struct shrinker *shrinker;
+ atomic_t nr_stored;
};
/*
@@ -274,32 +287,72 @@ static bool zswap_can_accept(void)
DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
+static u64 get_zswap_pool_size(struct zswap_pool *pool)
+{
+ u64 pool_size = 0;
+ int i;
+
+ for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
+ pool_size += zpool_get_total_size(pool->zpools[i]);
+
+ return pool_size;
+}
+
static void zswap_update_total_size(void)
{
struct zswap_pool *pool;
u64 total = 0;
- int i;
rcu_read_lock();
list_for_each_entry_rcu(pool, &zswap_pools, list)
- for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
- total += zpool_get_total_size(pool->zpools[i]);
+ total += get_zswap_pool_size(pool);
rcu_read_unlock();
zswap_pool_total_size = total;
}
+/* should be called under RCU */
+#ifdef CONFIG_MEMCG
+static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
+{
+ return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
+}
+#else
+static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
+{
+ return NULL;
+}
+#endif
+
+static inline int entry_to_nid(struct zswap_entry *entry)
+{
+ return page_to_nid(virt_to_page(entry));
+}
+
+void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
+{
+ struct zswap_pool *pool;
+
+ /* lock out zswap pools list modification */
+ spin_lock(&zswap_pools_lock);
+ list_for_each_entry(pool, &zswap_pools, list) {
+ if (pool->next_shrink == memcg)
+ pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
+ }
+ spin_unlock(&zswap_pools_lock);
+}
+
/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;
-static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
+static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
struct zswap_entry *entry;
- entry = kmem_cache_alloc(zswap_entry_cache, gfp);
+ entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
if (!entry)
return NULL;
entry->refcount = 1;
@@ -313,6 +366,100 @@ static void zswap_entry_cache_free(struct zswap_entry *entry)
}
/*********************************
+* zswap lruvec functions
+**********************************/
+void zswap_lruvec_state_init(struct lruvec *lruvec)
+{
+ atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
+}
+
+void zswap_folio_swapin(struct folio *folio)
+{
+ struct lruvec *lruvec;
+
+ if (folio) {
+ lruvec = folio_lruvec(folio);
+ atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+ }
+}
+
+/*********************************
+* lru functions
+**********************************/
+static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
+{
+ atomic_long_t *nr_zswap_protected;
+ unsigned long lru_size, old, new;
+ int nid = entry_to_nid(entry);
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+
+ /*
+ * Note that it is safe to use rcu_read_lock() here, even in the face of
+ * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
+ * used in list_lru lookup, only two scenarios are possible:
+ *
+ * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
+ * new entry will be reparented to memcg's parent's list_lru.
+ * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
+ * new entry will be added directly to memcg's parent's list_lru.
+ *
+ * Similar reasoning holds for list_lru_del() and list_lru_putback().
+ */
+ rcu_read_lock();
+ memcg = mem_cgroup_from_entry(entry);
+ /* will always succeed */
+ list_lru_add(list_lru, &entry->lru, nid, memcg);
+
+ /* Update the protection area */
+ lru_size = list_lru_count_one(list_lru, nid, memcg);
+ lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
+ nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
+ old = atomic_long_inc_return(nr_zswap_protected);
+ /*
+ * Decay to avoid overflow and adapt to changing workloads.
+ * This is based on LRU reclaim cost decaying heuristics.
+ */
+ do {
+ new = old > lru_size / 4 ? old / 2 : old;
+ } while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
+ rcu_read_unlock();
+}
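To put rough numbers on the decay above (hypothetical, not from the patch): with a per-node, per-memcg LRU of 1,000 entries, nr_zswap_protected grows by one per store until it exceeds 250, a quarter of the LRU; the store that pushes it past that point halves it, so the protected window keeps tracking roughly the most recently stored quarter of the list instead of growing without bound. A standalone sketch of the same step:

/* Illustrative only: the decay step from zswap_lru_add(), in isolation. */
static unsigned long decayed_protection(unsigned long old, unsigned long lru_size)
{
	/* e.g. old = 251, lru_size = 1000: 251 > 250, so decay to 125 */
	return old > lru_size / 4 ? old / 2 : old;
}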
+
+static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
+{
+ int nid = entry_to_nid(entry);
+ struct mem_cgroup *memcg;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_entry(entry);
+ /* will always succeed */
+ list_lru_del(list_lru, &entry->lru, nid, memcg);
+ rcu_read_unlock();
+}
+
+static void zswap_lru_putback(struct list_lru *list_lru,
+ struct zswap_entry *entry)
+{
+ int nid = entry_to_nid(entry);
+ spinlock_t *lock = &list_lru->node[nid].lock;
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_entry(entry);
+ spin_lock(lock);
+ /* we cannot use list_lru_add here, because it increments node's lru count */
+ list_lru_putback(list_lru, &entry->lru, nid, memcg);
+ spin_unlock(lock);
+
+ lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(entry_to_nid(entry)));
+ /* increment the protection area to account for the LRU rotation. */
+ atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+ rcu_read_unlock();
+}
+
+/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
@@ -396,10 +543,9 @@ static void zswap_free_entry(struct zswap_entry *entry)
if (!entry->length)
atomic_dec(&zswap_same_filled_pages);
else {
- spin_lock(&entry->pool->lru_lock);
- list_del(&entry->lru);
- spin_unlock(&entry->pool->lru_lock);
+ zswap_lru_del(&entry->pool->list_lru, entry);
zpool_free(zswap_find_zpool(entry), entry->handle);
+ atomic_dec(&entry->pool->nr_stored);
zswap_pool_put(entry->pool);
}
zswap_entry_cache_free(entry);
@@ -442,65 +588,132 @@ static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
}
/*********************************
-* per-cpu code
+* shrinker functions
**********************************/
-static DEFINE_PER_CPU(u8 *, zswap_dstmem);
-/*
- * If users dynamically change the zpool type and compressor at runtime, i.e.
- * zswap is running, zswap can have more than one zpool on one cpu, but they
- * are sharing dtsmem. So we need this mutex to be per-cpu.
- */
-static DEFINE_PER_CPU(struct mutex *, zswap_mutex);
+static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
+ spinlock_t *lock, void *arg);
-static int zswap_dstmem_prepare(unsigned int cpu)
+static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
- struct mutex *mutex;
- u8 *dst;
+ struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
+ unsigned long shrink_ret, nr_protected, lru_size;
+ struct zswap_pool *pool = shrinker->private_data;
+ bool encountered_page_in_swapcache = false;
+
+ if (!zswap_shrinker_enabled ||
+ !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
+ sc->nr_scanned = 0;
+ return SHRINK_STOP;
+ }
- dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
- if (!dst)
- return -ENOMEM;
+ nr_protected =
+ atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+ lru_size = list_lru_shrink_count(&pool->list_lru, sc);
- mutex = kmalloc_node(sizeof(*mutex), GFP_KERNEL, cpu_to_node(cpu));
- if (!mutex) {
- kfree(dst);
- return -ENOMEM;
+ /*
+ * Abort if we are shrinking into the protected region.
+ *
+ * This short-circuiting is necessary because if we have too many
+ * concurrent reclaimers getting the freeable zswap object counts at the
+ * same time (before any of them made reasonable progress), the total
+ * number of reclaimed objects might be more than the number of unprotected
+ * objects (i.e. the reclaimers will reclaim into the protected area of the
+ * zswap LRU).
+ */
+ if (nr_protected >= lru_size - sc->nr_to_scan) {
+ sc->nr_scanned = 0;
+ return SHRINK_STOP;
}
- mutex_init(mutex);
- per_cpu(zswap_dstmem, cpu) = dst;
- per_cpu(zswap_mutex, cpu) = mutex;
- return 0;
+ shrink_ret = list_lru_shrink_walk(&pool->list_lru, sc, &shrink_memcg_cb,
+ &encountered_page_in_swapcache);
+
+ if (encountered_page_in_swapcache)
+ return SHRINK_STOP;
+
+ return shrink_ret ? shrink_ret : SHRINK_STOP;
}
-static int zswap_dstmem_dead(unsigned int cpu)
+static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
- struct mutex *mutex;
- u8 *dst;
+ struct zswap_pool *pool = shrinker->private_data;
+ struct mem_cgroup *memcg = sc->memcg;
+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
+ unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
- mutex = per_cpu(zswap_mutex, cpu);
- kfree(mutex);
- per_cpu(zswap_mutex, cpu) = NULL;
+ if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
+ return 0;
- dst = per_cpu(zswap_dstmem, cpu);
- kfree(dst);
- per_cpu(zswap_dstmem, cpu) = NULL;
+#ifdef CONFIG_MEMCG_KMEM
+ mem_cgroup_flush_stats(memcg);
+ nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
+ nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
+#else
+ /* use pool stats instead of memcg stats */
+ nr_backing = get_zswap_pool_size(pool) >> PAGE_SHIFT;
+ nr_stored = atomic_read(&pool->nr_stored);
+#endif
- return 0;
+ if (!nr_stored)
+ return 0;
+
+ nr_protected =
+ atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+ nr_freeable = list_lru_shrink_count(&pool->list_lru, sc);
+ /*
+ * Reduce the lru size by an estimate of the number of pages
+ * that should be protected.
+ */
+ nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
+
+ /*
+ * Scale the number of freeable pages by the memory saving factor.
+ * This ensures that the better zswap compresses memory, the fewer
+ * pages we will evict to swap (as it will otherwise incur IO for
+ * relatively small memory saving).
+ */
+ return mult_frac(nr_freeable, nr_backing, nr_stored);
+}
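To make the scaling concrete (hypothetical numbers, not from the patch): if a memcg has 1,000 freeable entries left on the zswap LRU after protection, backed by 250 pages of zpool memory holding 1,000 stored pages (a 4:1 saving), the shrinker reports only 250 objects, so better compression translates directly into less writeback pressure. A minimal sketch using the same mult_frac() helper:

#include <linux/math.h>

/* Illustrative only: hypothetical values for the scaling above. */
static unsigned long example_scaled_count(void)
{
	unsigned long nr_freeable = 1000;	/* unprotected zswap LRU entries */
	unsigned long nr_backing  = 250;	/* zpool memory used, in pages   */
	unsigned long nr_stored   = 1000;	/* pages stored in zswap         */

	/* 1000 * 250 / 1000 == 250 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}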
+
+static void zswap_alloc_shrinker(struct zswap_pool *pool)
+{
+ pool->shrinker =
+ shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
+ if (!pool->shrinker)
+ return;
+
+ pool->shrinker->private_data = pool;
+ pool->shrinker->scan_objects = zswap_shrinker_scan;
+ pool->shrinker->count_objects = zswap_shrinker_count;
+ pool->shrinker->batch = 0;
+ pool->shrinker->seeks = DEFAULT_SEEKS;
}
+/*********************************
+* per-cpu code
+**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
struct crypto_acomp *acomp;
struct acomp_req *req;
+ int ret;
+
+ mutex_init(&acomp_ctx->mutex);
+
+ acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+ if (!acomp_ctx->buffer)
+ return -ENOMEM;
acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
if (IS_ERR(acomp)) {
pr_err("could not alloc crypto acomp %s : %ld\n",
pool->tfm_name, PTR_ERR(acomp));
- return PTR_ERR(acomp);
+ ret = PTR_ERR(acomp);
+ goto acomp_fail;
}
acomp_ctx->acomp = acomp;
@@ -508,8 +721,8 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
if (!req) {
pr_err("could not alloc crypto acomp_request %s\n",
pool->tfm_name);
- crypto_free_acomp(acomp_ctx->acomp);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto req_fail;
}
acomp_ctx->req = req;
@@ -522,10 +735,13 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &acomp_ctx->wait);
- acomp_ctx->mutex = per_cpu(zswap_mutex, cpu);
- acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu);
-
return 0;
+
+req_fail:
+ crypto_free_acomp(acomp_ctx->acomp);
+acomp_fail:
+ kfree(acomp_ctx->buffer);
+ return ret;
}
static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
@@ -538,6 +754,7 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
acomp_request_free(acomp_ctx->req);
if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
crypto_free_acomp(acomp_ctx->acomp);
+ kfree(acomp_ctx->buffer);
}
return 0;
@@ -632,21 +849,16 @@ static void zswap_invalidate_entry(struct zswap_tree *tree,
zswap_entry_put(tree, entry);
}
-static int zswap_reclaim_entry(struct zswap_pool *pool)
+static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
+ spinlock_t *lock, void *arg)
{
- struct zswap_entry *entry;
+ struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
+ bool *encountered_page_in_swapcache = (bool *)arg;
struct zswap_tree *tree;
pgoff_t swpoffset;
- int ret;
+ enum lru_status ret = LRU_REMOVED_RETRY;
+ int writeback_result;
- /* Get an entry off the LRU */
- spin_lock(&pool->lru_lock);
- if (list_empty(&pool->lru)) {
- spin_unlock(&pool->lru_lock);
- return -EINVAL;
- }
- entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
- list_del_init(&entry->lru);
/*
* Once the lru lock is dropped, the entry might get freed. The
* swpoffset is copied to the stack, and entry isn't deref'd again
@@ -654,29 +866,48 @@ static int zswap_reclaim_entry(struct zswap_pool *pool)
*/
swpoffset = swp_offset(entry->swpentry);
tree = zswap_trees[swp_type(entry->swpentry)];
- spin_unlock(&pool->lru_lock);
+ list_lru_isolate(l, item);
+ /*
+ * It's safe to drop the lock here because we return either
+ * LRU_REMOVED_RETRY or LRU_RETRY.
+ */
+ spin_unlock(lock);
/* Check for invalidate() race */
spin_lock(&tree->lock);
- if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) {
- ret = -EAGAIN;
+ if (entry != zswap_rb_search(&tree->rbroot, swpoffset))
goto unlock;
- }
+
/* Hold a reference to prevent a free during writeback */
zswap_entry_get(entry);
spin_unlock(&tree->lock);
- ret = zswap_writeback_entry(entry, tree);
+ writeback_result = zswap_writeback_entry(entry, tree);
spin_lock(&tree->lock);
- if (ret) {
- /* Writeback failed, put entry back on LRU */
- spin_lock(&pool->lru_lock);
- list_move(&entry->lru, &pool->lru);
- spin_unlock(&pool->lru_lock);
+ if (writeback_result) {
+ zswap_reject_reclaim_fail++;
+ zswap_lru_putback(&entry->pool->list_lru, entry);
+ ret = LRU_RETRY;
+
+ /*
+ * Encountering a page already in swap cache is a sign that we are shrinking
+ * into the warmer region. We should terminate shrinking (if we're in the dynamic
+ * shrinker context).
+ */
+ if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
+ ret = LRU_SKIP;
+ *encountered_page_in_swapcache = true;
+ }
+
goto put_unlock;
}
+ zswap_written_back_pages++;
+
+ if (entry->objcg)
+ count_objcg_event(entry->objcg, ZSWPWB);
+ count_vm_event(ZSWPWB);
/*
* Writeback started successfully, the page now belongs to the
* swapcache. Drop the entry from zswap - unless invalidate already
@@ -689,24 +920,94 @@ put_unlock:
zswap_entry_put(tree, entry);
unlock:
spin_unlock(&tree->lock);
- return ret ? -EAGAIN : 0;
+ spin_lock(lock);
+ return ret;
+}
+
+static int shrink_memcg(struct mem_cgroup *memcg)
+{
+ struct zswap_pool *pool;
+ int nid, shrunk = 0;
+
+ if (!mem_cgroup_zswap_writeback_enabled(memcg))
+ return -EINVAL;
+
+ /*
+ * Skip zombies because their LRUs are reparented and we would be
+ * reclaiming from the parent instead of the dead memcg.
+ */
+ if (memcg && !mem_cgroup_online(memcg))
+ return -ENOENT;
+
+ pool = zswap_pool_current_get();
+ if (!pool)
+ return -EINVAL;
+
+ for_each_node_state(nid, N_NORMAL_MEMORY) {
+ unsigned long nr_to_walk = 1;
+
+ shrunk += list_lru_walk_one(&pool->list_lru, nid, memcg,
+ &shrink_memcg_cb, NULL, &nr_to_walk);
+ }
+ zswap_pool_put(pool);
+ return shrunk ? 0 : -EAGAIN;
}
static void shrink_worker(struct work_struct *w)
{
struct zswap_pool *pool = container_of(w, typeof(*pool),
shrink_work);
+ struct mem_cgroup *memcg;
int ret, failures = 0;
+ /* global reclaim will select cgroup in a round-robin fashion. */
do {
- ret = zswap_reclaim_entry(pool);
- if (ret) {
- zswap_reject_reclaim_fail++;
- if (ret != -EAGAIN)
+ spin_lock(&zswap_pools_lock);
+ pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
+ memcg = pool->next_shrink;
+
+ /*
+ * We need to retry if we have gone through a full round trip, or if we
+ * got an offline memcg (or else we risk undoing the effect of the
+ * zswap memcg offlining cleanup callback). This is not catastrophic
+ * per se, but it will keep the now offlined memcg hostage for a while.
+ *
+ * Note that if we got an online memcg, we will keep the extra
+ * reference in case the original reference obtained by mem_cgroup_iter
+ * is dropped by the zswap memcg offlining callback, ensuring that the
+ * memcg is not killed when we are reclaiming.
+ */
+ if (!memcg) {
+ spin_unlock(&zswap_pools_lock);
+ if (++failures == MAX_RECLAIM_RETRIES)
break;
+
+ goto resched;
+ }
+
+ if (!mem_cgroup_tryget_online(memcg)) {
+ /* drop the reference from mem_cgroup_iter() */
+ mem_cgroup_iter_break(NULL, memcg);
+ pool->next_shrink = NULL;
+ spin_unlock(&zswap_pools_lock);
+
if (++failures == MAX_RECLAIM_RETRIES)
break;
+
+ goto resched;
}
+ spin_unlock(&zswap_pools_lock);
+
+ ret = shrink_memcg(memcg);
+ /* drop the extra reference */
+ mem_cgroup_put(memcg);
+
+ if (ret == -EINVAL)
+ break;
+ if (ret && ++failures == MAX_RECLAIM_RETRIES)
+ break;
+
+resched:
cond_resched();
} while (!zswap_can_accept());
zswap_pool_put(pool);
@@ -760,6 +1061,11 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
&pool->node);
if (ret)
goto error;
+
+ zswap_alloc_shrinker(pool);
+ if (!pool->shrinker)
+ goto error;
+
pr_debug("using %s compressor\n", pool->tfm_name);
/* being the current pool takes 1 ref; this func expects the
@@ -767,14 +1073,19 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
*/
kref_init(&pool->kref);
INIT_LIST_HEAD(&pool->list);
- INIT_LIST_HEAD(&pool->lru);
- spin_lock_init(&pool->lru_lock);
+ if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
+ goto lru_fail;
+ shrinker_register(pool->shrinker);
INIT_WORK(&pool->shrink_work, shrink_worker);
+ atomic_set(&pool->nr_stored, 0);
zswap_pool_debug("created", pool);
return pool;
+lru_fail:
+ list_lru_destroy(&pool->list_lru);
+ shrinker_free(pool->shrinker);
error:
if (pool->acomp_ctx)
free_percpu(pool->acomp_ctx);
@@ -832,8 +1143,16 @@ static void zswap_pool_destroy(struct zswap_pool *pool)
zswap_pool_debug("destroying", pool);
+ shrinker_free(pool->shrinker);
cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
free_percpu(pool->acomp_ctx);
+ list_lru_destroy(&pool->list_lru);
+
+ spin_lock(&zswap_pools_lock);
+ mem_cgroup_iter_break(NULL, pool->next_shrink);
+ pool->next_shrink = NULL;
+ spin_unlock(&zswap_pools_lock);
+
for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
zpool_destroy_pool(pool->zpools[i]);
kfree(pool);
@@ -1040,18 +1359,47 @@ static int zswap_enabled_param_set(const char *val,
return ret;
}
+static void __zswap_load(struct zswap_entry *entry, struct page *page)
+{
+ struct zpool *zpool = zswap_find_zpool(entry);
+ struct scatterlist input, output;
+ struct crypto_acomp_ctx *acomp_ctx;
+ u8 *src;
+
+ acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+ mutex_lock(&acomp_ctx->mutex);
+
+ src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
+ if (!zpool_can_sleep_mapped(zpool)) {
+ memcpy(acomp_ctx->buffer, src, entry->length);
+ src = acomp_ctx->buffer;
+ zpool_unmap_handle(zpool, entry->handle);
+ }
+
+ sg_init_one(&input, src, entry->length);
+ sg_init_table(&output, 1);
+ sg_set_page(&output, page, PAGE_SIZE, 0);
+ acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
+ BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
+ BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
+ mutex_unlock(&acomp_ctx->mutex);
+
+ if (zpool_can_sleep_mapped(zpool))
+ zpool_unmap_handle(zpool, entry->handle);
+}
+
/*********************************
* writeback code
**********************************/
/*
- * Attempts to free an entry by adding a page to the swap cache,
- * decompressing the entry data into the page, and issuing a
- * bio write to write the page back to the swap device.
+ * Attempts to free an entry by adding a folio to the swap cache,
+ * decompressing the entry data into the folio, and issuing a
+ * bio write to write the folio back to the swap device.
*
- * This can be thought of as a "resumed writeback" of the page
+ * This can be thought of as a "resumed writeback" of the folio
* to the swap device. We are basically resuming the same swap
* writeback path that was intercepted with the zswap_store()
- * in the first place. After the page has been decompressed into
+ * in the first place. After the folio has been decompressed into
* the swap cache, the compressed version stored by zswap can be
* freed.
*/
@@ -1059,108 +1407,58 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
struct zswap_tree *tree)
{
swp_entry_t swpentry = entry->swpentry;
- struct page *page;
+ struct folio *folio;
struct mempolicy *mpol;
- struct scatterlist input, output;
- struct crypto_acomp_ctx *acomp_ctx;
- struct zpool *pool = zswap_find_zpool(entry);
- bool page_was_allocated;
- u8 *src, *tmp = NULL;
- unsigned int dlen;
- int ret;
+ bool folio_was_allocated;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
};
- if (!zpool_can_sleep_mapped(pool)) {
- tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
- }
-
- /* try to allocate swap cache page */
+ /* try to allocate swap cache folio */
mpol = get_task_policy(current);
- page = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
- NO_INTERLEAVE_INDEX, &page_was_allocated);
- if (!page) {
- ret = -ENOMEM;
- goto fail;
- }
+ folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
+ NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
+ if (!folio)
+ return -ENOMEM;
- /* Found an existing page, we raced with load/swapin */
- if (!page_was_allocated) {
- put_page(page);
- ret = -EEXIST;
- goto fail;
+ /*
+ * Found an existing folio, we raced with load/swapin. We generally
+ * write back cold folios from zswap, and swapin means the folio just
+ * became hot. Skip this folio and let the caller find another one.
+ */
+ if (!folio_was_allocated) {
+ folio_put(folio);
+ return -EEXIST;
}
/*
- * Page is locked, and the swapcache is now secured against
+ * folio is locked, and the swapcache is now secured against
* concurrent swapping to and from the slot. Verify that the
* swap entry hasn't been invalidated and recycled behind our
* backs (our zswap_entry reference doesn't prevent that), to
- * avoid overwriting a new swap page with old compressed data.
+ * avoid overwriting a new swap folio with old compressed data.
*/
spin_lock(&tree->lock);
if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
spin_unlock(&tree->lock);
- delete_from_swap_cache(page_folio(page));
- ret = -ENOMEM;
- goto fail;
+ delete_from_swap_cache(folio);
+ return -ENOMEM;
}
spin_unlock(&tree->lock);
- /* decompress */
- acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
- dlen = PAGE_SIZE;
-
- src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
- if (!zpool_can_sleep_mapped(pool)) {
- memcpy(tmp, src, entry->length);
- src = tmp;
- zpool_unmap_handle(pool, entry->handle);
- }
-
- mutex_lock(acomp_ctx->mutex);
- sg_init_one(&input, src, entry->length);
- sg_init_table(&output, 1);
- sg_set_page(&output, page, PAGE_SIZE, 0);
- acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
- ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
- dlen = acomp_ctx->req->dlen;
- mutex_unlock(acomp_ctx->mutex);
+ __zswap_load(entry, &folio->page);
- if (!zpool_can_sleep_mapped(pool))
- kfree(tmp);
- else
- zpool_unmap_handle(pool, entry->handle);
-
- BUG_ON(ret);
- BUG_ON(dlen != PAGE_SIZE);
-
- /* page is up to date */
- SetPageUptodate(page);
+ /* folio is up to date */
+ folio_mark_uptodate(folio);
/* move it to the tail of the inactive list after end_writeback */
- SetPageReclaim(page);
+ folio_set_reclaim(folio);
/* start writeback */
- __swap_writepage(page, &wbc);
- put_page(page);
- zswap_written_back_pages++;
+ __swap_writepage(folio, &wbc);
+ folio_put(folio);
- return ret;
-
-fail:
- if (!zpool_can_sleep_mapped(pool))
- kfree(tmp);
-
- /*
- * If we get here because the page is already in swapcache, a
- * load may be happening concurrently. It is safe and okay to
- * not free the entry. It is also okay to return !0.
- */
- return ret;
+ return 0;
}
static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
@@ -1204,6 +1502,7 @@ bool zswap_store(struct folio *folio)
struct scatterlist input, output;
struct crypto_acomp_ctx *acomp_ctx;
struct obj_cgroup *objcg = NULL;
+ struct mem_cgroup *memcg = NULL;
struct zswap_pool *pool;
struct zpool *zpool;
unsigned int dlen = PAGE_SIZE;
@@ -1235,15 +1534,15 @@ bool zswap_store(struct folio *folio)
zswap_invalidate_entry(tree, dupentry);
}
spin_unlock(&tree->lock);
-
- /*
- * XXX: zswap reclaim does not work with cgroups yet. Without a
- * cgroup-aware entry LRU, we will push out entries system-wide based on
- * local cgroup limits.
- */
objcg = get_obj_cgroup_from_folio(folio);
- if (objcg && !obj_cgroup_may_zswap(objcg))
- goto reject;
+ if (objcg && !obj_cgroup_may_zswap(objcg)) {
+ memcg = get_mem_cgroup_from_objcg(objcg);
+ if (shrink_memcg(memcg)) {
+ mem_cgroup_put(memcg);
+ goto reject;
+ }
+ mem_cgroup_put(memcg);
+ }
/* reclaim space if needed */
if (zswap_is_full()) {
@@ -1260,23 +1559,23 @@ bool zswap_store(struct folio *folio)
}
/* allocate entry */
- entry = zswap_entry_cache_alloc(GFP_KERNEL);
+ entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
if (!entry) {
zswap_reject_kmemcache_fail++;
goto reject;
}
if (zswap_same_filled_pages_enabled) {
- src = kmap_atomic(page);
+ src = kmap_local_page(page);
if (zswap_is_page_same_filled(src, &value)) {
- kunmap_atomic(src);
+ kunmap_local(src);
entry->swpentry = swp_entry(type, offset);
entry->length = 0;
entry->value = value;
atomic_inc(&zswap_same_filled_pages);
goto insert_entry;
}
- kunmap_atomic(src);
+ kunmap_local(src);
}
if (!zswap_non_same_filled_pages_enabled)
@@ -1287,16 +1586,29 @@ bool zswap_store(struct folio *folio)
if (!entry->pool)
goto freepage;
+ if (objcg) {
+ memcg = get_mem_cgroup_from_objcg(objcg);
+ if (memcg_list_lru_alloc(memcg, &entry->pool->list_lru, GFP_KERNEL)) {
+ mem_cgroup_put(memcg);
+ goto put_pool;
+ }
+ mem_cgroup_put(memcg);
+ }
+
/* compress */
acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
- mutex_lock(acomp_ctx->mutex);
+ mutex_lock(&acomp_ctx->mutex);
- dst = acomp_ctx->dstmem;
+ dst = acomp_ctx->buffer;
sg_init_table(&input, 1);
- sg_set_page(&input, page, PAGE_SIZE, 0);
+ sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
- /* zswap_dstmem is of size (PAGE_SIZE * 2). Reflect same in sg_list */
+ /*
+ * We need PAGE_SIZE * 2 here since there may be an over-compression
+ * case, and hardware accelerators may not check the dst buffer size,
+ * so give the dst buffer enough length to avoid buffer overflow.
+ */
sg_init_one(&output, dst, PAGE_SIZE * 2);
acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
/*
@@ -1336,7 +1648,7 @@ bool zswap_store(struct folio *folio)
buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
memcpy(buf, dst, dlen);
zpool_unmap_handle(zpool, handle);
- mutex_unlock(acomp_ctx->mutex);
+ mutex_unlock(&acomp_ctx->mutex);
/* populate entry */
entry->swpentry = swp_entry(type, offset);
@@ -1365,9 +1677,9 @@ insert_entry:
zswap_invalidate_entry(tree, dupentry);
}
if (entry->length) {
- spin_lock(&entry->pool->lru_lock);
- list_add(&entry->lru, &entry->pool->lru);
- spin_unlock(&entry->pool->lru_lock);
+ INIT_LIST_HEAD(&entry->lru);
+ zswap_lru_add(&entry->pool->list_lru, entry);
+ atomic_inc(&entry->pool->nr_stored);
}
spin_unlock(&tree->lock);
@@ -1379,7 +1691,8 @@ insert_entry:
return true;
put_dstmem:
- mutex_unlock(acomp_ctx->mutex);
+ mutex_unlock(&acomp_ctx->mutex);
+put_pool:
zswap_pool_put(entry->pool);
freepage:
zswap_entry_cache_free(entry);
@@ -1403,12 +1716,7 @@ bool zswap_load(struct folio *folio)
struct page *page = &folio->page;
struct zswap_tree *tree = zswap_trees[type];
struct zswap_entry *entry;
- struct scatterlist input, output;
- struct crypto_acomp_ctx *acomp_ctx;
- u8 *src, *dst, *tmp;
- struct zpool *zpool;
- unsigned int dlen;
- bool ret;
+ u8 *dst;
VM_WARN_ON_ONCE(!folio_test_locked(folio));
@@ -1421,67 +1729,30 @@ bool zswap_load(struct folio *folio)
}
spin_unlock(&tree->lock);
- if (!entry->length) {
- dst = kmap_atomic(page);
+ if (entry->length)
+ __zswap_load(entry, page);
+ else {
+ dst = kmap_local_page(page);
zswap_fill_page(dst, entry->value);
- kunmap_atomic(dst);
- ret = true;
- goto stats;
- }
-
- zpool = zswap_find_zpool(entry);
- if (!zpool_can_sleep_mapped(zpool)) {
- tmp = kmalloc(entry->length, GFP_KERNEL);
- if (!tmp) {
- ret = false;
- goto freeentry;
- }
- }
-
- /* decompress */
- dlen = PAGE_SIZE;
- src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
-
- if (!zpool_can_sleep_mapped(zpool)) {
- memcpy(tmp, src, entry->length);
- src = tmp;
- zpool_unmap_handle(zpool, entry->handle);
+ kunmap_local(dst);
}
- acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
- mutex_lock(acomp_ctx->mutex);
- sg_init_one(&input, src, entry->length);
- sg_init_table(&output, 1);
- sg_set_page(&output, page, PAGE_SIZE, 0);
- acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
- if (crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait))
- WARN_ON(1);
- mutex_unlock(acomp_ctx->mutex);
-
- if (zpool_can_sleep_mapped(zpool))
- zpool_unmap_handle(zpool, entry->handle);
- else
- kfree(tmp);
-
- ret = true;
-stats:
count_vm_event(ZSWPIN);
if (entry->objcg)
count_objcg_event(entry->objcg, ZSWPIN);
-freeentry:
+
spin_lock(&tree->lock);
- if (ret && zswap_exclusive_loads_enabled) {
+ if (zswap_exclusive_loads_enabled) {
zswap_invalidate_entry(tree, entry);
folio_mark_dirty(folio);
} else if (entry->length) {
- spin_lock(&entry->pool->lru_lock);
- list_move(&entry->lru, &entry->pool->lru);
- spin_unlock(&entry->pool->lru_lock);
+ zswap_lru_del(&entry->pool->list_lru, entry);
+ zswap_lru_add(&entry->pool->list_lru, entry);
}
zswap_entry_put(tree, entry);
spin_unlock(&tree->lock);
- return ret;
+ return true;
}
void zswap_invalidate(int type, pgoff_t offset)
@@ -1595,13 +1866,6 @@ static int zswap_setup(void)
goto cache_fail;
}
- ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
- zswap_dstmem_prepare, zswap_dstmem_dead);
- if (ret) {
- pr_err("dstmem alloc failed\n");
- goto dstmem_fail;
- }
-
ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
"mm/zswap_pool:prepare",
zswap_cpu_comp_prepare,
@@ -1633,8 +1897,6 @@ fallback_fail:
if (pool)
zswap_pool_destroy(pool);
hp_fail:
- cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
-dstmem_fail:
kmem_cache_destroy(zswap_entry_cache);
cache_fail:
/* if built-in, we aren't unloaded on failure; don't allow use */
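The zswap hunks above replace the pool-global LRU and spinlock with a memcg-aware list_lru driven by a registered shrinker. A minimal sketch of that wiring pattern follows; my_pool, my_count and my_scan are hypothetical names for illustration and are not symbols from this patch.

/*
 * Sketch of the shrinker + memcg-aware list_lru wiring used by the zswap
 * patch above. Names are illustrative assumptions, not patch symbols.
 */
#include <linux/list_lru.h>
#include <linux/shrinker.h>
#include <linux/slab.h>

struct my_pool {
	struct list_lru list_lru;
	struct shrinker *shrinker;
};

static unsigned long my_count(struct shrinker *shrinker,
			      struct shrink_control *sc)
{
	struct my_pool *pool = shrinker->private_data;

	/* Only report objects charged to the memcg/node being reclaimed. */
	return list_lru_shrink_count(&pool->list_lru, sc);
}

static unsigned long my_scan(struct shrinker *shrinker,
			     struct shrink_control *sc)
{
	/* Walk the per-memcg, per-node list and reclaim entries here. */
	return SHRINK_STOP;
}

static int my_pool_init(struct my_pool *pool)
{
	pool->shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
					SHRINKER_MEMCG_AWARE, "my-pool");
	if (!pool->shrinker)
		return -ENOMEM;

	pool->shrinker->count_objects = my_count;
	pool->shrinker->scan_objects = my_scan;
	pool->shrinker->private_data = pool;

	/* The list_lru must know its shrinker so memcg charging lines up. */
	if (list_lru_init_memcg(&pool->list_lru, pool->shrinker)) {
		shrinker_free(pool->shrinker);
		return -ENOMEM;
	}

	shrinker_register(pool->shrinker);
	return 0;
}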
diff --git a/net/compat.c b/net/compat.c
index 6564720f32b7..485db8ee9b28 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -297,7 +297,7 @@ void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm)
int err = 0, i;
for (i = 0; i < fdmax; i++) {
- err = receive_fd_user(scm->fp->fp[i], cmsg_data + i, o_flags);
+ err = scm_recv_one_fd(scm->fp->fp[i], cmsg_data + i, o_flags);
if (err < 0)
break;
}
diff --git a/net/core/scm.c b/net/core/scm.c
index 7dc47c17d863..db3f7cd519c2 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -325,7 +325,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
}
for (i = 0; i < fdmax; i++) {
- err = receive_fd_user(scm->fp->fp[i], cmsg_data + i, o_flags);
+ err = scm_recv_one_fd(scm->fp->fp[i], cmsg_data + i, o_flags);
if (err < 0)
break;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 94cc40a6f797..7ee648829849 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -337,7 +337,7 @@ static struct sk_buff *napi_skb_cache_get(void)
}
skb = nc->skb_cache[--nc->skb_count];
- kasan_unpoison_object_data(skbuff_cache, skb);
+ kasan_mempool_unpoison_object(skb, kmem_cache_size(skbuff_cache));
return skb;
}
@@ -1309,13 +1309,15 @@ static void napi_skb_cache_put(struct sk_buff *skb)
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
u32 i;
- kasan_poison_object_data(skbuff_cache, skb);
+ if (!kasan_mempool_poison_object(skb))
+ return;
+
nc->skb_cache[nc->skb_count++] = skb;
if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
- kasan_unpoison_object_data(skbuff_cache,
- nc->skb_cache[i]);
+ kasan_mempool_unpoison_object(nc->skb_cache[i],
+ kmem_cache_size(skbuff_cache));
kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
nc->skb_cache + NAPI_SKB_CACHE_HALF);
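The skbuff hunks above move the NAPI skb cache from the object-data poisoning helpers to the newer KASAN mempool API. A rough sketch of that caching pattern, using a hypothetical one-slot cache (my_cache and my_slot are assumptions; only the kasan_mempool_* calls mirror the patch):

#include <linux/kasan.h>
#include <linux/slab.h>

static struct kmem_cache *my_cache;
static void *my_slot;

static void my_cache_put(void *obj)
{
	/*
	 * Poison the object while it sits in the local cache. If KASAN
	 * takes the object (e.g. puts it into quarantine) or rejects the
	 * free, do not keep it in the cache.
	 */
	if (!kasan_mempool_poison_object(obj))
		return;

	my_slot = obj;
}

static void *my_cache_get(void)
{
	void *obj = my_slot;

	if (!obj)
		return kmem_cache_alloc(my_cache, GFP_ATOMIC);

	my_slot = NULL;
	/* Unpoison before handing the cached object back to the caller. */
	kasan_mempool_unpoison_object(obj, kmem_cache_size(my_cache));
	return obj;
}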
diff --git a/net/core/sock.c b/net/core/sock.c
index fef349dd72fa..d02534c77413 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1711,9 +1711,16 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
break;
case SO_TIMESTAMPING_OLD:
+ case SO_TIMESTAMPING_NEW:
lv = sizeof(v.timestamping);
- v.timestamping.flags = READ_ONCE(sk->sk_tsflags);
- v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc);
+ /* For the later-added case SO_TIMESTAMPING_NEW: Be strict about only
+ * returning the flags when they were set through the same option.
+ * Don't change the behaviour for the old case SO_TIMESTAMPING_OLD.
+ */
+ if (optname == SO_TIMESTAMPING_OLD || sock_flag(sk, SOCK_TSTAMP_NEW)) {
+ v.timestamping.flags = READ_ONCE(sk->sk_tsflags);
+ v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc);
+ }
break;
case SO_RCVTIMEO_OLD:
@@ -2806,6 +2813,7 @@ int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
sockc->mark = *(u32 *)CMSG_DATA(cmsg);
break;
case SO_TIMESTAMPING_OLD:
+ case SO_TIMESTAMPING_NEW:
if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
return -EINVAL;
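The sk_getsockopt change above makes SO_TIMESTAMPING_NEW report flags only when they were set through the same option. From userspace the read-back looks roughly like the sketch below; which raw value plain SO_TIMESTAMPING maps to (_OLD or _NEW) depends on libc and architecture, and error handling is trimmed, so treat this as illustrative only.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37	/* asm-generic value; portability hedge only */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int flags = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
	struct so_timestamping ts;
	socklen_t len = sizeof(ts);

	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));

	memset(&ts, 0, sizeof(ts));
	getsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, &len);
	printf("flags=0x%x bind_phc=%d\n", ts.flags, ts.bind_phc);
	return 0;
}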
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ff6838ca2e58..fce5668a6a3d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1849,7 +1849,6 @@ static int receive_fallback_to_copy(struct sock *sk,
{
unsigned long copy_address = (unsigned long)zc->copybuf_address;
struct msghdr msg = {};
- struct iovec iov;
int err;
zc->length = 0;
@@ -1858,8 +1857,8 @@ static int receive_fallback_to_copy(struct sock *sk,
if (copy_address != zc->copybuf_address)
return -EINVAL;
- err = import_single_range(ITER_DEST, (void __user *)copy_address,
- inq, &iov, &msg.msg_iter);
+ err = import_ubuf(ITER_DEST, (void __user *)copy_address, inq,
+ &msg.msg_iter);
if (err)
return err;
@@ -1886,14 +1885,13 @@ static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc,
{
unsigned long copy_address = (unsigned long)zc->copybuf_address;
struct msghdr msg = {};
- struct iovec iov;
int err;
if (copy_address != zc->copybuf_address)
return -EINVAL;
- err = import_single_range(ITER_DEST, (void __user *)copy_address,
- copylen, &iov, &msg.msg_iter);
+ err = import_ubuf(ITER_DEST, (void __user *)copy_address, copylen,
+ &msg.msg_iter);
if (err)
return err;
err = skb_copy_datagram_msg(skb, *offset, &msg, copylen);
diff --git a/net/ipv4/tcp_sigpool.c b/net/ipv4/tcp_sigpool.c
index 55b310a722c7..8512cb09ebc0 100644
--- a/net/ipv4/tcp_sigpool.c
+++ b/net/ipv4/tcp_sigpool.c
@@ -162,9 +162,8 @@ int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size)
if (strcmp(cpool[i].alg, alg))
continue;
- if (kref_read(&cpool[i].kref) > 0)
- kref_get(&cpool[i].kref);
- else
+ /* pairs with tcp_sigpool_release() */
+ if (!kref_get_unless_zero(&cpool[i].kref))
kref_init(&cpool[i].kref);
ret = i;
goto out;
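The tcp_sigpool fix replaces a racy kref_read()-then-kref_get() with kref_get_unless_zero(). The general pattern for reviving a cached slot whose refcount may have dropped to zero is sketched below with hypothetical names (my_slot, my_slot_release); as in the sigpool code, an external lock must serialise reuse against the release path.

#include <linux/kref.h>

struct my_slot {
	struct kref kref;
	/* ... cached state that outlives the last put ... */
};

static void my_slot_release(struct kref *kref)
{
	/* Tear down the slot's payload; the slot itself stays cached. */
}

/* Caller holds the lock that also covers my_slot_release(). */
static void my_slot_reuse(struct my_slot *slot)
{
	/*
	 * Pairs with kref_put(): if the count is still non-zero we share
	 * the existing reference; otherwise we are the first user again
	 * and must re-initialise the counter instead of incrementing zero.
	 */
	if (!kref_get_unless_zero(&slot->kref))
		kref_init(&slot->kref);
}

static void my_slot_put(struct my_slot *slot)
{
	kref_put(&slot->kref, my_slot_release);
}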
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2692a7b24c40..733ace18806c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1407,23 +1407,15 @@ retry:
write_unlock_bh(&idev->lock);
- /* From RFC 4941:
- *
- * A temporary address is created only if this calculated Preferred
- * Lifetime is greater than REGEN_ADVANCE time units. In
- * particular, an implementation must not create a temporary address
- * with a zero Preferred Lifetime.
- *
- * Clamp the preferred lifetime to a minimum of regen_advance, unless
- * that would exceed valid_lft.
- *
+ /* A temporary address is created only if this calculated Preferred
+ * Lifetime is greater than REGEN_ADVANCE time units. In particular,
+ * an implementation must not create a temporary address with a zero
+ * Preferred Lifetime.
* Use age calculation as in addrconf_verify to avoid unnecessary
* temporary addresses being generated.
*/
age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
- if (cfg.preferred_lft <= regen_advance + age)
- cfg.preferred_lft = regen_advance + age + 1;
- if (cfg.preferred_lft > cfg.valid_lft) {
+ if (cfg.preferred_lft <= regen_advance + age) {
in6_ifa_put(ifp);
in6_dev_put(idev);
ret = -1;
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 80aeb25f1b68..dce5606ed66d 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -1043,9 +1043,12 @@ void ieee80211_debugfs_recreate_netdev(struct ieee80211_sub_if_data *sdata,
{
ieee80211_debugfs_remove_netdev(sdata);
ieee80211_debugfs_add_netdev(sdata, mld_vif);
- drv_vif_add_debugfs(sdata->local, sdata);
- if (!mld_vif)
- ieee80211_link_debugfs_drv_add(&sdata->deflink);
+
+ if (sdata->flags & IEEE80211_SDATA_IN_DRIVER) {
+ drv_vif_add_debugfs(sdata->local, sdata);
+ if (!mld_vif)
+ ieee80211_link_debugfs_drv_add(&sdata->deflink);
+ }
}
void ieee80211_link_debugfs_add(struct ieee80211_link_data *link)
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index d3820333cd59..3b7f70073fc3 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -75,9 +75,9 @@ int drv_add_interface(struct ieee80211_local *local,
if (ret)
return ret;
- sdata->flags |= IEEE80211_SDATA_IN_DRIVER;
+ if (!(sdata->flags & IEEE80211_SDATA_IN_DRIVER)) {
+ sdata->flags |= IEEE80211_SDATA_IN_DRIVER;
- if (!local->in_reconfig) {
drv_vif_add_debugfs(local, sdata);
/* initially vif is not MLD */
ieee80211_link_debugfs_drv_add(&sdata->deflink);
@@ -113,9 +113,13 @@ void drv_remove_interface(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return;
+ sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
+
+ /* Remove driver debugfs entries */
+ ieee80211_debugfs_recreate_netdev(sdata, sdata->vif.valid_links);
+
trace_drv_remove_interface(local, sdata);
local->ops->remove_interface(&local->hw, &sdata->vif);
- sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
trace_drv_return_void(local);
}
@@ -534,7 +538,7 @@ int drv_change_vif_links(struct ieee80211_local *local,
if (ret)
return ret;
- if (!local->in_reconfig) {
+ if (!local->in_reconfig && !local->resuming) {
for_each_set_bit(link_id, &links_to_add,
IEEE80211_MLD_MAX_NUM_LINKS) {
link = rcu_access_pointer(sdata->link[link_id]);
@@ -590,7 +594,7 @@ int drv_change_sta_links(struct ieee80211_local *local,
return ret;
/* during reconfig don't add it to debugfs again */
- if (local->in_reconfig)
+ if (local->in_reconfig || local->resuming)
return 0;
for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 6d7684c35e93..852b3f4af000 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1982,6 +1982,17 @@ static void tcp_release_cb_override(struct sock *ssk)
tcp_release_cb(ssk);
}
+static int tcp_abort_override(struct sock *ssk, int err)
+{
+ /* closing a listener subflow requires a great deal of care.
+ * keep it simple and just prevent such an operation
+ */
+ if (inet_sk_state_load(ssk) == TCP_LISTEN)
+ return -EINVAL;
+
+ return tcp_abort(ssk, err);
+}
+
static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
.name = "mptcp",
.owner = THIS_MODULE,
@@ -2026,6 +2037,7 @@ void __init mptcp_subflow_init(void)
tcp_prot_override = tcp_prot;
tcp_prot_override.release_cb = tcp_release_cb_override;
+ tcp_prot_override.diag_destroy = tcp_abort_override;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
@@ -2061,6 +2073,7 @@ void __init mptcp_subflow_init(void)
tcpv6_prot_override = tcpv6_prot;
tcpv6_prot_override.release_cb = tcp_release_cb_override;
+ tcpv6_prot_override.diag_destroy = tcp_abort_override;
#endif
mptcp_diag_subflow_init(&subflow_ulp_ops);
diff --git a/net/netfilter/nf_nat_ovs.c b/net/netfilter/nf_nat_ovs.c
index 551abd2da614..0f9a559f6207 100644
--- a/net/netfilter/nf_nat_ovs.c
+++ b/net/netfilter/nf_nat_ovs.c
@@ -75,9 +75,10 @@ static int nf_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
}
err = nf_nat_packet(ct, ctinfo, hooknum, skb);
+out:
if (err == NF_ACCEPT)
*action |= BIT(maniptype);
-out:
+
return err;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index c5c17c6e80ed..be04af433988 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -9887,7 +9887,7 @@ static void nft_set_commit_update(struct list_head *set_update_list)
list_for_each_entry_safe(set, next, set_update_list, pending_update) {
list_del_init(&set->pending_update);
- if (!set->ops->commit)
+ if (!set->ops->commit || set->dead)
continue;
set->ops->commit(set);
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 8b536d7ef6c2..c3e635364701 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -158,7 +158,7 @@ static bool nft_payload_fast_eval(const struct nft_expr *expr,
else {
if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
return false;
- ptr = skb_network_header(skb) + nft_thoff(pkt);
+ ptr = skb->data + nft_thoff(pkt);
}
ptr += priv->offset;
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index fccb3cf7749c..6475c7abc1fe 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -78,7 +78,7 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
case NFT_GOTO:
err = nf_tables_bind_chain(ctx, chain);
if (err < 0)
- return err;
+ goto err1;
break;
default:
break;
diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c
index f1d5b8465217..a07c2216d28b 100644
--- a/net/netlabel/netlabel_calipso.c
+++ b/net/netlabel/netlabel_calipso.c
@@ -54,6 +54,28 @@ static const struct nla_policy calipso_genl_policy[NLBL_CALIPSO_A_MAX + 1] = {
[NLBL_CALIPSO_A_MTYPE] = { .type = NLA_U32 },
};
+static const struct netlbl_calipso_ops *calipso_ops;
+
+/**
+ * netlbl_calipso_ops_register - Register the CALIPSO operations
+ * @ops: ops to register
+ *
+ * Description:
+ * Register the CALIPSO packet engine operations.
+ *
+ */
+const struct netlbl_calipso_ops *
+netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops)
+{
+ return xchg(&calipso_ops, ops);
+}
+EXPORT_SYMBOL(netlbl_calipso_ops_register);
+
+static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
+{
+ return READ_ONCE(calipso_ops);
+}
+
/* NetLabel Command Handlers
*/
/**
@@ -96,15 +118,18 @@ static int netlbl_calipso_add_pass(struct genl_info *info,
*
*/
static int netlbl_calipso_add(struct sk_buff *skb, struct genl_info *info)
-
{
int ret_val = -EINVAL;
struct netlbl_audit audit_info;
+ const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get();
if (!info->attrs[NLBL_CALIPSO_A_DOI] ||
!info->attrs[NLBL_CALIPSO_A_MTYPE])
return -EINVAL;
+ if (!ops)
+ return -EOPNOTSUPP;
+
netlbl_netlink_auditinfo(&audit_info);
switch (nla_get_u32(info->attrs[NLBL_CALIPSO_A_MTYPE])) {
case CALIPSO_MAP_PASS:
@@ -363,28 +388,6 @@ int __init netlbl_calipso_genl_init(void)
return genl_register_family(&netlbl_calipso_gnl_family);
}
-static const struct netlbl_calipso_ops *calipso_ops;
-
-/**
- * netlbl_calipso_ops_register - Register the CALIPSO operations
- * @ops: ops to register
- *
- * Description:
- * Register the CALIPSO packet engine operations.
- *
- */
-const struct netlbl_calipso_ops *
-netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops)
-{
- return xchg(&calipso_ops, ops);
-}
-EXPORT_SYMBOL(netlbl_calipso_ops_register);
-
-static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
-{
- return READ_ONCE(calipso_ops);
-}
-
/**
* calipso_doi_add - Add a new DOI to the CALIPSO protocol engine
* @doi_def: the DOI structure
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index 1dac28136e6a..18be13fb9b75 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -145,6 +145,13 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool device,
static struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
{
+ /* Since using nfc_llcp_local may result in usage of nfc_dev, whenever
+ * we hold a reference to local, we also need to hold a reference to
+ * the device to avoid UAF.
+ */
+ if (!nfc_get_device(local->dev->idx))
+ return NULL;
+
kref_get(&local->ref);
return local;
@@ -177,10 +184,18 @@ static void local_release(struct kref *ref)
int nfc_llcp_local_put(struct nfc_llcp_local *local)
{
+ struct nfc_dev *dev;
+ int ret;
+
if (local == NULL)
return 0;
- return kref_put(&local->ref, local_release);
+ dev = local->dev;
+
+ ret = kref_put(&local->ref, local_release);
+ nfc_put_device(dev);
+
+ return ret;
}
static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
@@ -959,8 +974,17 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
}
new_sock = nfc_llcp_sock(new_sk);
- new_sock->dev = local->dev;
+
new_sock->local = nfc_llcp_local_get(local);
+ if (!new_sock->local) {
+ reason = LLCP_DM_REJ;
+ sock_put(&new_sock->sk);
+ release_sock(&sock->sk);
+ sock_put(&sock->sk);
+ goto fail;
+ }
+
+ new_sock->dev = local->dev;
new_sock->rw = sock->rw;
new_sock->miux = sock->miux;
new_sock->nfc_protocol = sock->nfc_protocol;
@@ -1597,7 +1621,16 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
if (local == NULL)
return -ENOMEM;
- local->dev = ndev;
+ /* As we are going to initialize local's refcount, we need to get the
+ * nfc_dev to avoid UAF, otherwise there is no point in continuing.
+ * See nfc_llcp_local_get().
+ */
+ local->dev = nfc_get_device(ndev->idx);
+ if (!local->dev) {
+ kfree(local);
+ return -ENODEV;
+ }
+
INIT_LIST_HEAD(&local->list);
kref_init(&local->ref);
mutex_init(&local->sdp_lock);
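The llcp_core.c changes tie the lifetime of the nfc device to every reference on the LLCP local, so local->dev cannot be freed while a local reference is held. The same pin-the-parent pattern is sketched below with generic driver-core helpers; my_local and its fields are hypothetical, and unlike nfc_get_device() the plain get_device() only fails for a NULL parent.

#include <linux/device.h>
#include <linux/kref.h>

struct my_local {
	struct kref ref;
	struct device *dev;	/* parent whose lifetime we depend on */
};

static struct my_local *my_local_get(struct my_local *local)
{
	/* Pin the parent first: releasing local may still touch dev. */
	if (!get_device(local->dev))
		return NULL;

	kref_get(&local->ref);
	return local;
}

static void my_local_release(struct kref *ref)
{
	/* Free local's own resources here. */
}

static void my_local_put(struct my_local *local)
{
	struct device *dev = local->dev;

	kref_put(&local->ref, my_local_release);
	/* Drop the parent pin only after our own reference is gone. */
	put_device(dev);
}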
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 645677f84dba..819157bbb5a2 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -796,6 +796,11 @@ static int llcp_sock_sendmsg(struct socket *sock, struct msghdr *msg,
}
if (sk->sk_type == SOCK_DGRAM) {
+ if (sk->sk_state != LLCP_BOUND) {
+ release_sock(sk);
+ return -ENOTCONN;
+ }
+
DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, addr,
msg->msg_name);
diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
index b1db0b519179..abb0c70ffc8b 100644
--- a/net/qrtr/ns.c
+++ b/net/qrtr/ns.c
@@ -512,7 +512,9 @@ static int ctrl_cmd_del_server(struct sockaddr_qrtr *from,
if (!node)
return -ENOENT;
- return server_del(node, port, true);
+ server_del(node, port, true);
+
+ return 0;
}
static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 6f3c1fb2fb44..f176afb70559 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -97,8 +97,10 @@ retry:
static void em_text_destroy(struct tcf_ematch *m)
{
- if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
+ if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config) {
textsearch_destroy(EM_TEXT_PRIV(m)->config);
+ kfree(EM_TEXT_PRIV(m));
+ }
}
static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index a584613aca12..5cc376834c57 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -153,8 +153,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
.lnk[0].link_id = link->link_id,
};
- memcpy(linfo.lnk[0].ibname,
- smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
+ memcpy(linfo.lnk[0].ibname, link->smcibdev->ibdev->name,
sizeof(link->smcibdev->ibdev->name));
smc_gid_be16_convert(linfo.lnk[0].gid, link->gid);
smc_gid_be16_convert(linfo.lnk[0].peer_gid, link->peer_gid);
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 89981dbe46c9..97704a9e84c7 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -844,7 +844,7 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
goto out;
/* the calculated number of cq entries fits to mlx5 cq allocation */
cqe_size_order = cache_line_size() == 128 ? 7 : 6;
- smc_order = MAX_ORDER - cqe_size_order;
+ smc_order = MAX_PAGE_ORDER - cqe_size_order;
if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
diff --git a/net/socket.c b/net/socket.c
index 3379c64217a4..ed3df2f749bf 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -757,6 +757,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg)
{
struct sockaddr_storage *save_addr = (struct sockaddr_storage *)msg->msg_name;
struct sockaddr_storage address;
+ int save_len = msg->msg_namelen;
int ret;
if (msg->msg_name) {
@@ -766,6 +767,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg)
ret = __sock_sendmsg(sock, msg);
msg->msg_name = save_addr;
+ msg->msg_namelen = save_len;
return ret;
}
@@ -2161,10 +2163,9 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
struct sockaddr_storage address;
int err;
struct msghdr msg;
- struct iovec iov;
int fput_needed;
- err = import_single_range(ITER_SOURCE, buff, len, &iov, &msg.msg_iter);
+ err = import_ubuf(ITER_SOURCE, buff, len, &msg.msg_iter);
if (unlikely(err))
return err;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
@@ -2226,11 +2227,10 @@ int __sys_recvfrom(int fd, void __user *ubuf, size_t size, unsigned int flags,
.msg_name = addr ? (struct sockaddr *)&address : NULL,
};
struct socket *sock;
- struct iovec iov;
int err, err2;
int fput_needed;
- err = import_single_range(ITER_DEST, ubuf, size, &iov, &msg.msg_iter);
+ err = import_ubuf(ITER_DEST, ubuf, size, &msg.msg_iter);
if (unlikely(err))
return err;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
diff --git a/samples/Kconfig b/samples/Kconfig
index b0ddf5f36738..b288d9991d27 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -285,6 +285,12 @@ config SAMPLE_KMEMLEAK
Build a sample program which have explicitly leaks memory to test
kmemleak
+config SAMPLE_CGROUP
+ bool "Build cgroup sample code"
+ depends on CGROUPS && CC_CAN_LINK && HEADERS_INSTALL
+ help
+ Build samples that demonstrate the usage of the cgroup API.
+
source "samples/rust/Kconfig"
endif # SAMPLES
diff --git a/samples/Makefile b/samples/Makefile
index 0a551c2b33f4..b85fa64390c5 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -3,6 +3,7 @@
subdir-$(CONFIG_SAMPLE_AUXDISPLAY) += auxdisplay
subdir-$(CONFIG_SAMPLE_ANDROID_BINDERFS) += binderfs
+subdir-$(CONFIG_SAMPLE_CGROUP) += cgroup
obj-$(CONFIG_SAMPLE_CONFIGFS) += configfs/
obj-$(CONFIG_SAMPLE_CONNECTOR) += connector/
obj-$(CONFIG_SAMPLE_FANOTIFY_ERROR) += fanotify/
diff --git a/samples/cgroup/Makefile b/samples/cgroup/Makefile
new file mode 100644
index 000000000000..526c8569707c
--- /dev/null
+++ b/samples/cgroup/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+userprogs-always-y += cgroup_event_listener memcg_event_listener
+
+userccflags += -I usr/include
diff --git a/tools/cgroup/cgroup_event_listener.c b/samples/cgroup/cgroup_event_listener.c
index 3d70dc831a76..3d70dc831a76 100644
--- a/tools/cgroup/cgroup_event_listener.c
+++ b/samples/cgroup/cgroup_event_listener.c
diff --git a/samples/cgroup/memcg_event_listener.c b/samples/cgroup/memcg_event_listener.c
new file mode 100644
index 000000000000..a1667fe2489a
--- /dev/null
+++ b/samples/cgroup/memcg_event_listener.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * memcg_event_listener.c - Simple listener of memcg memory.events
+ *
+ * Copyright (c) 2023, SaluteDevices. All Rights Reserved.
+ *
+ * Author: Dmitry Rokosov <ddrokosov@salutedevices.com>
+ */
+
+#include <err.h>
+#include <errno.h>
+#include <limits.h>
+#include <poll.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/inotify.h>
+#include <unistd.h>
+
+#define MEMCG_EVENTS "memory.events"
+
+/* Size of buffer to use when reading inotify events */
+#define INOTIFY_BUFFER_SIZE 8192
+
+#define INOTIFY_EVENT_NEXT(event, length) ({ \
+ (length) -= sizeof(*(event)) + (event)->len; \
+ (event)++; \
+})
+
+#define INOTIFY_EVENT_OK(event, length) ((length) >= (ssize_t)sizeof(*(event)))
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
+
+struct memcg_counters {
+ long low;
+ long high;
+ long max;
+ long oom;
+ long oom_kill;
+ long oom_group_kill;
+};
+
+struct memcg_events {
+ struct memcg_counters counters;
+ char path[PATH_MAX];
+ int inotify_fd;
+ int inotify_wd;
+};
+
+static void print_memcg_counters(const struct memcg_counters *counters)
+{
+ printf("MEMCG events:\n");
+ printf("\tlow: %ld\n", counters->low);
+ printf("\thigh: %ld\n", counters->high);
+ printf("\tmax: %ld\n", counters->max);
+ printf("\toom: %ld\n", counters->oom);
+ printf("\toom_kill: %ld\n", counters->oom_kill);
+ printf("\toom_group_kill: %ld\n", counters->oom_group_kill);
+}
+
+static int get_memcg_counter(char *line, const char *name, long *counter)
+{
+ size_t len = strlen(name);
+ char *endptr;
+ long tmp;
+
+ if (memcmp(line, name, len)) {
+ warnx("Counter line %s has wrong name, %s is expected",
+ line, name);
+ return -EINVAL;
+ }
+
+ /* skip the whitespace delimiter */
+ len += 1;
+
+ errno = 0;
+ tmp = strtol(&line[len], &endptr, 10);
+ if (((tmp == LONG_MAX || tmp == LONG_MIN) && errno == ERANGE) ||
+ (errno && !tmp)) {
+ warnx("Failed to parse: %s", &line[len]);
+ return -ERANGE;
+ }
+
+ if (endptr == &line[len]) {
+ warnx("Not digits were found in line %s", &line[len]);
+ return -EINVAL;
+ }
+
+ if (!(*endptr == '\0' || (*endptr == '\n' && *++endptr == '\0'))) {
+ warnx("Further characters after number: %s", endptr);
+ return -EINVAL;
+ }
+
+ *counter = tmp;
+
+ return 0;
+}
+
+static int read_memcg_events(struct memcg_events *events, bool show_diff)
+{
+ FILE *fp = fopen(events->path, "re");
+ size_t i;
+ int ret = 0;
+ bool any_new_events = false;
+ char *line = NULL;
+ size_t len = 0;
+ struct memcg_counters new_counters;
+ struct memcg_counters *counters = &events->counters;
+ struct {
+ const char *name;
+ long *new;
+ long *old;
+ } map[] = {
+ {
+ .name = "low",
+ .new = &new_counters.low,
+ .old = &counters->low,
+ },
+ {
+ .name = "high",
+ .new = &new_counters.high,
+ .old = &counters->high,
+ },
+ {
+ .name = "max",
+ .new = &new_counters.max,
+ .old = &counters->max,
+ },
+ {
+ .name = "oom",
+ .new = &new_counters.oom,
+ .old = &counters->oom,
+ },
+ {
+ .name = "oom_kill",
+ .new = &new_counters.oom_kill,
+ .old = &counters->oom_kill,
+ },
+ {
+ .name = "oom_group_kill",
+ .new = &new_counters.oom_group_kill,
+ .old = &counters->oom_group_kill,
+ },
+ };
+
+ if (!fp) {
+ warn("Failed to open memcg events file %s", events->path);
+ return -EBADF;
+ }
+
+ /* Read new values for memcg counters */
+ for (i = 0; i < ARRAY_SIZE(map); ++i) {
+ ssize_t nread;
+
+ errno = 0;
+ nread = getline(&line, &len, fp);
+ if (nread == -1) {
+ if (errno) {
+ warn("Failed to read line for counter %s",
+ map[i].name);
+ ret = -EIO;
+ goto exit;
+ }
+
+ break;
+ }
+
+ ret = get_memcg_counter(line, map[i].name, map[i].new);
+ if (ret) {
+ warnx("Failed to get counter value from line %s", line);
+ goto exit;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(map); ++i) {
+ long diff;
+
+ if (*map[i].new > *map[i].old) {
+ diff = *map[i].new - *map[i].old;
+
+ if (show_diff)
+ printf("*** %ld MEMCG %s event%s, "
+ "change counter %ld => %ld\n",
+ diff, map[i].name,
+ (diff == 1) ? "" : "s",
+ *map[i].old, *map[i].new);
+
+ *map[i].old += diff;
+ any_new_events = true;
+ }
+ }
+
+ if (show_diff && !any_new_events)
+ printf("*** No new untracked memcg events available\n");
+
+exit:
+ free(line);
+ fclose(fp);
+
+ return ret;
+}
+
+static void process_memcg_events(struct memcg_events *events,
+ struct inotify_event *event)
+{
+ int ret;
+
+ if (events->inotify_wd != event->wd) {
+ warnx("Unknown inotify event %d, should be %d", event->wd,
+ events->inotify_wd);
+ return;
+ }
+
+ printf("Received event in %s:\n", events->path);
+
+ if (!(event->mask & IN_MODIFY)) {
+ warnx("No IN_MODIFY event, skip it");
+ return;
+ }
+
+ ret = read_memcg_events(events, /* show_diff = */true);
+ if (ret)
+ warnx("Can't read memcg events");
+}
+
+static void monitor_events(struct memcg_events *events)
+{
+ struct pollfd fds[1];
+ int ret;
+
+ printf("Started monitoring memory events from '%s'...\n", events->path);
+
+ fds[0].fd = events->inotify_fd;
+ fds[0].events = POLLIN;
+
+ for (;;) {
+ ret = poll(fds, ARRAY_SIZE(fds), -1);
+ if (ret < 0 && errno != EAGAIN)
+ err(EXIT_FAILURE, "Can't poll memcg events (%d)", ret);
+
+ if (fds[0].revents & POLLERR)
+ err(EXIT_FAILURE, "Got POLLERR during monitor events");
+
+ if (fds[0].revents & POLLIN) {
+ struct inotify_event *event;
+ char buffer[INOTIFY_BUFFER_SIZE];
+ ssize_t length;
+
+ length = read(fds[0].fd, buffer, INOTIFY_BUFFER_SIZE);
+ if (length <= 0)
+ continue;
+
+ event = (struct inotify_event *)buffer;
+ while (INOTIFY_EVENT_OK(event, length)) {
+ process_memcg_events(events, event);
+ event = INOTIFY_EVENT_NEXT(event, length);
+ }
+ }
+ }
+}
+
+static int initialize_memcg_events(struct memcg_events *events,
+ const char *cgroup)
+{
+ int ret;
+
+ memset(events, 0, sizeof(struct memcg_events));
+
+ ret = snprintf(events->path, PATH_MAX,
+ "/sys/fs/cgroup/%s/memory.events", cgroup);
+ if (ret >= PATH_MAX) {
+ warnx("Path to cgroup memory.events is too long");
+ return -EMSGSIZE;
+ } else if (ret < 0) {
+ warn("Can't generate cgroup event full name");
+ return ret;
+ }
+
+ ret = read_memcg_events(events, /* show_diff = */false);
+ if (ret) {
+ warnx("Failed to read initial memcg events state (%d)", ret);
+ return ret;
+ }
+
+ events->inotify_fd = inotify_init();
+ if (events->inotify_fd < 0) {
+ warn("Failed to setup new inotify device");
+ return -EMFILE;
+ }
+
+ events->inotify_wd = inotify_add_watch(events->inotify_fd,
+ events->path, IN_MODIFY);
+ if (events->inotify_wd < 0) {
+ warn("Couldn't add monitor in dir %s", events->path);
+ return -EIO;
+ }
+
+ printf("Initialized MEMCG events with counters:\n");
+ print_memcg_counters(&events->counters);
+
+ return 0;
+}
+
+static void cleanup_memcg_events(struct memcg_events *events)
+{
+ inotify_rm_watch(events->inotify_fd, events->inotify_wd);
+ close(events->inotify_fd);
+}
+
+int main(int argc, const char **argv)
+{
+ struct memcg_events events;
+ ssize_t ret;
+
+ if (argc != 2)
+ errx(EXIT_FAILURE, "Usage: %s <cgroup>", argv[0]);
+
+ ret = initialize_memcg_events(&events, argv[1]);
+ if (ret)
+ errx(EXIT_FAILURE, "Can't initialize memcg events (%zd)", ret);
+
+ monitor_events(&events);
+
+ cleanup_memcg_events(&events);
+
+ printf("Exiting memcg event listener...\n");
+
+ return EXIT_SUCCESS;
+}
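One way to exercise the listener above: move a process into the watched cgroup, lower memory.high, and allocate past it so the "high" counter in memory.events ticks. The driver below is a hypothetical companion, not part of the sample; the /sys/fs/cgroup/test path, the 4M limit and the 16M allocation are assumptions for a cgroup v2 mount, and it should run as root while the listener watches the same cgroup.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void write_file(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0)
		perror(path);
	if (fd >= 0)
		close(fd);
}

int main(void)
{
	char buf[64];
	size_t sz = 16UL << 20;	/* allocate 16M against a 4M high limit */
	char *mem;

	snprintf(buf, sizeof(buf), "%d", getpid());
	write_file("/sys/fs/cgroup/test/cgroup.procs", buf);
	write_file("/sys/fs/cgroup/test/memory.high", "4M");

	mem = malloc(sz);
	if (mem)
		memset(mem, 1, sz);	/* fault the pages in */

	pause();	/* keep the cgroup populated for inspection */
	return 0;
}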
diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
index 69ba0281f9e0..2284b3751240 100644
--- a/samples/vfio-mdev/mtty.c
+++ b/samples/vfio-mdev/mtty.c
@@ -234,10 +234,10 @@ static void mtty_trigger_interrupt(struct mdev_state *mdev_state)
if (is_msi(mdev_state)) {
if (mdev_state->msi_evtfd)
- eventfd_signal(mdev_state->msi_evtfd, 1);
+ eventfd_signal(mdev_state->msi_evtfd);
} else if (is_intx(mdev_state)) {
if (mdev_state->intx_evtfd && !mdev_state->intx_mask) {
- eventfd_signal(mdev_state->intx_evtfd, 1);
+ eventfd_signal(mdev_state->intx_evtfd);
mdev_state->intx_mask = true;
}
}
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index 2fe6f2828d37..c9725685aa76 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -17,6 +17,8 @@ KBUILD_CFLAGS += -Wno-format-security
KBUILD_CFLAGS += -Wno-trigraphs
KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+KBUILD_CFLAGS += -Wmissing-declarations
+KBUILD_CFLAGS += -Wmissing-prototypes
ifneq ($(CONFIG_FRAME_WARN),0)
KBUILD_CFLAGS += -Wframe-larger-than=$(CONFIG_FRAME_WARN)
@@ -95,10 +97,8 @@ export KBUILD_EXTRA_WARN
ifneq ($(findstring 1, $(KBUILD_EXTRA_WARN)),)
KBUILD_CFLAGS += -Wextra -Wunused -Wno-unused-parameter
-KBUILD_CFLAGS += -Wmissing-declarations
KBUILD_CFLAGS += $(call cc-option, -Wrestrict)
KBUILD_CFLAGS += -Wmissing-format-attribute
-KBUILD_CFLAGS += -Wmissing-prototypes
KBUILD_CFLAGS += -Wold-style-definition
KBUILD_CFLAGS += -Wmissing-include-dirs
KBUILD_CFLAGS += $(call cc-option, -Wunused-but-set-variable)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 25fdb7fda112..a94ed6c46a6d 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -4054,7 +4054,7 @@ sub process {
if ($prevline =~ /^[\+ ]};?\s*$/ &&
$line =~ /^\+/ &&
!($line =~ /^\+\s*$/ ||
- $line =~ /^\+\s*(?:EXPORT_SYMBOL|early_param)/ ||
+ $line =~ /^\+\s*(?:EXPORT_SYMBOL|early_param|ALLOW_ERROR_INJECTION)/ ||
$line =~ /^\+\s*MODULE_/i ||
$line =~ /^\+\s*\#\s*(?:end|elif|else)/ ||
$line =~ /^\+[a-z_]*init/ ||
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index f27d552aec43..14ce31f732ee 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -8,7 +8,6 @@
# Original idea maybe from Keith Owens
# s390 port and big speedup by Arnd Bergmann <arnd@bergmann-dalldorf.de>
# Mips port by Juan Quintela <quintela@mandrakesoft.com>
-# IA64 port via Andreas Dilger
# Arm port by Holger Schurig
# Random bits by Matt Mackall <mpm@selenic.com>
# M68k port by Geert Uytterhoeven and Andreas Schwab
@@ -16,9 +15,10 @@
# sparc port by Martin Habets <errandir_news@mph.eclipse.co.uk>
# ppc64le port by Breno Leitao <leitao@debian.org>
# riscv port by Wadim Mueller <wafgo01@gmail.com>
+# loongarch port by Youling Tang <tangyouling@kylinos.cn>
#
# Usage:
-# objdump -d vmlinux | scripts/checkstack.pl [arch]
+# objdump -d vmlinux | scripts/checkstack.pl [arch] [min_stack]
#
# TODO : Port to all architectures (one regex per arch)
@@ -47,7 +47,7 @@ my (@stack, $re, $dre, $sub, $x, $xs, $funcre, $min_stack);
$min_stack = shift;
if ($min_stack eq "" || $min_stack !~ /^\d+$/) {
- $min_stack = 100;
+ $min_stack = 512;
}
$x = "[0-9a-f]"; # hex character
@@ -56,7 +56,7 @@ my (@stack, $re, $dre, $sub, $x, $xs, $funcre, $min_stack);
if ($arch =~ '^(aarch|arm)64$') {
#ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]!
#a110: d11643ff sub sp, sp, #0x590
- $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
+ $re = qr/^.*stp.*sp, ?\#-([0-9]{1,8})\]\!/o;
$dre = qr/^.*sub.*sp, sp, #(0x$x{1,8})/o;
} elsif ($arch eq 'arm') {
#c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64
@@ -68,25 +68,22 @@ my (@stack, $re, $dre, $sub, $x, $xs, $funcre, $min_stack);
# 2f60: 48 81 ec e8 05 00 00 sub $0x5e8,%rsp
$re = qr/^.*[as][du][db] \$(0x$x{1,8}),\%(e|r)sp$/o;
$dre = qr/^.*[as][du][db] (%.*),\%(e|r)sp$/o;
- } elsif ($arch eq 'ia64') {
- #e0000000044011fc: 01 0f fc 8c adds r12=-384,r12
- $re = qr/.*adds.*r12=-(([0-9]{2}|[3-9])[0-9]{2}),r12/o;
} elsif ($arch eq 'm68k') {
# 2b6c: 4e56 fb70 linkw %fp,#-1168
# 1df770: defc ffe4 addaw #-28,%sp
$re = qr/.*(?:linkw %fp,|addaw )#-([0-9]{1,4})(?:,%sp)?$/o;
} elsif ($arch eq 'mips64') {
#8800402c: 67bdfff0 daddiu sp,sp,-16
- $re = qr/.*daddiu.*sp,sp,-(([0-9]{2}|[3-9])[0-9]{2})/o;
+ $re = qr/.*daddiu.*sp,sp,-([0-9]{1,8})/o;
} elsif ($arch eq 'mips') {
#88003254: 27bdffe0 addiu sp,sp,-32
- $re = qr/.*addiu.*sp,sp,-(([0-9]{2}|[3-9])[0-9]{2})/o;
+ $re = qr/.*addiu.*sp,sp,-([0-9]{1,8})/o;
} elsif ($arch eq 'nios2') {
#25a8: defffb04 addi sp,sp,-20
- $re = qr/.*addi.*sp,sp,-(([0-9]{2}|[3-9])[0-9]{2})/o;
+ $re = qr/.*addi.*sp,sp,-([0-9]{1,8})/o;
} elsif ($arch eq 'openrisc') {
# c000043c: 9c 21 fe f0 l.addi r1,r1,-272
- $re = qr/.*l\.addi.*r1,r1,-(([0-9]{2}|[3-9])[0-9]{2})/o;
+ $re = qr/.*l\.addi.*r1,r1,-([0-9]{1,8})/o;
} elsif ($arch eq 'parisc' || $arch eq 'parisc64') {
$re = qr/.*ldo ($x{1,8})\(sp\),sp/o;
} elsif ($arch eq 'powerpc' || $arch =~ /^ppc(64)?(le)?$/ ) {
@@ -100,10 +97,13 @@ my (@stack, $re, $dre, $sub, $x, $xs, $funcre, $min_stack);
$re = qr/.*(?:lay|ag?hi).*\%r15,-([0-9]+)(?:\(\%r15\))?$/o;
} elsif ($arch eq 'sparc' || $arch eq 'sparc64') {
# f0019d10: 9d e3 bf 90 save %sp, -112, %sp
- $re = qr/.*save.*%sp, -(([0-9]{2}|[3-9])[0-9]{2}), %sp/o;
+ $re = qr/.*save.*%sp, -([0-9]{1,8}), %sp/o;
} elsif ($arch =~ /^riscv(64)?$/) {
#ffffffff8036e868: c2010113 addi sp,sp,-992
- $re = qr/.*addi.*sp,sp,-(([0-9]{2}|[3-9])[0-9]{2})/o;
+ $re = qr/.*addi.*sp,sp,-([0-9]{1,8})/o;
+ } elsif ($arch =~ /^loongarch(32|64)?$/) {
+ #9000000000224708: 02ff4063 addi.d $sp, $sp, -48(0xfd0)
+ $re = qr/.*addi\..*sp, .*sp, -([0-9]{1,8}).*/o;
} else {
print("wrong or unknown architecture \"$arch\"\n");
exit
@@ -189,5 +189,20 @@ if ($total_size > $min_stack) {
push @stack, "$intro$total_size\n";
}
-# Sort output by size (last field)
-print sort { ($b =~ /:\t*(\d+)$/)[0] <=> ($a =~ /:\t*(\d+)$/)[0] } @stack;
+# Sort output by size (last field) and function name if size is the same
+sub sort_lines {
+ my ($a, $b) = @_;
+
+ my $num_a = $1 if $a =~ /:\t*(\d+)$/;
+ my $num_b = $1 if $b =~ /:\t*(\d+)$/;
+ my $func_a = $1 if $a =~ / (.*):/;
+ my $func_b = $1 if $b =~ / (.*):/;
+
+ if ($num_a != $num_b) {
+ return $num_b <=> $num_a;
+ } else {
+ return $func_a cmp $func_b;
+ }
+}
+
+print sort { sort_lines($a, $b) } @stack;
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 564c5632e1a2..cb980b144ca1 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -291,6 +291,9 @@ handle_line() {
}
while read line; do
+ # Strip unexpected carriage return at end of line
+ line=${line%$'\r'}
+
# Let's see if we have an address in the line
if [[ $line =~ \[\<([^]]+)\>\] ]] ||
[[ $line =~ [^+\ ]+\+0x[0-9a-f]+/0x[0-9a-f]+ ]]; then
diff --git a/scripts/decodecode b/scripts/decodecode
index 8fe71c292381..6364218b2178 100755
--- a/scripts/decodecode
+++ b/scripts/decodecode
@@ -67,6 +67,7 @@ if [ -z "$ARCH" ]; then
case `uname -m` in
aarch64*) ARCH=arm64 ;;
arm*) ARCH=arm ;;
+ loongarch*) ARCH=loongarch ;;
esac
fi
@@ -98,6 +99,10 @@ disas() {
${CROSS_COMPILE}strip $t.o
fi
+ if [ "$ARCH" = "loongarch" ]; then
+ ${CROSS_COMPILE}strip $t.o
+ fi
+
if [ $pc_sub -ne 0 ]; then
if [ $PC ]; then
adj_vma=$(( $PC - $pc_sub ))
diff --git a/scripts/gdb/linux/page_owner.py b/scripts/gdb/linux/page_owner.py
index 844fd5d0c912..8e713a09cfe7 100644
--- a/scripts/gdb/linux/page_owner.py
+++ b/scripts/gdb/linux/page_owner.py
@@ -122,27 +122,24 @@ class DumpPageOwner(gdb.Command):
if not (page_ext['flags'] & (1 << PAGE_EXT_OWNER_ALLOCATED)):
gdb.write("page_owner is not allocated\n")
- try:
- page_owner = self.get_page_owner(page_ext)
- gdb.write("Page last allocated via order %d, gfp_mask: 0x%x, pid: %d, tgid: %d (%s), ts %u ns, free_ts %u ns\n" %\
- (page_owner["order"], page_owner["gfp_mask"],\
- page_owner["pid"], page_owner["tgid"], page_owner["comm"],\
- page_owner["ts_nsec"], page_owner["free_ts_nsec"]))
- gdb.write("PFN: %d, Flags: 0x%x\n" % (pfn, page['flags']))
- if page_owner["handle"] == 0:
- gdb.write('page_owner allocation stack trace missing\n')
- else:
- stackdepot.stack_depot_print(page_owner["handle"])
+ page_owner = self.get_page_owner(page_ext)
+ gdb.write("Page last allocated via order %d, gfp_mask: 0x%x, pid: %d, tgid: %d (%s), ts %u ns, free_ts %u ns\n" %\
+ (page_owner["order"], page_owner["gfp_mask"],\
+ page_owner["pid"], page_owner["tgid"], page_owner["comm"].string(),\
+ page_owner["ts_nsec"], page_owner["free_ts_nsec"]))
+ gdb.write("PFN: %d, Flags: 0x%x\n" % (pfn, page['flags']))
+ if page_owner["handle"] == 0:
+ gdb.write('page_owner allocation stack trace missing\n')
+ else:
+ stackdepot.stack_depot_print(page_owner["handle"])
- if page_owner["free_handle"] == 0:
- gdb.write('page_owner free stack trace missing\n')
- else:
- gdb.write('page last free stack trace:\n')
- stackdepot.stack_depot_print(page_owner["free_handle"])
- if page_owner['last_migrate_reason'] != -1:
- gdb.write('page has been migrated, last migrate reason: %s\n' % self.migrate_reason_names[page_owner['last_migrate_reason']])
- except:
- gdb.write("\n")
+ if page_owner["free_handle"] == 0:
+ gdb.write('page_owner free stack trace missing\n')
+ else:
+ gdb.write('page last free stack trace:\n')
+ stackdepot.stack_depot_print(page_owner["free_handle"])
+ if page_owner['last_migrate_reason'] != -1:
+ gdb.write('page has been migrated, last migrate reason: %s\n' % self.migrate_reason_names[page_owner['last_migrate_reason']])
def read_page_owner(self):
pfn = self.min_pfn
@@ -173,18 +170,13 @@ class DumpPageOwner(gdb.Command):
pfn += 1
continue
- try:
- page_owner = self.get_page_owner(page_ext)
- gdb.write("Page allocated via order %d, gfp_mask: 0x%x, pid: %d, tgid: %d (%s), ts %u ns, free_ts %u ns\n" %\
- (page_owner["order"], page_owner["gfp_mask"],\
- page_owner["pid"], page_owner["tgid"], page_owner["comm"],\
- page_owner["ts_nsec"], page_owner["free_ts_nsec"]))
- gdb.write("PFN: %d, Flags: 0x%x\n" % (pfn, page['flags']))
- stackdepot.stack_depot_print(page_owner["handle"])
- pfn += (1 << page_owner["order"])
- continue
- except:
- gdb.write("\n")
- pfn += 1
+ page_owner = self.get_page_owner(page_ext)
+ gdb.write("Page allocated via order %d, gfp_mask: 0x%x, pid: %d, tgid: %d (%s), ts %u ns, free_ts %u ns\n" %\
+ (page_owner["order"], page_owner["gfp_mask"],\
+ page_owner["pid"], page_owner["tgid"], page_owner["comm"].string(),\
+ page_owner["ts_nsec"], page_owner["free_ts_nsec"]))
+ gdb.write("PFN: %d, Flags: 0x%x\n" % (pfn, page['flags']))
+ stackdepot.stack_depot_print(page_owner["handle"])
+ pfn += (1 << page_owner["order"])
DumpPageOwner()
diff --git a/scripts/gdb/linux/slab.py b/scripts/gdb/linux/slab.py
index f012ba38c7d9..0e2d93867fe2 100644
--- a/scripts/gdb/linux/slab.py
+++ b/scripts/gdb/linux/slab.py
@@ -228,8 +228,7 @@ def slabtrace(alloc, cache_name):
nr_cpu = gdb.parse_and_eval('__num_online_cpus')['counter']
if nr_cpu > 1:
gdb.write(" cpus=")
- for i in loc['cpus']:
- gdb.write("%d," % i)
+ gdb.write(','.join(str(cpu) for cpu in loc['cpus']))
gdb.write("\n")
if constants.LX_CONFIG_STACKDEPOT:
if loc['handle']:
diff --git a/scripts/gdb/linux/stackdepot.py b/scripts/gdb/linux/stackdepot.py
index 047d329a6a12..0281d9de4b7c 100644
--- a/scripts/gdb/linux/stackdepot.py
+++ b/scripts/gdb/linux/stackdepot.py
@@ -25,10 +25,10 @@ def stack_depot_fetch(handle):
handle_parts_t = gdb.lookup_type("union handle_parts")
parts = handle.cast(handle_parts_t)
offset = parts['offset'] << DEPOT_STACK_ALIGN
- pool_index_cached = gdb.parse_and_eval('pool_index')
+ pools_num = gdb.parse_and_eval('pools_num')
- if parts['pool_index'] > pool_index_cached:
- gdb.write("pool index %d out of bounds (%d) for stack id 0x%08x\n" % (parts['pool_index'], pool_index_cached, handle))
+ if parts['pool_index'] > pools_num:
+ gdb.write("pool index %d out of bounds (%d) for stack id 0x%08x\n" % (parts['pool_index'], pools_num, handle))
return gdb.Value(0), 0
stack_pools = gdb.parse_and_eval('stack_pools')
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index 855c4863124b..edec60d39bbf 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -26,6 +26,7 @@ accelaration||acceleration
accelearion||acceleration
acceleratoin||acceleration
accelleration||acceleration
+accelrometer||accelerometer
accesing||accessing
accesnt||accent
accessable||accessible
@@ -137,6 +138,7 @@ anniversery||anniversary
annoucement||announcement
anomolies||anomalies
anomoly||anomaly
+anonynous||anonymous
anway||anyway
aplication||application
appearence||appearance
@@ -267,6 +269,7 @@ cadidate||candidate
cahces||caches
calender||calendar
calescing||coalescing
+calibraiton||calibration
calle||called
callibration||calibration
callled||called
@@ -288,6 +291,7 @@ capabitilies||capabilities
capablity||capability
capatibilities||capabilities
capapbilities||capabilities
+captuer||capture
caputure||capture
carefuly||carefully
cariage||carriage
@@ -340,6 +344,7 @@ comminucation||communication
commited||committed
commiting||committing
committ||commit
+commmand||command
commnunication||communication
commoditiy||commodity
comsume||consume
@@ -406,6 +411,7 @@ continious||continuous
continous||continuous
continously||continuously
continueing||continuing
+contiuous||continuous
contraints||constraints
contruct||construct
contol||control
@@ -757,6 +763,7 @@ hardward||hardware
havind||having
heirarchically||hierarchically
heirarchy||hierarchy
+heirachy||hierarchy
helpfull||helpful
hearbeat||heartbeat
heterogenous||heterogeneous
@@ -1199,6 +1206,7 @@ priting||printing
privilaged||privileged
privilage||privilege
priviledge||privilege
+priviledged||privileged
priviledges||privileges
privleges||privileges
probaly||probably
@@ -1251,6 +1259,7 @@ purgable||purgeable
pwoer||power
queing||queuing
quering||querying
+querrying||querying
queus||queues
randomally||randomly
raoming||roaming
@@ -1324,6 +1333,7 @@ reseting||resetting
reseved||reserved
reseverd||reserved
resizeable||resizable
+resonable||reasonable
resotre||restore
resouce||resource
resouces||resources
@@ -1427,6 +1437,7 @@ sliped||slipped
softwade||software
softwares||software
soley||solely
+soluation||solution
souce||source
speach||speech
specfic||specific
@@ -1458,6 +1469,7 @@ standart||standard
standy||standby
stardard||standard
staticly||statically
+statisitcs||statistics
statuss||status
stoped||stopped
stoping||stopping
@@ -1548,6 +1560,7 @@ threds||threads
threee||three
threshhold||threshold
thresold||threshold
+throtting||throttling
throught||through
tansition||transition
trackling||tracking
@@ -1571,6 +1584,7 @@ tranasction||transaction
tranceiver||transceiver
tranfer||transfer
tranmission||transmission
+tranport||transport
transcevier||transceiver
transciever||transceiver
transferd||transferred
diff --git a/security/Makefile b/security/Makefile
index 18121f8f85cd..59f238490665 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_KEYS) += keys/
# always enable default capabilities
obj-y += commoncap.o
+obj-$(CONFIG_SECURITY) += lsm_syscalls.o
obj-$(CONFIG_MMU) += min_addr.o
# Object file lists
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 38650e52ef57..2d9f2a4b4519 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -2373,6 +2373,7 @@ static struct aa_sfs_entry aa_sfs_entry_policy[] = {
static struct aa_sfs_entry aa_sfs_entry_mount[] = {
AA_SFS_FILE_STRING("mask", "mount umount pivot_root"),
+ AA_SFS_FILE_STRING("move_mount", "detached"),
{ }
};
diff --git a/security/apparmor/include/procattr.h b/security/apparmor/include/procattr.h
index 31689437e0e1..03dbfdb2f2c0 100644
--- a/security/apparmor/include/procattr.h
+++ b/security/apparmor/include/procattr.h
@@ -11,7 +11,7 @@
#ifndef __AA_PROCATTR_H
#define __AA_PROCATTR_H
-int aa_getprocattr(struct aa_label *label, char **string);
+int aa_getprocattr(struct aa_label *label, char **string, bool newline);
int aa_setprocattr_changehat(char *args, size_t size, int flags);
#endif /* __AA_PROCATTR_H */
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 4981bdf02993..e490a7000408 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -24,6 +24,7 @@
#include <linux/zstd.h>
#include <net/sock.h>
#include <uapi/linux/mount.h>
+#include <uapi/linux/lsm.h>
#include "include/apparmor.h"
#include "include/apparmorfs.h"
@@ -775,6 +776,46 @@ static int apparmor_sb_pivotroot(const struct path *old_path,
return error;
}
+static int apparmor_getselfattr(unsigned int attr, struct lsm_ctx __user *lx,
+ size_t *size, u32 flags)
+{
+ int error = -ENOENT;
+ struct aa_task_ctx *ctx = task_ctx(current);
+ struct aa_label *label = NULL;
+ char *value;
+
+ switch (attr) {
+ case LSM_ATTR_CURRENT:
+ label = aa_get_newest_label(cred_label(current_cred()));
+ break;
+ case LSM_ATTR_PREV:
+ if (ctx->previous)
+ label = aa_get_newest_label(ctx->previous);
+ break;
+ case LSM_ATTR_EXEC:
+ if (ctx->onexec)
+ label = aa_get_newest_label(ctx->onexec);
+ break;
+ default:
+ error = -EOPNOTSUPP;
+ break;
+ }
+
+ if (label) {
+ error = aa_getprocattr(label, &value, false);
+ if (error > 0)
+ error = lsm_fill_user_ctx(lx, size, value, error,
+ LSM_ID_APPARMOR, 0);
+ kfree(value);
+ }
+
+ aa_put_label(label);
+
+ if (error < 0)
+ return error;
+ return 1;
+}
+
static int apparmor_getprocattr(struct task_struct *task, const char *name,
char **value)
{
@@ -794,7 +835,7 @@ static int apparmor_getprocattr(struct task_struct *task, const char *name,
error = -EINVAL;
if (label)
- error = aa_getprocattr(label, value);
+ error = aa_getprocattr(label, value, true);
aa_put_label(label);
put_cred(cred);
@@ -802,8 +843,7 @@ static int apparmor_getprocattr(struct task_struct *task, const char *name,
return error;
}
-static int apparmor_setprocattr(const char *name, void *value,
- size_t size)
+static int do_setattr(u64 attr, void *value, size_t size)
{
char *command, *largs = NULL, *args = value;
size_t arg_size;
@@ -834,7 +874,7 @@ static int apparmor_setprocattr(const char *name, void *value,
goto out;
arg_size = size - (args - (largs ? largs : (char *) value));
- if (strcmp(name, "current") == 0) {
+ if (attr == LSM_ATTR_CURRENT) {
if (strcmp(command, "changehat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
AA_CHANGE_NOFLAGS);
@@ -849,7 +889,7 @@ static int apparmor_setprocattr(const char *name, void *value,
error = aa_change_profile(args, AA_CHANGE_STACK);
} else
goto fail;
- } else if (strcmp(name, "exec") == 0) {
+ } else if (attr == LSM_ATTR_EXEC) {
if (strcmp(command, "exec") == 0)
error = aa_change_profile(args, AA_CHANGE_ONEXEC);
else if (strcmp(command, "stack") == 0)
@@ -869,13 +909,42 @@ out:
fail:
ad.subj_label = begin_current_label_crit_section();
- ad.info = name;
+ if (attr == LSM_ATTR_CURRENT)
+ ad.info = "current";
+ else if (attr == LSM_ATTR_EXEC)
+ ad.info = "exec";
+ else
+ ad.info = "invalid";
ad.error = error = -EINVAL;
aa_audit_msg(AUDIT_APPARMOR_DENIED, &ad, NULL);
end_current_label_crit_section(ad.subj_label);
goto out;
}
+static int apparmor_setselfattr(unsigned int attr, struct lsm_ctx *ctx,
+ size_t size, u32 flags)
+{
+ int rc;
+
+ if (attr != LSM_ATTR_CURRENT && attr != LSM_ATTR_EXEC)
+ return -EOPNOTSUPP;
+
+ rc = do_setattr(attr, ctx->ctx, ctx->ctx_len);
+ if (rc > 0)
+ return 0;
+ return rc;
+}
+
+static int apparmor_setprocattr(const char *name, void *value,
+ size_t size)
+{
+ int attr = lsm_name_to_attr(name);
+
+ if (attr)
+ return do_setattr(attr, value, size);
+ return -EINVAL;
+}
+
/**
* apparmor_bprm_committing_creds - do task cleanup on committing new creds
* @bprm: binprm for the exec (NOT NULL)
@@ -1385,6 +1454,11 @@ struct lsm_blob_sizes apparmor_blob_sizes __ro_after_init = {
.lbs_task = sizeof(struct aa_task_ctx),
};
+static const struct lsm_id apparmor_lsmid = {
+ .name = "apparmor",
+ .id = LSM_ID_APPARMOR,
+};
+
static struct security_hook_list apparmor_hooks[] __ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
@@ -1418,6 +1492,8 @@ static struct security_hook_list apparmor_hooks[] __ro_after_init = {
LSM_HOOK_INIT(file_lock, apparmor_file_lock),
LSM_HOOK_INIT(file_truncate, apparmor_file_truncate),
+ LSM_HOOK_INIT(getselfattr, apparmor_getselfattr),
+ LSM_HOOK_INIT(setselfattr, apparmor_setselfattr),
LSM_HOOK_INIT(getprocattr, apparmor_getprocattr),
LSM_HOOK_INIT(setprocattr, apparmor_setprocattr),
@@ -2202,7 +2278,7 @@ static int __init apparmor_init(void)
goto buffers_out;
}
security_add_hooks(apparmor_hooks, ARRAY_SIZE(apparmor_hooks),
- "apparmor");
+ &apparmor_lsmid);
/* Report that AppArmor successfully initialized */
apparmor_initialized = 1;
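
The hunk above, and the matching ones in the bpf, capability, landlock, loadpin, lockdown, safesetid and selinux files further down, all perform the same conversion: security_add_hooks() now takes a struct lsm_id instead of a bare name string. A minimal, hypothetical sketch of the resulting registration pattern; "example" and LSM_ID_EXAMPLE are placeholders rather than identifiers from this series, and the DEFINE_LSM() boilerplate is omitted:

#include <linux/init.h>
#include <linux/lsm_hooks.h>
#include <uapi/linux/lsm.h>

/* Hypothetical module identity; a real LSM uses an id from uapi/linux/lsm.h. */
static const struct lsm_id example_lsmid = {
	.name = "example",
	.id = LSM_ID_EXAMPLE,
};

static int example_file_open(struct file *file)
{
	return 0;	/* stub hook so the array below is non-empty */
}

static struct security_hook_list example_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(file_open, example_file_open),
};

static int __init example_lsm_init(void)
{
	/* The lsm_id is recorded in lsm_idlist[] and attached to every hook. */
	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
			   &example_lsmid);
	return 0;
}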
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index fb30204c761a..49fe8da6fea4 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -499,6 +499,10 @@ int aa_move_mount(const struct cred *subj_cred,
error = -ENOMEM;
if (!to_buffer || !from_buffer)
goto out;
+
+ if (!our_mnt(from_path->mnt))
+ /* moving a mount detached from the namespace */
+ from_path = NULL;
error = fn_for_each_confined(label, profile,
match_mnt(subj_cred, profile, to_path, to_buffer,
from_path, from_buffer,
diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c
index 197d41f9c32b..e3857e3d7c6c 100644
--- a/security/apparmor/procattr.c
+++ b/security/apparmor/procattr.c
@@ -20,6 +20,7 @@
* aa_getprocattr - Return the label information for @label
* @label: the label to print label info about (NOT NULL)
* @string: Returns - string containing the label info (NOT NULL)
+ * @newline: indicates that a newline should be added
*
* Requires: label != NULL && string != NULL
*
@@ -27,7 +28,7 @@
*
* Returns: size of string placed in @string else error code on failure
*/
-int aa_getprocattr(struct aa_label *label, char **string)
+int aa_getprocattr(struct aa_label *label, char **string, bool newline)
{
struct aa_ns *ns = labels_ns(label);
struct aa_ns *current_ns = aa_get_current_ns();
@@ -57,11 +58,12 @@ int aa_getprocattr(struct aa_label *label, char **string)
return len;
}
- (*string)[len] = '\n';
- (*string)[len + 1] = 0;
+ if (newline)
+ (*string)[len++] = '\n';
+ (*string)[len] = 0;
aa_put_ns(current_ns);
- return len + 1;
+ return len;
}
/**
diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
index cfaf1d0e6a5f..57b9ffd53c98 100644
--- a/security/bpf/hooks.c
+++ b/security/bpf/hooks.c
@@ -5,6 +5,7 @@
*/
#include <linux/lsm_hooks.h>
#include <linux/bpf_lsm.h>
+#include <uapi/linux/lsm.h>
static struct security_hook_list bpf_lsm_hooks[] __ro_after_init = {
#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
@@ -15,9 +16,15 @@ static struct security_hook_list bpf_lsm_hooks[] __ro_after_init = {
LSM_HOOK_INIT(task_free, bpf_task_storage_free),
};
+static const struct lsm_id bpf_lsmid = {
+ .name = "bpf",
+ .id = LSM_ID_BPF,
+};
+
static int __init bpf_lsm_init(void)
{
- security_add_hooks(bpf_lsm_hooks, ARRAY_SIZE(bpf_lsm_hooks), "bpf");
+ security_add_hooks(bpf_lsm_hooks, ARRAY_SIZE(bpf_lsm_hooks),
+ &bpf_lsmid);
pr_info("LSM support for eBPF active\n");
return 0;
}
diff --git a/security/commoncap.c b/security/commoncap.c
index 8e8c630ce204..162d96b3a676 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -25,6 +25,7 @@
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/mnt_idmapping.h>
+#include <uapi/linux/lsm.h>
/*
* If a non-root user executes a setuid-root binary in
@@ -1440,6 +1441,11 @@ int cap_mmap_file(struct file *file, unsigned long reqprot,
#ifdef CONFIG_SECURITY
+static const struct lsm_id capability_lsmid = {
+ .name = "capability",
+ .id = LSM_ID_CAPABILITY,
+};
+
static struct security_hook_list capability_hooks[] __ro_after_init = {
LSM_HOOK_INIT(capable, cap_capable),
LSM_HOOK_INIT(settime, cap_settime),
@@ -1464,7 +1470,7 @@ static struct security_hook_list capability_hooks[] __ro_after_init = {
static int __init capability_init(void)
{
security_add_hooks(capability_hooks, ARRAY_SIZE(capability_hooks),
- "capability");
+ &capability_lsmid);
return 0;
}
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 894570fe39bc..cc7956d7878b 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -151,6 +151,17 @@ static int evm_find_protected_xattrs(struct dentry *dentry)
return count;
}
+static int is_unsupported_fs(struct dentry *dentry)
+{
+ struct inode *inode = d_backing_inode(dentry);
+
+ if (inode->i_sb->s_iflags & SB_I_EVM_UNSUPPORTED) {
+ pr_info_once("%s not supported\n", inode->i_sb->s_type->name);
+ return 1;
+ }
+ return 0;
+}
+
/*
* evm_verify_hmac - calculate and compare the HMAC with the EVM xattr
*
@@ -181,6 +192,9 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
iint->evm_status == INTEGRITY_PASS_IMMUTABLE))
return iint->evm_status;
+ if (is_unsupported_fs(dentry))
+ return INTEGRITY_UNKNOWN;
+
/* if status is not PASS, try to check again - against -ENOMEM */
/* first need to know the sig type */
@@ -408,6 +422,9 @@ enum integrity_status evm_verifyxattr(struct dentry *dentry,
if (!evm_key_loaded() || !evm_protected_xattr(xattr_name))
return INTEGRITY_UNKNOWN;
+ if (is_unsupported_fs(dentry))
+ return INTEGRITY_UNKNOWN;
+
if (!iint) {
iint = integrity_iint_find(d_backing_inode(dentry));
if (!iint)
@@ -491,15 +508,21 @@ static int evm_protect_xattr(struct mnt_idmap *idmap,
if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (is_unsupported_fs(dentry))
+ return -EPERM;
} else if (!evm_protected_xattr(xattr_name)) {
if (!posix_xattr_acl(xattr_name))
return 0;
+ if (is_unsupported_fs(dentry))
+ return 0;
+
evm_status = evm_verify_current_integrity(dentry);
if ((evm_status == INTEGRITY_PASS) ||
(evm_status == INTEGRITY_NOXATTRS))
return 0;
goto out;
- }
+ } else if (is_unsupported_fs(dentry))
+ return 0;
evm_status = evm_verify_current_integrity(dentry);
if (evm_status == INTEGRITY_NOXATTRS) {
@@ -750,6 +773,9 @@ void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
if (!(evm_initialized & EVM_INIT_HMAC))
return;
+ if (is_unsupported_fs(dentry))
+ return;
+
evm_update_evmxattr(dentry, xattr_name, xattr_value, xattr_value_len);
}
@@ -814,8 +840,12 @@ int evm_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (evm_initialized & EVM_ALLOW_METADATA_WRITES)
return 0;
+ if (is_unsupported_fs(dentry))
+ return 0;
+
if (!(ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
return 0;
+
evm_status = evm_verify_current_integrity(dentry);
/*
* Writing attrs is safe for portable signatures, as portable signatures
@@ -859,10 +889,20 @@ void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
if (!(evm_initialized & EVM_INIT_HMAC))
return;
+ if (is_unsupported_fs(dentry))
+ return;
+
if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
evm_update_evmxattr(dentry, NULL, NULL, 0);
}
+int evm_inode_copy_up_xattr(const char *name)
+{
+ if (strcmp(name, XATTR_NAME_EVM) == 0)
+ return 1; /* Discard */
+ return -EOPNOTSUPP;
+}
+
/*
* evm_inode_init_security - initializes security.evm HMAC value
*/
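
The is_unsupported_fs() helper added above keys off SB_I_EVM_UNSUPPORTED on the superblock. A hedged sketch of how a filesystem that cannot carry EVM metadata would opt out at mount time; examplefs_fill_super() is hypothetical:

#include <linux/fs.h>

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* Flag the superblock so the EVM verify/protect/update paths above
	 * bail out early for inodes on this filesystem. */
	sb->s_iflags |= SB_I_EVM_UNSUPPORTED;
	/* ... remaining superblock setup ... */
	return 0;
}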
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index a6bd817efc1a..b98bfe9efd0c 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -243,7 +243,7 @@ config IMA_APPRAISE_MODSIG
to accept such signatures.
config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
- bool "Permit keys validly signed by a built-in or secondary CA cert (EXPERIMENTAL)"
+ bool "Permit keys validly signed by a built-in, machine (if configured) or secondary"
depends on SYSTEM_TRUSTED_KEYRING
depends on SECONDARY_TRUSTED_KEYRING
depends on INTEGRITY_ASYMMETRIC_KEYS
@@ -251,14 +251,14 @@ config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
default n
help
Keys may be added to the IMA or IMA blacklist keyrings, if the
- key is validly signed by a CA cert in the system built-in or
- secondary trusted keyrings. The key must also have the
- digitalSignature usage set.
+ key is validly signed by a CA cert in the system built-in,
+ machine (if configured), or secondary trusted keyrings. The
+ key must also have the digitalSignature usage set.
Intermediate keys between those the kernel has compiled in and the
IMA keys to be added may be added to the system secondary keyring,
provided they are validly signed by a key already resident in the
- built-in or secondary trusted keyrings.
+ built-in, machine (if configured) or secondary trusted keyrings.
config IMA_BLACKLIST_KEYRING
bool "Create IMA machine owner blacklist keyrings (EXPERIMENTAL)"
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 51ad29940f05..f3738b2c8bcd 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -38,7 +38,7 @@ static int param_set_bufsize(const char *val, const struct kernel_param *kp)
size = memparse(val, NULL);
order = get_order(size);
- if (order > MAX_ORDER)
+ if (order > MAX_PAGE_ORDER)
return -EINVAL;
ima_maxorder = order;
ima_bufsize = PAGE_SIZE << order;
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
index ad133fe120db..dadc1d138118 100644
--- a/security/integrity/ima/ima_kexec.c
+++ b/security/integrity/ima/ima_kexec.c
@@ -129,8 +129,8 @@ void ima_add_kexec_buffer(struct kimage *image)
image->ima_buffer_size = kexec_segment_size;
image->ima_buffer = kexec_buffer;
- pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
- kbuf.mem);
+ kexec_dprintk("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
+ kbuf.mem);
}
#endif /* IMA_KEXEC */
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 8af2136069d2..76f55dd13cb8 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -237,6 +237,10 @@ static int datablob_parse(char *datablob, const char **format,
break;
}
*decrypted_data = strsep(&datablob, " \t");
+ if (!*decrypted_data) {
+ pr_info("encrypted_key: decrypted_data is missing\n");
+ break;
+ }
ret = 0;
break;
case Opt_load:
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 19be69fa4d05..10ba439968f7 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1252,12 +1252,11 @@ long keyctl_instantiate_key(key_serial_t id,
key_serial_t ringid)
{
if (_payload && plen) {
- struct iovec iov;
struct iov_iter from;
int ret;
- ret = import_single_range(ITER_SOURCE, (void __user *)_payload, plen,
- &iov, &from);
+ ret = import_ubuf(ITER_SOURCE, (void __user *)_payload, plen,
+ &from);
if (unlikely(ret))
return ret;
diff --git a/security/landlock/cred.c b/security/landlock/cred.c
index 13dff2a31545..786af18c4a1c 100644
--- a/security/landlock/cred.c
+++ b/security/landlock/cred.c
@@ -42,5 +42,5 @@ static struct security_hook_list landlock_hooks[] __ro_after_init = {
__init void landlock_add_cred_hooks(void)
{
security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
- LANDLOCK_NAME);
+ &landlock_lsmid);
}
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
index bc7c126deea2..fc520a06f9af 100644
--- a/security/landlock/fs.c
+++ b/security/landlock/fs.c
@@ -193,7 +193,7 @@ int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
*
* Returns NULL if no rule is found or if @dentry is negative.
*/
-static inline const struct landlock_rule *
+static const struct landlock_rule *
find_rule(const struct landlock_ruleset *const domain,
const struct dentry *const dentry)
{
@@ -220,7 +220,7 @@ find_rule(const struct landlock_ruleset *const domain,
* sockfs, pipefs), but can still be reachable through
* /proc/<pid>/fd/<file-descriptor>
*/
-static inline bool is_nouser_or_private(const struct dentry *dentry)
+static bool is_nouser_or_private(const struct dentry *dentry)
{
return (dentry->d_sb->s_flags & SB_NOUSER) ||
(d_is_positive(dentry) &&
@@ -264,7 +264,7 @@ static const struct landlock_ruleset *get_current_fs_domain(void)
*
* @layer_masks_child2: Optional child masks.
*/
-static inline bool no_more_access(
+static bool no_more_access(
const layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
const layer_mask_t (*const layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS],
const bool child1_is_directory,
@@ -316,7 +316,7 @@ static inline bool no_more_access(
*
* Returns true if the request is allowed, false otherwise.
*/
-static inline bool
+static bool
scope_to_request(const access_mask_t access_request,
layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
@@ -335,7 +335,7 @@ scope_to_request(const access_mask_t access_request,
* Returns true if there is at least one access right different than
* LANDLOCK_ACCESS_FS_REFER.
*/
-static inline bool
+static bool
is_eacces(const layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS],
const access_mask_t access_request)
{
@@ -551,9 +551,9 @@ jump_up:
return allowed_parent1 && allowed_parent2;
}
-static inline int check_access_path(const struct landlock_ruleset *const domain,
- const struct path *const path,
- access_mask_t access_request)
+static int check_access_path(const struct landlock_ruleset *const domain,
+ const struct path *const path,
+ access_mask_t access_request)
{
layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
@@ -565,8 +565,8 @@ static inline int check_access_path(const struct landlock_ruleset *const domain,
return -EACCES;
}
-static inline int current_check_access_path(const struct path *const path,
- const access_mask_t access_request)
+static int current_check_access_path(const struct path *const path,
+ const access_mask_t access_request)
{
const struct landlock_ruleset *const dom = get_current_fs_domain();
@@ -575,7 +575,7 @@ static inline int current_check_access_path(const struct path *const path,
return check_access_path(dom, path, access_request);
}
-static inline access_mask_t get_mode_access(const umode_t mode)
+static access_mask_t get_mode_access(const umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFLNK:
@@ -600,7 +600,7 @@ static inline access_mask_t get_mode_access(const umode_t mode)
}
}
-static inline access_mask_t maybe_remove(const struct dentry *const dentry)
+static access_mask_t maybe_remove(const struct dentry *const dentry)
{
if (d_is_negative(dentry))
return 0;
@@ -1086,7 +1086,7 @@ static int hook_path_truncate(const struct path *const path)
* Returns the access rights that are required for opening the given file,
* depending on the file type and open mode.
*/
-static inline access_mask_t
+static access_mask_t
get_required_file_open_access(const struct file *const file)
{
access_mask_t access = 0;
@@ -1223,5 +1223,5 @@ static struct security_hook_list landlock_hooks[] __ro_after_init = {
__init void landlock_add_fs_hooks(void)
{
security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
- LANDLOCK_NAME);
+ &landlock_lsmid);
}
diff --git a/security/landlock/net.c b/security/landlock/net.c
index aaa92c2b1f08..efa1b644a4af 100644
--- a/security/landlock/net.c
+++ b/security/landlock/net.c
@@ -196,5 +196,5 @@ static struct security_hook_list landlock_hooks[] __ro_after_init = {
__init void landlock_add_net_hooks(void)
{
security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
- LANDLOCK_NAME);
+ &landlock_lsmid);
}
diff --git a/security/landlock/ptrace.c b/security/landlock/ptrace.c
index 8a06d6c492bf..2bfc533d36e4 100644
--- a/security/landlock/ptrace.c
+++ b/security/landlock/ptrace.c
@@ -116,5 +116,5 @@ static struct security_hook_list landlock_hooks[] __ro_after_init = {
__init void landlock_add_ptrace_hooks(void)
{
security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
- LANDLOCK_NAME);
+ &landlock_lsmid);
}
diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c
index ffedc99f2b68..e0a5fbf9201a 100644
--- a/security/landlock/ruleset.c
+++ b/security/landlock/ruleset.c
@@ -305,7 +305,7 @@ int landlock_insert_rule(struct landlock_ruleset *const ruleset,
return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers));
}
-static inline void get_hierarchy(struct landlock_hierarchy *const hierarchy)
+static void get_hierarchy(struct landlock_hierarchy *const hierarchy)
{
if (hierarchy)
refcount_inc(&hierarchy->usage);
@@ -723,11 +723,12 @@ landlock_init_layer_masks(const struct landlock_ruleset *const domain,
/* Saves all handled accesses per layer. */
for (layer_level = 0; layer_level < domain->num_layers; layer_level++) {
const unsigned long access_req = access_request;
+ const access_mask_t access_mask =
+ get_access_mask(domain, layer_level);
unsigned long access_bit;
for_each_set_bit(access_bit, &access_req, num_access) {
- if (BIT_ULL(access_bit) &
- get_access_mask(domain, layer_level)) {
+ if (BIT_ULL(access_bit) & access_mask) {
(*layer_masks)[access_bit] |=
BIT_ULL(layer_level);
handled_accesses |= BIT_ULL(access_bit);
diff --git a/security/landlock/setup.c b/security/landlock/setup.c
index 3e11d303542f..f6dd33143b7f 100644
--- a/security/landlock/setup.c
+++ b/security/landlock/setup.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/lsm_hooks.h>
+#include <uapi/linux/lsm.h>
#include "common.h"
#include "cred.h"
@@ -25,6 +26,11 @@ struct lsm_blob_sizes landlock_blob_sizes __ro_after_init = {
.lbs_superblock = sizeof(struct landlock_superblock_security),
};
+const struct lsm_id landlock_lsmid = {
+ .name = LANDLOCK_NAME,
+ .id = LSM_ID_LANDLOCK,
+};
+
static int __init landlock_init(void)
{
landlock_add_cred_hooks();
diff --git a/security/landlock/setup.h b/security/landlock/setup.h
index 1daffab1ab4b..c4252d46d49d 100644
--- a/security/landlock/setup.h
+++ b/security/landlock/setup.h
@@ -14,5 +14,6 @@
extern bool landlock_initialized;
extern struct lsm_blob_sizes landlock_blob_sizes;
+extern const struct lsm_id landlock_lsmid;
#endif /* _SECURITY_LANDLOCK_SETUP_H */
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
index a9d40456a064..8e93cda130f1 100644
--- a/security/loadpin/loadpin.c
+++ b/security/loadpin/loadpin.c
@@ -20,6 +20,7 @@
#include <linux/string_helpers.h>
#include <linux/dm-verity-loadpin.h>
#include <uapi/linux/loadpin.h>
+#include <uapi/linux/lsm.h>
#define VERITY_DIGEST_FILE_HEADER "# LOADPIN_TRUSTED_VERITY_ROOT_DIGESTS"
@@ -208,6 +209,11 @@ static int loadpin_load_data(enum kernel_load_data_id id, bool contents)
return loadpin_check(NULL, (enum kernel_read_file_id) id);
}
+static const struct lsm_id loadpin_lsmid = {
+ .name = "loadpin",
+ .id = LSM_ID_LOADPIN,
+};
+
static struct security_hook_list loadpin_hooks[] __ro_after_init = {
LSM_HOOK_INIT(sb_free_security, loadpin_sb_free_security),
LSM_HOOK_INIT(kernel_read_file, loadpin_read_file),
@@ -259,7 +265,8 @@ static int __init loadpin_init(void)
if (!register_sysctl("kernel/loadpin", loadpin_sysctl_table))
pr_notice("sysctl registration failed!\n");
#endif
- security_add_hooks(loadpin_hooks, ARRAY_SIZE(loadpin_hooks), "loadpin");
+ security_add_hooks(loadpin_hooks, ARRAY_SIZE(loadpin_hooks),
+ &loadpin_lsmid);
return 0;
}
diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c
index 68d19632aeb7..cd84d8ea1dfb 100644
--- a/security/lockdown/lockdown.c
+++ b/security/lockdown/lockdown.c
@@ -13,6 +13,7 @@
#include <linux/security.h>
#include <linux/export.h>
#include <linux/lsm_hooks.h>
+#include <uapi/linux/lsm.h>
static enum lockdown_reason kernel_locked_down;
@@ -75,6 +76,11 @@ static struct security_hook_list lockdown_hooks[] __ro_after_init = {
LSM_HOOK_INIT(locked_down, lockdown_is_locked_down),
};
+const struct lsm_id lockdown_lsmid = {
+ .name = "lockdown",
+ .id = LSM_ID_LOCKDOWN,
+};
+
static int __init lockdown_lsm_init(void)
{
#if defined(CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY)
@@ -83,7 +89,7 @@ static int __init lockdown_lsm_init(void)
lock_kernel_down("Kernel configuration", LOCKDOWN_CONFIDENTIALITY_MAX);
#endif
security_add_hooks(lockdown_hooks, ARRAY_SIZE(lockdown_hooks),
- "lockdown");
+ &lockdown_lsmid);
return 0;
}
diff --git a/security/lsm_syscalls.c b/security/lsm_syscalls.c
new file mode 100644
index 000000000000..5d391b1f7e69
--- /dev/null
+++ b/security/lsm_syscalls.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * System calls implementing the Linux Security Module API.
+ *
+ * Copyright (C) 2022 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2022 Intel Corporation
+ */
+
+#include <asm/current.h>
+#include <linux/compiler_types.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/security.h>
+#include <linux/stddef.h>
+#include <linux/syscalls.h>
+#include <linux/types.h>
+#include <linux/lsm_hooks.h>
+#include <uapi/linux/lsm.h>
+
+/**
+ * lsm_name_to_attr - map an LSM attribute name to its ID
+ * @name: name of the attribute
+ *
+ * Returns the LSM attribute value associated with @name, or 0 if
+ * there is no mapping.
+ */
+u64 lsm_name_to_attr(const char *name)
+{
+ if (!strcmp(name, "current"))
+ return LSM_ATTR_CURRENT;
+ if (!strcmp(name, "exec"))
+ return LSM_ATTR_EXEC;
+ if (!strcmp(name, "fscreate"))
+ return LSM_ATTR_FSCREATE;
+ if (!strcmp(name, "keycreate"))
+ return LSM_ATTR_KEYCREATE;
+ if (!strcmp(name, "prev"))
+ return LSM_ATTR_PREV;
+ if (!strcmp(name, "sockcreate"))
+ return LSM_ATTR_SOCKCREATE;
+ return LSM_ATTR_UNDEF;
+}
+
+/**
+ * sys_lsm_set_self_attr - Set current task's security module attribute
+ * @attr: which attribute to set
+ * @ctx: the LSM contexts
+ * @size: size of @ctx
+ * @flags: reserved for future use
+ *
+ * Sets the calling task's LSM context. On success this function
+ * returns 0. If the attribute specified cannot be set, a negative
+ * value indicating the reason for the error is returned.
+ */
+SYSCALL_DEFINE4(lsm_set_self_attr, unsigned int, attr, struct lsm_ctx __user *,
+ ctx, size_t, size, u32, flags)
+{
+ return security_setselfattr(attr, ctx, size, flags);
+}
+
+/**
+ * sys_lsm_get_self_attr - Return current task's security module attributes
+ * @attr: which attribute to return
+ * @ctx: the user-space destination for the information, or NULL
+ * @size: pointer to the size of space available to receive the data
+ * @flags: special handling options. LSM_FLAG_SINGLE indicates that only
+ * attributes associated with the LSM identified in the passed @ctx be
+ * reported.
+ *
+ * Returns the calling task's LSM contexts. On success this
+ * function returns the number of @ctx array elements. This value
+ * may be zero if there are no LSM contexts assigned. If @size is
+ * insufficient to contain the return data -E2BIG is returned and
+ * @size is set to the minimum required size. In all other cases
+ * a negative value indicating the error is returned.
+ */
+SYSCALL_DEFINE4(lsm_get_self_attr, unsigned int, attr, struct lsm_ctx __user *,
+ ctx, size_t __user *, size, u32, flags)
+{
+ return security_getselfattr(attr, ctx, size, flags);
+}
+
+/**
+ * sys_lsm_list_modules - Return a list of the active security modules
+ * @ids: the LSM module ids
+ * @size: pointer to size of @ids, updated on return
+ * @flags: reserved for future use, must be zero
+ *
+ * Returns a list of the active LSM ids. On success this function
+ * returns the number of @ids array elements. This value may be zero
+ * if there are no LSMs active. If @size is insufficient to contain
+ * the return data -E2BIG is returned and @size is set to the minimum
+ * required size. In all other cases a negative value indicating the
+ * error is returned.
+ */
+SYSCALL_DEFINE3(lsm_list_modules, u64 __user *, ids, size_t __user *, size,
+ u32, flags)
+{
+ size_t total_size = lsm_active_cnt * sizeof(*ids);
+ size_t usize;
+ int i;
+
+ if (flags)
+ return -EINVAL;
+
+ if (get_user(usize, size))
+ return -EFAULT;
+
+ if (put_user(total_size, size) != 0)
+ return -EFAULT;
+
+ if (usize < total_size)
+ return -E2BIG;
+
+ for (i = 0; i < lsm_active_cnt; i++)
+ if (put_user(lsm_idlist[i]->id, ids++))
+ return -EFAULT;
+
+ return lsm_active_cnt;
+}
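
A hedged user-space sketch of the lsm_list_modules() contract documented above: probe with a zero size so the kernel reports the required buffer size via -E2BIG, then fetch the id list. It assumes <linux/lsm.h> and a __NR_lsm_list_modules definition from headers generated by a kernel carrying this series:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/lsm.h>

int main(void)
{
	size_t size = 0;
	uint64_t *ids;
	long count, i;

	/* Probe: expect -E2BIG and *size updated to the required total. */
	if (syscall(__NR_lsm_list_modules, NULL, &size, 0) < 0 &&
	    errno != E2BIG) {
		perror("lsm_list_modules");
		return 1;
	}

	ids = malloc(size);
	if (!ids)
		return 1;

	count = syscall(__NR_lsm_list_modules, ids, &size, 0);
	if (count < 0) {
		perror("lsm_list_modules");
		free(ids);
		return 1;
	}
	for (i = 0; i < count; i++)
		printf("active LSM id %llu\n", (unsigned long long)ids[i]);
	free(ids);
	return 0;
}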
diff --git a/security/safesetid/lsm.c b/security/safesetid/lsm.c
index 5be5894aa0ea..1ba564f097f5 100644
--- a/security/safesetid/lsm.c
+++ b/security/safesetid/lsm.c
@@ -19,6 +19,7 @@
#include <linux/ptrace.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
+#include <uapi/linux/lsm.h>
#include "lsm.h"
/* Flag indicating whether initialization completed */
@@ -261,6 +262,11 @@ static int safesetid_task_fix_setgroups(struct cred *new, const struct cred *old
return 0;
}
+static const struct lsm_id safesetid_lsmid = {
+ .name = "safesetid",
+ .id = LSM_ID_SAFESETID,
+};
+
static struct security_hook_list safesetid_security_hooks[] = {
LSM_HOOK_INIT(task_fix_setuid, safesetid_task_fix_setuid),
LSM_HOOK_INIT(task_fix_setgid, safesetid_task_fix_setgid),
@@ -271,7 +277,8 @@ static struct security_hook_list safesetid_security_hooks[] = {
static int __init safesetid_security_init(void)
{
security_add_hooks(safesetid_security_hooks,
- ARRAY_SIZE(safesetid_security_hooks), "safesetid");
+ ARRAY_SIZE(safesetid_security_hooks),
+ &safesetid_lsmid);
/* Report that SafeSetID successfully initialized */
safesetid_initialized = 1;
diff --git a/security/security.c b/security/security.c
index dcb3e7014f9b..0144a98d3712 100644
--- a/security/security.c
+++ b/security/security.c
@@ -35,6 +35,24 @@
#define LSM_COUNT (__end_lsm_info - __start_lsm_info)
/*
+ * How many LSMs are built into the kernel as determined at
+ * build time. Used to determine fixed array sizes.
+ * The capability module is accounted for by CONFIG_SECURITY
+ */
+#define LSM_CONFIG_COUNT ( \
+ (IS_ENABLED(CONFIG_SECURITY) ? 1 : 0) + \
+ (IS_ENABLED(CONFIG_SECURITY_SELINUX) ? 1 : 0) + \
+ (IS_ENABLED(CONFIG_SECURITY_SMACK) ? 1 : 0) + \
+ (IS_ENABLED(CONFIG_SECURITY_TOMOYO) ? 1 : 0) + \
+ (IS_ENABLED(CONFIG_SECURITY_APPARMOR) ? 1 : 0) + \
+ (IS_ENABLED(CONFIG_SECURITY_YAMA) ? 1 : 0) + \
+ (IS_ENABLED(CONFIG_SECURITY_LOADPIN) ? 1 : 0) + \
+ (IS_ENABLED(CONFIG_SECURITY_SAFESETID) ? 1 : 0) + \
+ (IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM) ? 1 : 0) + \
+ (IS_ENABLED(CONFIG_BPF_LSM) ? 1 : 0) + \
+ (IS_ENABLED(CONFIG_SECURITY_LANDLOCK) ? 1 : 0))
+
+/*
* These are descriptions of the reasons that can be passed to the
* security_locked_down() LSM hook. Placing this array here allows
* all security modules to use the same descriptions for auditing
@@ -245,6 +263,12 @@ static void __init initialize_lsm(struct lsm_info *lsm)
}
}
+/*
+ * Current index to use while initializing the lsm id list.
+ */
+u32 lsm_active_cnt __ro_after_init;
+const struct lsm_id *lsm_idlist[LSM_CONFIG_COUNT];
+
/* Populate ordered LSMs list from comma-separated LSM name list. */
static void __init ordered_lsm_parse(const char *order, const char *origin)
{
@@ -513,17 +537,29 @@ static int lsm_append(const char *new, char **result)
* security_add_hooks - Add a modules hooks to the hook lists.
* @hooks: the hooks to add
* @count: the number of hooks to add
- * @lsm: the name of the security module
+ * @lsmid: the identification information for the security module
*
* Each LSM has to register its hooks with the infrastructure.
*/
void __init security_add_hooks(struct security_hook_list *hooks, int count,
- const char *lsm)
+ const struct lsm_id *lsmid)
{
int i;
+ /*
+ * A security module may call security_add_hooks() more
+ * than once during initialization, and LSM initialization
+ * is serialized. Landlock is one such case.
+ * Look at the previous entry, if there is one, for duplication.
+ */
+ if (lsm_active_cnt == 0 || lsm_idlist[lsm_active_cnt - 1] != lsmid) {
+ if (lsm_active_cnt >= LSM_CONFIG_COUNT)
+ panic("%s Too many LSMs registered.\n", __func__);
+ lsm_idlist[lsm_active_cnt++] = lsmid;
+ }
+
for (i = 0; i < count; i++) {
- hooks[i].lsm = lsm;
+ hooks[i].lsmid = lsmid;
hlist_add_tail_rcu(&hooks[i].list, hooks[i].head);
}
@@ -532,7 +568,7 @@ void __init security_add_hooks(struct security_hook_list *hooks, int count,
* and fix this up afterwards.
*/
if (slab_is_available()) {
- if (lsm_append(lsm, &lsm_names) < 0)
+ if (lsm_append(lsmid->name, &lsm_names) < 0)
panic("%s - Cannot get early memory.\n", __func__);
}
}
@@ -734,6 +770,54 @@ static int lsm_superblock_alloc(struct super_block *sb)
return 0;
}
+/**
+ * lsm_fill_user_ctx - Fill a user space lsm_ctx structure
+ * @uctx: a userspace LSM context to be filled
+ * @uctx_len: available uctx size (input), used uctx size (output)
+ * @val: the new LSM context value
+ * @val_len: the size of the new LSM context value
+ * @id: LSM id
+ * @flags: LSM defined flags
+ *
+ * Fill all of the fields in a userspace lsm_ctx structure.
+ *
+ * Returns 0 on success, -E2BIG if userspace buffer is not large enough,
+ * -EFAULT on a copyout error, -ENOMEM if memory can't be allocated.
+ */
+int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, size_t *uctx_len,
+ void *val, size_t val_len,
+ u64 id, u64 flags)
+{
+ struct lsm_ctx *nctx = NULL;
+ size_t nctx_len;
+ int rc = 0;
+
+ nctx_len = ALIGN(struct_size(nctx, ctx, val_len), sizeof(void *));
+ if (nctx_len > *uctx_len) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ nctx = kzalloc(nctx_len, GFP_KERNEL);
+ if (nctx == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ nctx->id = id;
+ nctx->flags = flags;
+ nctx->len = nctx_len;
+ nctx->ctx_len = val_len;
+ memcpy(nctx->ctx, val, val_len);
+
+ if (copy_to_user(uctx, nctx, nctx_len))
+ rc = -EFAULT;
+
+out:
+ kfree(nctx);
+ *uctx_len = nctx_len;
+ return rc;
+}
+
/*
* The default value of the LSM hook is defined in linux/lsm_hook_defs.h and
* can be accessed with:
@@ -2539,7 +2623,7 @@ int security_inode_copy_up_xattr(const char *name)
return rc;
}
- return LSM_RET_DEFAULT(inode_copy_up_xattr);
+ return evm_inode_copy_up_xattr(name);
}
EXPORT_SYMBOL(security_inode_copy_up_xattr);
@@ -2580,13 +2664,7 @@ int security_kernfs_init_security(struct kernfs_node *kn_dir,
*/
int security_file_permission(struct file *file, int mask)
{
- int ret;
-
- ret = call_int_hook(file_permission, 0, file, mask);
- if (ret)
- return ret;
-
- return fsnotify_perm(file, mask);
+ return call_int_hook(file_permission, 0, file, mask);
}
/**
@@ -2648,6 +2726,24 @@ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
EXPORT_SYMBOL_GPL(security_file_ioctl);
+/**
+ * security_file_ioctl_compat() - Check if an ioctl is allowed in compat mode
+ * @file: associated file
+ * @cmd: ioctl cmd
+ * @arg: ioctl arguments
+ *
+ * Compat version of security_file_ioctl() that correctly handles 32-bit
+ * processes running on 64-bit kernels.
+ *
+ * Return: Returns 0 if permission is granted.
+ */
+int security_file_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return call_int_hook(file_ioctl_compat, 0, file, cmd, arg);
+}
+EXPORT_SYMBOL_GPL(security_file_ioctl_compat);
+
static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
{
/*
@@ -2837,7 +2933,7 @@ int security_file_open(struct file *file)
if (ret)
return ret;
- return fsnotify_perm(file, MAY_OPEN);
+ return fsnotify_open_perm(file);
}
/**
@@ -3800,10 +3896,158 @@ void security_d_instantiate(struct dentry *dentry, struct inode *inode)
}
EXPORT_SYMBOL(security_d_instantiate);
+/*
+ * Please keep this in sync with its counterpart in security/lsm_syscalls.c
+ */
+
+/**
+ * security_getselfattr - Read an LSM attribute of the current process.
+ * @attr: which attribute to return
+ * @uctx: the user-space destination for the information, or NULL
+ * @size: pointer to the size of space available to receive the data
+ * @flags: special handling options. LSM_FLAG_SINGLE indicates that only
+ * attributes associated with the LSM identified in the passed @uctx be
+ * reported.
+ *
+ * A NULL value for @uctx can be used to get both the number of attributes
+ * and the size of the data.
+ *
+ * Returns the number of attributes found on success, negative value
+ * on error. @size is reset to the total size of the data.
+ * If @size is insufficient to contain the data -E2BIG is returned.
+ */
+int security_getselfattr(unsigned int attr, struct lsm_ctx __user *uctx,
+ size_t __user *size, u32 flags)
+{
+ struct security_hook_list *hp;
+ struct lsm_ctx lctx = { .id = LSM_ID_UNDEF, };
+ u8 __user *base = (u8 __user *)uctx;
+ size_t total = 0;
+ size_t entrysize;
+ size_t left;
+ bool toobig = false;
+ bool single = false;
+ int count = 0;
+ int rc;
+
+ if (attr == LSM_ATTR_UNDEF)
+ return -EINVAL;
+ if (size == NULL)
+ return -EINVAL;
+ if (get_user(left, size))
+ return -EFAULT;
+
+ if (flags) {
+ /*
+ * Only flag supported is LSM_FLAG_SINGLE
+ */
+ if (flags != LSM_FLAG_SINGLE || !uctx)
+ return -EINVAL;
+ if (copy_from_user(&lctx, uctx, sizeof(lctx)))
+ return -EFAULT;
+ /*
+ * If the LSM ID isn't specified it is an error.
+ */
+ if (lctx.id == LSM_ID_UNDEF)
+ return -EINVAL;
+ single = true;
+ }
+
+ /*
+ * In the usual case gather all the data from the LSMs.
+ * In the single case only get the data from the LSM specified.
+ */
+ hlist_for_each_entry(hp, &security_hook_heads.getselfattr, list) {
+ if (single && lctx.id != hp->lsmid->id)
+ continue;
+ entrysize = left;
+ if (base)
+ uctx = (struct lsm_ctx __user *)(base + total);
+ rc = hp->hook.getselfattr(attr, uctx, &entrysize, flags);
+ if (rc == -EOPNOTSUPP) {
+ rc = 0;
+ continue;
+ }
+ if (rc == -E2BIG) {
+ rc = 0;
+ left = 0;
+ toobig = true;
+ } else if (rc < 0)
+ return rc;
+ else
+ left -= entrysize;
+
+ total += entrysize;
+ count += rc;
+ if (single)
+ break;
+ }
+ if (put_user(total, size))
+ return -EFAULT;
+ if (toobig)
+ return -E2BIG;
+ if (count == 0)
+ return LSM_RET_DEFAULT(getselfattr);
+ return count;
+}
+
+/*
+ * Please keep this in sync with its counterpart in security/lsm_syscalls.c
+ */
+
+/**
+ * security_setselfattr - Set an LSM attribute on the current process.
+ * @attr: which attribute to set
+ * @uctx: the user-space source for the information
+ * @size: the size of the data
+ * @flags: reserved for future use, must be 0
+ *
+ * Set an LSM attribute for the current process. The LSM, attribute
+ * and new value are included in @uctx.
+ *
+ * Returns 0 on success, -EINVAL if the input is inconsistent, -EFAULT
+ * if the user buffer is inaccessible, -E2BIG if size is too big, or an
+ * LSM specific failure.
+ */
+int security_setselfattr(unsigned int attr, struct lsm_ctx __user *uctx,
+ size_t size, u32 flags)
+{
+ struct security_hook_list *hp;
+ struct lsm_ctx *lctx;
+ int rc = LSM_RET_DEFAULT(setselfattr);
+
+ if (flags)
+ return -EINVAL;
+ if (size < sizeof(*lctx))
+ return -EINVAL;
+ if (size > PAGE_SIZE)
+ return -E2BIG;
+
+ lctx = memdup_user(uctx, size);
+ if (IS_ERR(lctx))
+ return PTR_ERR(lctx);
+
+ if (size < lctx->len || size < lctx->ctx_len + sizeof(*lctx) ||
+ lctx->len < lctx->ctx_len + sizeof(*lctx)) {
+ rc = -EINVAL;
+ goto free_out;
+ }
+
+ hlist_for_each_entry(hp, &security_hook_heads.setselfattr, list)
+ if ((hp->lsmid->id) == lctx->id) {
+ rc = hp->hook.setselfattr(attr, lctx, size, flags);
+ break;
+ }
+
+free_out:
+ kfree(lctx);
+ return rc;
+}
+
/**
* security_getprocattr() - Read an attribute for a task
* @p: the task
- * @lsm: LSM name
+ * @lsmid: LSM identification
* @name: attribute name
* @value: attribute value
*
@@ -3811,13 +4055,13 @@ EXPORT_SYMBOL(security_d_instantiate);
*
* Return: Returns the length of @value on success, a negative value otherwise.
*/
-int security_getprocattr(struct task_struct *p, const char *lsm,
- const char *name, char **value)
+int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
+ char **value)
{
struct security_hook_list *hp;
hlist_for_each_entry(hp, &security_hook_heads.getprocattr, list) {
- if (lsm != NULL && strcmp(lsm, hp->lsm))
+ if (lsmid != 0 && lsmid != hp->lsmid->id)
continue;
return hp->hook.getprocattr(p, name, value);
}
@@ -3826,7 +4070,7 @@ int security_getprocattr(struct task_struct *p, const char *lsm,
/**
* security_setprocattr() - Set an attribute for a task
- * @lsm: LSM name
+ * @lsmid: LSM identification
* @name: attribute name
* @value: attribute value
* @size: attribute value size
@@ -3836,13 +4080,12 @@ int security_getprocattr(struct task_struct *p, const char *lsm,
*
* Return: Returns bytes written on success, a negative value otherwise.
*/
-int security_setprocattr(const char *lsm, const char *name, void *value,
- size_t size)
+int security_setprocattr(int lsmid, const char *name, void *value, size_t size)
{
struct security_hook_list *hp;
hlist_for_each_entry(hp, &security_hook_heads.setprocattr, list) {
- if (lsm != NULL && strcmp(lsm, hp->lsm))
+ if (lsmid != 0 && lsmid != hp->lsmid->id)
continue;
return hp->hook.setprocattr(name, value, size);
}
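
A hedged user-space counterpart to the security_getselfattr()/lsm_fill_user_ctx() contract above: an undersized call reports the total size through -E2BIG, the caller retries, then walks the packed lsm_ctx records using each entry's len field (lsm_fill_user_ctx pads it to a pointer-size boundary, so it doubles as the stride). The syscall number and constants are assumed to come from this series' uapi headers:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/lsm.h>		/* struct lsm_ctx, LSM_ATTR_CURRENT */

int main(void)
{
	size_t size = 0;
	struct lsm_ctx *ctx, *p;
	long count, i;

	/* Probe with a zero size: -E2BIG is expected, *size gets the total. */
	if (syscall(__NR_lsm_get_self_attr, LSM_ATTR_CURRENT, NULL,
		    &size, 0) < 0 && errno != E2BIG) {
		perror("lsm_get_self_attr");
		return 1;
	}

	ctx = calloc(1, size);
	if (!ctx)
		return 1;

	count = syscall(__NR_lsm_get_self_attr, LSM_ATTR_CURRENT, ctx,
			&size, 0);
	if (count < 0) {
		perror("lsm_get_self_attr");
		free(ctx);
		return 1;
	}

	/* Entries are laid out back to back; ->len gives the stride. */
	for (i = 0, p = ctx; i < count; i++) {
		printf("lsm id %llu: %.*s\n", (unsigned long long)p->id,
		       (int)p->ctx_len, (const char *)p->ctx);
		p = (struct lsm_ctx *)((unsigned char *)p + p->len);
	}
	free(ctx);
	return 0;
}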
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 340b2bbbb2dd..5e5fd5be6d93 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -92,6 +92,7 @@
#include <linux/fsnotify.h>
#include <linux/fanotify.h>
#include <linux/io_uring.h>
+#include <uapi/linux/lsm.h>
#include "avc.h"
#include "objsec.h"
@@ -2313,6 +2314,19 @@ static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm)
new_tsec->keycreate_sid = 0;
new_tsec->sockcreate_sid = 0;
+ /*
+ * Before policy is loaded, label any task outside kernel space
+ * as SECINITSID_INIT, so that any userspace tasks surviving from
+ * early boot end up with a label different from SECINITSID_KERNEL
+ * (if the policy chooses to set SECINITSID_INIT != SECINITSID_KERNEL).
+ */
+ if (!selinux_initialized()) {
+ new_tsec->sid = SECINITSID_INIT;
+ /* also clear the exec_sid just in case */
+ new_tsec->exec_sid = 0;
+ return 0;
+ }
+
if (old_tsec->exec_sid) {
new_tsec->sid = old_tsec->exec_sid;
/* Reset exec SID on execve. */
@@ -3725,6 +3739,33 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
return error;
}
+static int selinux_file_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ /*
+ * If we are in a 64-bit kernel running 32-bit userspace, we need to
+ * make sure we don't compare 32-bit flags to 64-bit flags.
+ */
+ switch (cmd) {
+ case FS_IOC32_GETFLAGS:
+ cmd = FS_IOC_GETFLAGS;
+ break;
+ case FS_IOC32_SETFLAGS:
+ cmd = FS_IOC_SETFLAGS;
+ break;
+ case FS_IOC32_GETVERSION:
+ cmd = FS_IOC_GETVERSION;
+ break;
+ case FS_IOC32_SETVERSION:
+ cmd = FS_IOC_SETVERSION;
+ break;
+ default:
+ break;
+ }
+
+ return selinux_file_ioctl(file, cmd, arg);
+}
+
static int default_noexec __ro_after_init;
static int file_map_prot_check(struct file *file, unsigned long prot, int shared)
@@ -4547,6 +4588,21 @@ static int sock_has_perm(struct sock *sk, u32 perms)
if (sksec->sid == SECINITSID_KERNEL)
return 0;
+ /*
+ * Before POLICYDB_CAP_USERSPACE_INITIAL_CONTEXT, sockets that
+ * inherited the kernel context from early boot used to be skipped
+ * here, so preserve that behavior unless the capability is set.
+ *
+ * By setting the capability the policy signals that it is ready
+ * for this quirk to be fixed. Note that sockets created by a kernel
+ * thread or a usermode helper executed without a transition will
+ * still be skipped in this check regardless of the policycap
+ * setting.
+ */
+ if (!selinux_policycap_userspace_initial_context() &&
+ sksec->sid == SECINITSID_INIT)
+ return 0;
+
ad_net_init_from_sk(&ad, &net, sk);
return avc_has_perm(current_sid(), sksec->sid, sksec->sclass, perms,
@@ -4661,6 +4717,13 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
return -EINVAL;
addr4 = (struct sockaddr_in *)address;
if (family_sa == AF_UNSPEC) {
+ if (family == PF_INET6) {
+ /* Length check from inet6_bind_sk() */
+ if (addrlen < SIN6_LEN_RFC2133)
+ return -EINVAL;
+ /* Family check from __inet6_bind() */
+ goto err_af;
+ }
/* see __inet_bind(), we only want to allow
* AF_UNSPEC if the address is INADDR_ANY
*/
@@ -6278,8 +6341,8 @@ static void selinux_d_instantiate(struct dentry *dentry, struct inode *inode)
inode_doinit_with_dentry(inode, dentry);
}
-static int selinux_getprocattr(struct task_struct *p,
- const char *name, char **value)
+static int selinux_lsm_getattr(unsigned int attr, struct task_struct *p,
+ char **value)
{
const struct task_security_struct *__tsec;
u32 sid;
@@ -6296,20 +6359,27 @@ static int selinux_getprocattr(struct task_struct *p,
goto bad;
}
- if (!strcmp(name, "current"))
+ switch (attr) {
+ case LSM_ATTR_CURRENT:
sid = __tsec->sid;
- else if (!strcmp(name, "prev"))
+ break;
+ case LSM_ATTR_PREV:
sid = __tsec->osid;
- else if (!strcmp(name, "exec"))
+ break;
+ case LSM_ATTR_EXEC:
sid = __tsec->exec_sid;
- else if (!strcmp(name, "fscreate"))
+ break;
+ case LSM_ATTR_FSCREATE:
sid = __tsec->create_sid;
- else if (!strcmp(name, "keycreate"))
+ break;
+ case LSM_ATTR_KEYCREATE:
sid = __tsec->keycreate_sid;
- else if (!strcmp(name, "sockcreate"))
+ break;
+ case LSM_ATTR_SOCKCREATE:
sid = __tsec->sockcreate_sid;
- else {
- error = -EINVAL;
+ break;
+ default:
+ error = -EOPNOTSUPP;
goto bad;
}
rcu_read_unlock();
@@ -6327,7 +6397,7 @@ bad:
return error;
}
-static int selinux_setprocattr(const char *name, void *value, size_t size)
+static int selinux_lsm_setattr(u64 attr, void *value, size_t size)
{
struct task_security_struct *tsec;
struct cred *new;
@@ -6338,23 +6408,31 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
/*
* Basic control over ability to set these attributes at all.
*/
- if (!strcmp(name, "exec"))
+ switch (attr) {
+ case LSM_ATTR_EXEC:
error = avc_has_perm(mysid, mysid, SECCLASS_PROCESS,
PROCESS__SETEXEC, NULL);
- else if (!strcmp(name, "fscreate"))
+ break;
+ case LSM_ATTR_FSCREATE:
error = avc_has_perm(mysid, mysid, SECCLASS_PROCESS,
PROCESS__SETFSCREATE, NULL);
- else if (!strcmp(name, "keycreate"))
+ break;
+ case LSM_ATTR_KEYCREATE:
error = avc_has_perm(mysid, mysid, SECCLASS_PROCESS,
PROCESS__SETKEYCREATE, NULL);
- else if (!strcmp(name, "sockcreate"))
+ break;
+ case LSM_ATTR_SOCKCREATE:
error = avc_has_perm(mysid, mysid, SECCLASS_PROCESS,
PROCESS__SETSOCKCREATE, NULL);
- else if (!strcmp(name, "current"))
+ break;
+ case LSM_ATTR_CURRENT:
error = avc_has_perm(mysid, mysid, SECCLASS_PROCESS,
PROCESS__SETCURRENT, NULL);
- else
- error = -EINVAL;
+ break;
+ default:
+ error = -EOPNOTSUPP;
+ break;
+ }
if (error)
return error;
@@ -6366,13 +6444,14 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
}
error = security_context_to_sid(value, size,
&sid, GFP_KERNEL);
- if (error == -EINVAL && !strcmp(name, "fscreate")) {
+ if (error == -EINVAL && attr == LSM_ATTR_FSCREATE) {
if (!has_cap_mac_admin(true)) {
struct audit_buffer *ab;
size_t audit_size;
- /* We strip a nul only if it is at the end, otherwise the
- * context contains a nul and we should audit that */
+ /* We strip a nul only if it is at the end,
+ * otherwise the context contains a nul and
+ * we should audit that */
if (str[size - 1] == '\0')
audit_size = size - 1;
else
@@ -6383,7 +6462,8 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
if (!ab)
return error;
audit_log_format(ab, "op=fscreate invalid_context=");
- audit_log_n_untrustedstring(ab, value, audit_size);
+ audit_log_n_untrustedstring(ab, value,
+ audit_size);
audit_log_end(ab);
return error;
@@ -6406,11 +6486,11 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
checks and may_create for the file creation checks. The
operation will then fail if the context is not permitted. */
tsec = selinux_cred(new);
- if (!strcmp(name, "exec")) {
+ if (attr == LSM_ATTR_EXEC) {
tsec->exec_sid = sid;
- } else if (!strcmp(name, "fscreate")) {
+ } else if (attr == LSM_ATTR_FSCREATE) {
tsec->create_sid = sid;
- } else if (!strcmp(name, "keycreate")) {
+ } else if (attr == LSM_ATTR_KEYCREATE) {
if (sid) {
error = avc_has_perm(mysid, sid,
SECCLASS_KEY, KEY__CREATE, NULL);
@@ -6418,14 +6498,13 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
goto abort_change;
}
tsec->keycreate_sid = sid;
- } else if (!strcmp(name, "sockcreate")) {
+ } else if (attr == LSM_ATTR_SOCKCREATE) {
tsec->sockcreate_sid = sid;
- } else if (!strcmp(name, "current")) {
+ } else if (attr == LSM_ATTR_CURRENT) {
error = -EINVAL;
if (sid == 0)
goto abort_change;
- /* Only allow single threaded processes to change context */
if (!current_is_single_threaded()) {
error = security_bounded_transition(tsec->sid, sid);
if (error)
@@ -6462,6 +6541,69 @@ abort_change:
return error;
}
+/**
+ * selinux_getselfattr - Get SELinux current task attributes
+ * @attr: the requested attribute
+ * @ctx: buffer to receive the result
+ * @size: buffer size (input), buffer size used (output)
+ * @flags: unused
+ *
+ * Fill the passed user space @ctx with the details of the requested
+ * attribute.
+ *
+ * Returns the number of attributes on success, an error code otherwise.
+ * There will only ever be one attribute.
+ */
+static int selinux_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
+ size_t *size, u32 flags)
+{
+ int rc;
+ char *val;
+ int val_len;
+
+ val_len = selinux_lsm_getattr(attr, current, &val);
+ if (val_len < 0)
+ return val_len;
+ rc = lsm_fill_user_ctx(ctx, size, val, val_len, LSM_ID_SELINUX, 0);
+ kfree(val);
+ return (!rc ? 1 : rc);
+}
+
+static int selinux_setselfattr(unsigned int attr, struct lsm_ctx *ctx,
+ size_t size, u32 flags)
+{
+ int rc;
+
+ rc = selinux_lsm_setattr(attr, ctx->ctx, ctx->ctx_len);
+ if (rc > 0)
+ return 0;
+ return rc;
+}
+
+static int selinux_getprocattr(struct task_struct *p,
+ const char *name, char **value)
+{
+ unsigned int attr = lsm_name_to_attr(name);
+ int rc;
+
+ if (attr) {
+ rc = selinux_lsm_getattr(attr, p, value);
+ if (rc != -EOPNOTSUPP)
+ return rc;
+ }
+
+ return -EINVAL;
+}
+
+static int selinux_setprocattr(const char *name, void *value, size_t size)
+{
+ int attr = lsm_name_to_attr(name);
+
+ if (attr)
+ return selinux_lsm_setattr(attr, value, size);
+ return -EINVAL;
+}
+
static int selinux_ismaclabel(const char *name)
{
return (strcmp(name, XATTR_SELINUX_SUFFIX) == 0);
@@ -6944,6 +7086,11 @@ static int selinux_uring_cmd(struct io_uring_cmd *ioucmd)
}
#endif /* CONFIG_IO_URING */
+static const struct lsm_id selinux_lsmid = {
+ .name = "selinux",
+ .id = LSM_ID_SELINUX,
+};
+
/*
* IMPORTANT NOTE: When adding new hooks, please be careful to keep this order:
* 1. any hooks that don't belong to (2.) or (3.) below,
@@ -7030,6 +7177,7 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
LSM_HOOK_INIT(file_permission, selinux_file_permission),
LSM_HOOK_INIT(file_alloc_security, selinux_file_alloc_security),
LSM_HOOK_INIT(file_ioctl, selinux_file_ioctl),
+ LSM_HOOK_INIT(file_ioctl_compat, selinux_file_ioctl_compat),
LSM_HOOK_INIT(mmap_file, selinux_mmap_file),
LSM_HOOK_INIT(mmap_addr, selinux_mmap_addr),
LSM_HOOK_INIT(file_mprotect, selinux_file_mprotect),
@@ -7085,6 +7233,8 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
LSM_HOOK_INIT(d_instantiate, selinux_d_instantiate),
+ LSM_HOOK_INIT(getselfattr, selinux_getselfattr),
+ LSM_HOOK_INIT(setselfattr, selinux_setselfattr),
LSM_HOOK_INIT(getprocattr, selinux_getprocattr),
LSM_HOOK_INIT(setprocattr, selinux_setprocattr),
@@ -7264,7 +7414,8 @@ static __init int selinux_init(void)
hashtab_cache_init();
- security_add_hooks(selinux_hooks, ARRAY_SIZE(selinux_hooks), "selinux");
+ security_add_hooks(selinux_hooks, ARRAY_SIZE(selinux_hooks),
+ &selinux_lsmid);
if (avc_add_callback(selinux_netcache_avc_callback, AVC_CALLBACK_RESET))
panic("SELinux: Unable to register AVC netcache callback\n");
diff --git a/security/selinux/include/audit.h b/security/selinux/include/audit.h
index d5495134a5b9..52aca71210b4 100644
--- a/security/selinux/include/audit.h
+++ b/security/selinux/include/audit.h
@@ -57,4 +57,3 @@ int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *rule);
int selinux_audit_rule_known(struct audit_krule *rule);
#endif /* _SELINUX_AUDIT_H */
-
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 8f0aa66ccb13..96a614d47df8 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -4,6 +4,7 @@
*
* Author : Stephen Smalley, <stephen.smalley.work@gmail.com>
*/
+
#ifndef _SELINUX_AVC_H_
#define _SELINUX_AVC_H_
@@ -60,11 +61,8 @@ struct selinux_audit_data {
void __init avc_init(void);
-static inline u32 avc_audit_required(u32 requested,
- struct av_decision *avd,
- int result,
- u32 auditdeny,
- u32 *deniedp)
+static inline u32 avc_audit_required(u32 requested, struct av_decision *avd,
+ int result, u32 auditdeny, u32 *deniedp)
{
u32 denied, audited;
denied = requested & ~avd->allowed;
@@ -96,9 +94,8 @@ static inline u32 avc_audit_required(u32 requested,
return audited;
}
-int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
- u32 requested, u32 audited, u32 denied, int result,
- struct common_audit_data *a);
+int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested, u32 audited,
+ u32 denied, int result, struct common_audit_data *a);
/**
* avc_audit - Audit the granting or denial of permissions.
@@ -119,36 +116,29 @@ int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
* be performed under a lock, to allow the lock to be released
* before calling the auditing code.
*/
-static inline int avc_audit(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct av_decision *avd,
- int result,
+static inline int avc_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested,
+ struct av_decision *avd, int result,
struct common_audit_data *a)
{
u32 audited, denied;
audited = avc_audit_required(requested, avd, result, 0, &denied);
if (likely(!audited))
return 0;
- return slow_avc_audit(ssid, tsid, tclass,
- requested, audited, denied, result,
- a);
+ return slow_avc_audit(ssid, tsid, tclass, requested, audited, denied,
+ result, a);
}
-#define AVC_STRICT 1 /* Ignore permissive mode. */
-#define AVC_EXTENDED_PERMS 2 /* update extended permissions */
-int avc_has_perm_noaudit(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- unsigned flags,
- struct av_decision *avd);
+#define AVC_STRICT 1 /* Ignore permissive mode. */
+#define AVC_EXTENDED_PERMS 2 /* update extended permissions */
+int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested,
+ unsigned int flags, struct av_decision *avd);
-int avc_has_perm(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
+int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, u32 requested,
struct common_audit_data *auditdata);
int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
u8 driver, u8 perm, struct common_audit_data *ad);
-
u32 avc_policy_seqno(void);
#define AVC_CALLBACK_GRANT 1
@@ -156,7 +146,7 @@ u32 avc_policy_seqno(void);
#define AVC_CALLBACK_REVOKE 4
#define AVC_CALLBACK_RESET 8
#define AVC_CALLBACK_AUDITALLOW_ENABLE 16
-#define AVC_CALLBACK_AUDITALLOW_DISABLE 32
+#define AVC_CALLBACK_AUDITALLOW_DISABLE 32
#define AVC_CALLBACK_AUDITDENY_ENABLE 64
#define AVC_CALLBACK_AUDITDENY_DISABLE 128
#define AVC_CALLBACK_ADD_XPERMS 256
@@ -173,4 +163,3 @@ DECLARE_PER_CPU(struct avc_cache_stats, avc_cache_stats);
#endif
#endif /* _SELINUX_AVC_H_ */
-
diff --git a/security/selinux/include/avc_ss.h b/security/selinux/include/avc_ss.h
index 88b139e086c4..48ad64d54032 100644
--- a/security/selinux/include/avc_ss.h
+++ b/security/selinux/include/avc_ss.h
@@ -4,6 +4,7 @@
*
* Author : Stephen Smalley, <stephen.smalley.work@gmail.com>
*/
+
#ifndef _SELINUX_AVC_SS_H_
#define _SELINUX_AVC_SS_H_
@@ -20,4 +21,3 @@ struct security_class_mapping {
extern const struct security_class_mapping secclass_map[];
#endif /* _SELINUX_AVC_SS_H_ */
-
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index a3c380775d41..7229c9bf6c27 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -1,34 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
#include <linux/capability.h>
#include <linux/socket.h>
-#define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \
- "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map"
+#define COMMON_FILE_SOCK_PERMS \
+ "ioctl", "read", "write", "create", "getattr", "setattr", "lock", \
+ "relabelfrom", "relabelto", "append", "map"
-#define COMMON_FILE_PERMS COMMON_FILE_SOCK_PERMS, "unlink", "link", \
- "rename", "execute", "quotaon", "mounton", "audit_access", \
- "open", "execmod", "watch", "watch_mount", "watch_sb", \
- "watch_with_perm", "watch_reads"
+#define COMMON_FILE_PERMS \
+ COMMON_FILE_SOCK_PERMS, "unlink", "link", "rename", "execute", \
+ "quotaon", "mounton", "audit_access", "open", "execmod", \
+ "watch", "watch_mount", "watch_sb", "watch_with_perm", \
+ "watch_reads"
-#define COMMON_SOCK_PERMS COMMON_FILE_SOCK_PERMS, "bind", "connect", \
- "listen", "accept", "getopt", "setopt", "shutdown", "recvfrom", \
- "sendto", "name_bind"
+#define COMMON_SOCK_PERMS \
+ COMMON_FILE_SOCK_PERMS, "bind", "connect", "listen", "accept", \
+ "getopt", "setopt", "shutdown", "recvfrom", "sendto", \
+ "name_bind"
-#define COMMON_IPC_PERMS "create", "destroy", "getattr", "setattr", "read", \
- "write", "associate", "unix_read", "unix_write"
+#define COMMON_IPC_PERMS \
+ "create", "destroy", "getattr", "setattr", "read", "write", \
+ "associate", "unix_read", "unix_write"
-#define COMMON_CAP_PERMS "chown", "dac_override", "dac_read_search", \
- "fowner", "fsetid", "kill", "setgid", "setuid", "setpcap", \
- "linux_immutable", "net_bind_service", "net_broadcast", \
- "net_admin", "net_raw", "ipc_lock", "ipc_owner", "sys_module", \
- "sys_rawio", "sys_chroot", "sys_ptrace", "sys_pacct", "sys_admin", \
- "sys_boot", "sys_nice", "sys_resource", "sys_time", \
- "sys_tty_config", "mknod", "lease", "audit_write", \
- "audit_control", "setfcap"
+#define COMMON_CAP_PERMS \
+ "chown", "dac_override", "dac_read_search", "fowner", "fsetid", \
+ "kill", "setgid", "setuid", "setpcap", "linux_immutable", \
+ "net_bind_service", "net_broadcast", "net_admin", "net_raw", \
+ "ipc_lock", "ipc_owner", "sys_module", "sys_rawio", \
+ "sys_chroot", "sys_ptrace", "sys_pacct", "sys_admin", \
+ "sys_boot", "sys_nice", "sys_resource", "sys_time", \
+ "sys_tty_config", "mknod", "lease", "audit_write", \
+ "audit_control", "setfcap"
-#define COMMON_CAP2_PERMS "mac_override", "mac_admin", "syslog", \
- "wake_alarm", "block_suspend", "audit_read", "perfmon", "bpf", \
- "checkpoint_restore"
+#define COMMON_CAP2_PERMS \
+ "mac_override", "mac_admin", "syslog", "wake_alarm", "block_suspend", \
+ "audit_read", "perfmon", "bpf", "checkpoint_restore"
#if CAP_LAST_CAP > CAP_CHECKPOINT_RESTORE
#error New capability defined, please update COMMON_CAP2_PERMS.
@@ -40,224 +46,140 @@
*/
const struct security_class_mapping secclass_map[] = {
{ "security",
- { "compute_av", "compute_create", "compute_member",
- "check_context", "load_policy", "compute_relabel",
- "compute_user", "setenforce", "setbool", "setsecparam",
- "setcheckreqprot", "read_policy", "validate_trans", NULL } },
+ { "compute_av", "compute_create", "compute_member", "check_context",
+ "load_policy", "compute_relabel", "compute_user", "setenforce",
+ "setbool", "setsecparam", "setcheckreqprot", "read_policy",
+ "validate_trans", NULL } },
{ "process",
- { "fork", "transition", "sigchld", "sigkill",
- "sigstop", "signull", "signal", "ptrace", "getsched", "setsched",
- "getsession", "getpgid", "setpgid", "getcap", "setcap", "share",
- "getattr", "setexec", "setfscreate", "noatsecure", "siginh",
- "setrlimit", "rlimitinh", "dyntransition", "setcurrent",
- "execmem", "execstack", "execheap", "setkeycreate",
- "setsockcreate", "getrlimit", NULL } },
- { "process2",
- { "nnp_transition", "nosuid_transition", NULL } },
+ { "fork", "transition", "sigchld", "sigkill",
+ "sigstop", "signull", "signal", "ptrace",
+ "getsched", "setsched", "getsession", "getpgid",
+ "setpgid", "getcap", "setcap", "share",
+ "getattr", "setexec", "setfscreate", "noatsecure",
+ "siginh", "setrlimit", "rlimitinh", "dyntransition",
+ "setcurrent", "execmem", "execstack", "execheap",
+ "setkeycreate", "setsockcreate", "getrlimit", NULL } },
+ { "process2", { "nnp_transition", "nosuid_transition", NULL } },
{ "system",
- { "ipc_info", "syslog_read", "syslog_mod",
- "syslog_console", "module_request", "module_load", NULL } },
- { "capability",
- { COMMON_CAP_PERMS, NULL } },
+ { "ipc_info", "syslog_read", "syslog_mod", "syslog_console",
+ "module_request", "module_load", NULL } },
+ { "capability", { COMMON_CAP_PERMS, NULL } },
{ "filesystem",
- { "mount", "remount", "unmount", "getattr",
- "relabelfrom", "relabelto", "associate", "quotamod",
- "quotaget", "watch", NULL } },
+ { "mount", "remount", "unmount", "getattr", "relabelfrom",
+ "relabelto", "associate", "quotamod", "quotaget", "watch", NULL } },
{ "file",
- { COMMON_FILE_PERMS,
- "execute_no_trans", "entrypoint", NULL } },
+ { COMMON_FILE_PERMS, "execute_no_trans", "entrypoint", NULL } },
{ "dir",
- { COMMON_FILE_PERMS, "add_name", "remove_name",
- "reparent", "search", "rmdir", NULL } },
+ { COMMON_FILE_PERMS, "add_name", "remove_name", "reparent", "search",
+ "rmdir", NULL } },
{ "fd", { "use", NULL } },
- { "lnk_file",
- { COMMON_FILE_PERMS, NULL } },
- { "chr_file",
- { COMMON_FILE_PERMS, NULL } },
- { "blk_file",
- { COMMON_FILE_PERMS, NULL } },
- { "sock_file",
- { COMMON_FILE_PERMS, NULL } },
- { "fifo_file",
- { COMMON_FILE_PERMS, NULL } },
- { "socket",
- { COMMON_SOCK_PERMS, NULL } },
+ { "lnk_file", { COMMON_FILE_PERMS, NULL } },
+ { "chr_file", { COMMON_FILE_PERMS, NULL } },
+ { "blk_file", { COMMON_FILE_PERMS, NULL } },
+ { "sock_file", { COMMON_FILE_PERMS, NULL } },
+ { "fifo_file", { COMMON_FILE_PERMS, NULL } },
+ { "socket", { COMMON_SOCK_PERMS, NULL } },
{ "tcp_socket",
- { COMMON_SOCK_PERMS,
- "node_bind", "name_connect",
- NULL } },
- { "udp_socket",
- { COMMON_SOCK_PERMS,
- "node_bind", NULL } },
- { "rawip_socket",
- { COMMON_SOCK_PERMS,
- "node_bind", NULL } },
- { "node",
- { "recvfrom", "sendto", NULL } },
- { "netif",
- { "ingress", "egress", NULL } },
- { "netlink_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "packet_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "key_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "unix_stream_socket",
- { COMMON_SOCK_PERMS, "connectto", NULL } },
- { "unix_dgram_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "sem",
- { COMMON_IPC_PERMS, NULL } },
+ { COMMON_SOCK_PERMS, "node_bind", "name_connect", NULL } },
+ { "udp_socket", { COMMON_SOCK_PERMS, "node_bind", NULL } },
+ { "rawip_socket", { COMMON_SOCK_PERMS, "node_bind", NULL } },
+ { "node", { "recvfrom", "sendto", NULL } },
+ { "netif", { "ingress", "egress", NULL } },
+ { "netlink_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "packet_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "key_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "unix_stream_socket", { COMMON_SOCK_PERMS, "connectto", NULL } },
+ { "unix_dgram_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "sem", { COMMON_IPC_PERMS, NULL } },
{ "msg", { "send", "receive", NULL } },
- { "msgq",
- { COMMON_IPC_PERMS, "enqueue", NULL } },
- { "shm",
- { COMMON_IPC_PERMS, "lock", NULL } },
- { "ipc",
- { COMMON_IPC_PERMS, NULL } },
+ { "msgq", { COMMON_IPC_PERMS, "enqueue", NULL } },
+ { "shm", { COMMON_IPC_PERMS, "lock", NULL } },
+ { "ipc", { COMMON_IPC_PERMS, NULL } },
{ "netlink_route_socket",
- { COMMON_SOCK_PERMS,
- "nlmsg_read", "nlmsg_write", NULL } },
+ { COMMON_SOCK_PERMS, "nlmsg_read", "nlmsg_write", NULL } },
{ "netlink_tcpdiag_socket",
- { COMMON_SOCK_PERMS,
- "nlmsg_read", "nlmsg_write", NULL } },
- { "netlink_nflog_socket",
- { COMMON_SOCK_PERMS, NULL } },
+ { COMMON_SOCK_PERMS, "nlmsg_read", "nlmsg_write", NULL } },
+ { "netlink_nflog_socket", { COMMON_SOCK_PERMS, NULL } },
{ "netlink_xfrm_socket",
- { COMMON_SOCK_PERMS,
- "nlmsg_read", "nlmsg_write", NULL } },
- { "netlink_selinux_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "netlink_iscsi_socket",
- { COMMON_SOCK_PERMS, NULL } },
+ { COMMON_SOCK_PERMS, "nlmsg_read", "nlmsg_write", NULL } },
+ { "netlink_selinux_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_iscsi_socket", { COMMON_SOCK_PERMS, NULL } },
{ "netlink_audit_socket",
- { COMMON_SOCK_PERMS,
- "nlmsg_read", "nlmsg_write", "nlmsg_relay", "nlmsg_readpriv",
- "nlmsg_tty_audit", NULL } },
- { "netlink_fib_lookup_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "netlink_connector_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "netlink_netfilter_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "netlink_dnrt_socket",
- { COMMON_SOCK_PERMS, NULL } },
+ { COMMON_SOCK_PERMS, "nlmsg_read", "nlmsg_write", "nlmsg_relay",
+ "nlmsg_readpriv", "nlmsg_tty_audit", NULL } },
+ { "netlink_fib_lookup_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_connector_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_netfilter_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_dnrt_socket", { COMMON_SOCK_PERMS, NULL } },
{ "association",
{ "sendto", "recvfrom", "setcontext", "polmatch", NULL } },
- { "netlink_kobject_uevent_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "netlink_generic_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "netlink_scsitransport_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "netlink_rdma_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "netlink_crypto_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "appletalk_socket",
- { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_kobject_uevent_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_generic_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_scsitransport_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_rdma_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_crypto_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "appletalk_socket", { COMMON_SOCK_PERMS, NULL } },
{ "packet",
{ "send", "recv", "relabelto", "forward_in", "forward_out", NULL } },
{ "key",
{ "view", "read", "write", "search", "link", "setattr", "create",
NULL } },
{ "dccp_socket",
- { COMMON_SOCK_PERMS,
- "node_bind", "name_connect", NULL } },
+ { COMMON_SOCK_PERMS, "node_bind", "name_connect", NULL } },
{ "memprotect", { "mmap_zero", NULL } },
{ "peer", { "recv", NULL } },
- { "capability2",
- { COMMON_CAP2_PERMS, NULL } },
+ { "capability2", { COMMON_CAP2_PERMS, NULL } },
{ "kernel_service", { "use_as_override", "create_files_as", NULL } },
- { "tun_socket",
- { COMMON_SOCK_PERMS, "attach_queue", NULL } },
- { "binder", { "impersonate", "call", "set_context_mgr", "transfer",
- NULL } },
- { "cap_userns",
- { COMMON_CAP_PERMS, NULL } },
- { "cap2_userns",
- { COMMON_CAP2_PERMS, NULL } },
+ { "tun_socket", { COMMON_SOCK_PERMS, "attach_queue", NULL } },
+ { "binder",
+ { "impersonate", "call", "set_context_mgr", "transfer", NULL } },
+ { "cap_userns", { COMMON_CAP_PERMS, NULL } },
+ { "cap2_userns", { COMMON_CAP2_PERMS, NULL } },
{ "sctp_socket",
- { COMMON_SOCK_PERMS,
- "node_bind", "name_connect", "association", NULL } },
- { "icmp_socket",
- { COMMON_SOCK_PERMS,
- "node_bind", NULL } },
- { "ax25_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "ipx_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "netrom_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "atmpvc_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "x25_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "rose_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "decnet_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "atmsvc_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "rds_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "irda_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "pppox_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "llc_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "can_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "tipc_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "bluetooth_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "iucv_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "rxrpc_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "isdn_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "phonet_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "ieee802154_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "caif_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "alg_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "nfc_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "vsock_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "kcm_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "qipcrtr_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "smc_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "infiniband_pkey",
- { "access", NULL } },
- { "infiniband_endport",
- { "manage_subnet", NULL } },
+ { COMMON_SOCK_PERMS, "node_bind", "name_connect", "association",
+ NULL } },
+ { "icmp_socket", { COMMON_SOCK_PERMS, "node_bind", NULL } },
+ { "ax25_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "ipx_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "netrom_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "atmpvc_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "x25_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "rose_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "decnet_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "atmsvc_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "rds_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "irda_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "pppox_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "llc_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "can_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "tipc_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "bluetooth_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "iucv_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "rxrpc_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "isdn_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "phonet_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "ieee802154_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "caif_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "alg_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "nfc_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "vsock_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "kcm_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "qipcrtr_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "smc_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "infiniband_pkey", { "access", NULL } },
+ { "infiniband_endport", { "manage_subnet", NULL } },
{ "bpf",
{ "map_create", "map_read", "map_write", "prog_load", "prog_run",
NULL } },
- { "xdp_socket",
- { COMMON_SOCK_PERMS, NULL } },
- { "mctp_socket",
- { COMMON_SOCK_PERMS, NULL } },
+ { "xdp_socket", { COMMON_SOCK_PERMS, NULL } },
+ { "mctp_socket", { COMMON_SOCK_PERMS, NULL } },
{ "perf_event",
{ "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } },
- { "anon_inode",
- { COMMON_FILE_PERMS, NULL } },
- { "io_uring",
- { "override_creds", "sqpoll", "cmd", NULL } },
- { "user_namespace",
- { "create", NULL } },
+ { "anon_inode", { COMMON_FILE_PERMS, NULL } },
+ { "io_uring", { "override_creds", "sqpoll", "cmd", NULL } },
+ { "user_namespace", { "create", NULL } },
{ NULL }
- };
+};
#if PF_MAX > 46
#error New address family defined, please update secclass_map.
diff --git a/security/selinux/include/conditional.h b/security/selinux/include/conditional.h
index 693a654714eb..5910bb7c2eca 100644
--- a/security/selinux/include/conditional.h
+++ b/security/selinux/include/conditional.h
@@ -13,8 +13,8 @@
#include "security.h"
-int security_get_bools(struct selinux_policy *policy,
- u32 *len, char ***names, int **values);
+int security_get_bools(struct selinux_policy *policy, u32 *len, char ***names,
+ int **values);
int security_set_bools(u32 len, int *values);
diff --git a/security/selinux/include/ima.h b/security/selinux/include/ima.h
index 93c05e97eb7f..38ab302f5946 100644
--- a/security/selinux/include/ima.h
+++ b/security/selinux/include/ima.h
@@ -25,4 +25,4 @@ static inline void selinux_ima_measure_state_locked(void)
}
#endif
-#endif /* _SELINUX_IMA_H_ */
+#endif /* _SELINUX_IMA_H_ */
diff --git a/security/selinux/include/initial_sid_to_string.h b/security/selinux/include/initial_sid_to_string.h
index ecc6e74fa09b..99b353b2abb4 100644
--- a/security/selinux/include/initial_sid_to_string.h
+++ b/security/selinux/include/initial_sid_to_string.h
@@ -3,33 +3,32 @@
#include <linux/stddef.h>
static const char *const initial_sid_to_string[] = {
- NULL,
- "kernel",
- "security",
- "unlabeled",
- NULL,
- "file",
- NULL,
- NULL,
- "any_socket",
- "port",
- "netif",
- "netmsg",
- "node",
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- "devnull",
+ NULL, /* zero placeholder, not used */
+ "kernel", /* kernel / SECINITSID_KERNEL */
+ "security", /* security / SECINITSID_SECURITY */
+ "unlabeled", /* unlabeled / SECINITSID_UNLABELED */
+ NULL, /* fs */
+ "file", /* file / SECINITSID_FILE */
+ NULL, /* file_labels */
+ "init", /* init / SECINITSID_INIT */
+ "any_socket", /* any_socket / SECINITSID_ANY_SOCKET */
+ "port", /* port / SECINITSID_PORT */
+ "netif", /* netif / SECINITSID_NETIF */
+ "netmsg", /* netmsg / SECINITSID_NETMSG */
+ "node", /* node / SECINITSID_NODE */
+ NULL, /* igmp_packet */
+ NULL, /* icmp_socket */
+ NULL, /* tcp_socket */
+ NULL, /* sysctl_modprobe */
+ NULL, /* sysctl */
+ NULL, /* sysctl_fs */
+ NULL, /* sysctl_kernel */
+ NULL, /* sysctl_net */
+ NULL, /* sysctl_net_unix */
+ NULL, /* sysctl_vm */
+ NULL, /* sysctl_dev */
+ NULL, /* kmod */
+ NULL, /* policy */
+ NULL, /* scmp_packet */
+ "devnull", /* devnull / SECINITSID_DEVNULL */
};
-
diff --git a/security/selinux/include/netif.h b/security/selinux/include/netif.h
index 85ec30d11144..2838bdc170dd 100644
--- a/security/selinux/include/netif.h
+++ b/security/selinux/include/netif.h
@@ -11,6 +11,7 @@
* Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
* Paul Moore <paul@paul-moore.com>
*/
+
#ifndef _SELINUX_NETIF_H_
#define _SELINUX_NETIF_H_
@@ -20,5 +21,4 @@ void sel_netif_flush(void);
int sel_netif_sid(struct net *ns, int ifindex, u32 *sid);
-#endif /* _SELINUX_NETIF_H_ */
-
+#endif /* _SELINUX_NETIF_H_ */
diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h
index 4d0456d3d459..5731c0dcd3e8 100644
--- a/security/selinux/include/netlabel.h
+++ b/security/selinux/include/netlabel.h
@@ -32,25 +32,19 @@ void selinux_netlbl_err(struct sk_buff *skb, u16 family, int error,
void selinux_netlbl_sk_security_free(struct sk_security_struct *sksec);
void selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec);
-int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
- u16 family,
- u32 *type,
+int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, u16 family, u32 *type,
u32 *sid);
-int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
- u16 family,
- u32 sid);
+int selinux_netlbl_skbuff_setsid(struct sk_buff *skb, u16 family, u32 sid);
int selinux_netlbl_sctp_assoc_request(struct sctp_association *asoc,
- struct sk_buff *skb);
+ struct sk_buff *skb);
int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family);
void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family);
void selinux_netlbl_sctp_sk_clone(struct sock *sk, struct sock *newsk);
int selinux_netlbl_socket_post_create(struct sock *sk, u16 family);
int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
- struct sk_buff *skb,
- u16 family,
+ struct sk_buff *skb, u16 family,
struct common_audit_data *ad);
-int selinux_netlbl_socket_setsockopt(struct socket *sock,
- int level,
+int selinux_netlbl_socket_setsockopt(struct socket *sock, int level,
int optname);
int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr);
int selinux_netlbl_socket_connect_locked(struct sock *sk,
@@ -62,44 +56,40 @@ static inline void selinux_netlbl_cache_invalidate(void)
return;
}
-static inline void selinux_netlbl_err(struct sk_buff *skb,
- u16 family,
- int error,
- int gateway)
+static inline void selinux_netlbl_err(struct sk_buff *skb, u16 family,
+ int error, int gateway)
{
return;
}
-static inline void selinux_netlbl_sk_security_free(
- struct sk_security_struct *sksec)
+static inline void
+selinux_netlbl_sk_security_free(struct sk_security_struct *sksec)
{
return;
}
-static inline void selinux_netlbl_sk_security_reset(
- struct sk_security_struct *sksec)
+static inline void
+selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec)
{
return;
}
-static inline int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
- u16 family,
- u32 *type,
- u32 *sid)
+static inline int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, u16 family,
+ u32 *type, u32 *sid)
{
*type = NETLBL_NLTYPE_NONE;
*sid = SECSID_NULL;
return 0;
}
-static inline int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
- u16 family,
+static inline int selinux_netlbl_skbuff_setsid(struct sk_buff *skb, u16 family,
u32 sid)
{
return 0;
}
-static inline int selinux_netlbl_sctp_assoc_request(struct sctp_association *asoc,
- struct sk_buff *skb)
+static inline int
+selinux_netlbl_sctp_assoc_request(struct sctp_association *asoc,
+ struct sk_buff *skb)
{
return 0;
}
@@ -117,21 +107,18 @@ static inline void selinux_netlbl_sctp_sk_clone(struct sock *sk,
{
return;
}
-static inline int selinux_netlbl_socket_post_create(struct sock *sk,
- u16 family)
+static inline int selinux_netlbl_socket_post_create(struct sock *sk, u16 family)
{
return 0;
}
static inline int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
- struct sk_buff *skb,
- u16 family,
+ struct sk_buff *skb, u16 family,
struct common_audit_data *ad)
{
return 0;
}
static inline int selinux_netlbl_socket_setsockopt(struct socket *sock,
- int level,
- int optname)
+ int level, int optname)
{
return 0;
}
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 8159fd53c3de..dea1d6f3ed2d 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -13,6 +13,7 @@
* Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
* Copyright (C) 2016 Mellanox Technologies
*/
+
#ifndef _SELINUX_OBJSEC_H_
#define _SELINUX_OBJSEC_H_
@@ -29,122 +30,122 @@
#include "avc.h"
struct task_security_struct {
- u32 osid; /* SID prior to last execve */
- u32 sid; /* current SID */
- u32 exec_sid; /* exec SID */
- u32 create_sid; /* fscreate SID */
- u32 keycreate_sid; /* keycreate SID */
- u32 sockcreate_sid; /* fscreate SID */
+ u32 osid; /* SID prior to last execve */
+ u32 sid; /* current SID */
+ u32 exec_sid; /* exec SID */
+ u32 create_sid; /* fscreate SID */
+ u32 keycreate_sid; /* keycreate SID */
+ u32 sockcreate_sid; /* fscreate SID */
} __randomize_layout;
enum label_initialized {
- LABEL_INVALID, /* invalid or not initialized */
- LABEL_INITIALIZED, /* initialized */
+ LABEL_INVALID, /* invalid or not initialized */
+ LABEL_INITIALIZED, /* initialized */
LABEL_PENDING
};
struct inode_security_struct {
- struct inode *inode; /* back pointer to inode object */
- struct list_head list; /* list of inode_security_struct */
- u32 task_sid; /* SID of creating task */
- u32 sid; /* SID of this object */
- u16 sclass; /* security class of this object */
- unsigned char initialized; /* initialization flag */
+ struct inode *inode; /* back pointer to inode object */
+ struct list_head list; /* list of inode_security_struct */
+ u32 task_sid; /* SID of creating task */
+ u32 sid; /* SID of this object */
+ u16 sclass; /* security class of this object */
+ unsigned char initialized; /* initialization flag */
spinlock_t lock;
};
struct file_security_struct {
- u32 sid; /* SID of open file description */
- u32 fown_sid; /* SID of file owner (for SIGIO) */
- u32 isid; /* SID of inode at the time of file open */
- u32 pseqno; /* Policy seqno at the time of file open */
+ u32 sid; /* SID of open file description */
+ u32 fown_sid; /* SID of file owner (for SIGIO) */
+ u32 isid; /* SID of inode at the time of file open */
+ u32 pseqno; /* Policy seqno at the time of file open */
};
struct superblock_security_struct {
- u32 sid; /* SID of file system superblock */
- u32 def_sid; /* default SID for labeling */
- u32 mntpoint_sid; /* SECURITY_FS_USE_MNTPOINT context for files */
- unsigned short behavior; /* labeling behavior */
- unsigned short flags; /* which mount options were specified */
+ u32 sid; /* SID of file system superblock */
+ u32 def_sid; /* default SID for labeling */
+ u32 mntpoint_sid; /* SECURITY_FS_USE_MNTPOINT context for files */
+ unsigned short behavior; /* labeling behavior */
+ unsigned short flags; /* which mount options were specified */
struct mutex lock;
struct list_head isec_head;
spinlock_t isec_lock;
};
struct msg_security_struct {
- u32 sid; /* SID of message */
+ u32 sid; /* SID of message */
};
struct ipc_security_struct {
- u16 sclass; /* security class of this object */
- u32 sid; /* SID of IPC resource */
+ u16 sclass; /* security class of this object */
+ u32 sid; /* SID of IPC resource */
};
struct netif_security_struct {
- struct net *ns; /* network namespace */
- int ifindex; /* device index */
- u32 sid; /* SID for this interface */
+ struct net *ns; /* network namespace */
+ int ifindex; /* device index */
+ u32 sid; /* SID for this interface */
};
struct netnode_security_struct {
union {
- __be32 ipv4; /* IPv4 node address */
- struct in6_addr ipv6; /* IPv6 node address */
+ __be32 ipv4; /* IPv4 node address */
+ struct in6_addr ipv6; /* IPv6 node address */
} addr;
- u32 sid; /* SID for this node */
- u16 family; /* address family */
+ u32 sid; /* SID for this node */
+ u16 family; /* address family */
};
struct netport_security_struct {
- u32 sid; /* SID for this node */
- u16 port; /* port number */
- u8 protocol; /* transport protocol */
+ u32 sid; /* SID for this node */
+ u16 port; /* port number */
+ u8 protocol; /* transport protocol */
};
struct sk_security_struct {
#ifdef CONFIG_NETLABEL
- enum { /* NetLabel state */
- NLBL_UNSET = 0,
- NLBL_REQUIRE,
- NLBL_LABELED,
- NLBL_REQSKB,
- NLBL_CONNLABELED,
+ enum { /* NetLabel state */
+ NLBL_UNSET = 0,
+ NLBL_REQUIRE,
+ NLBL_LABELED,
+ NLBL_REQSKB,
+ NLBL_CONNLABELED,
} nlbl_state;
struct netlbl_lsm_secattr *nlbl_secattr; /* NetLabel sec attributes */
#endif
- u32 sid; /* SID of this object */
- u32 peer_sid; /* SID of peer */
- u16 sclass; /* sock security class */
- enum { /* SCTP association state */
- SCTP_ASSOC_UNSET = 0,
- SCTP_ASSOC_SET,
+ u32 sid; /* SID of this object */
+ u32 peer_sid; /* SID of peer */
+ u16 sclass; /* sock security class */
+ enum { /* SCTP association state */
+ SCTP_ASSOC_UNSET = 0,
+ SCTP_ASSOC_SET,
} sctp_assoc_state;
};
struct tun_security_struct {
- u32 sid; /* SID for the tun device sockets */
+ u32 sid; /* SID for the tun device sockets */
};
struct key_security_struct {
- u32 sid; /* SID of key */
+ u32 sid; /* SID of key */
};
struct ib_security_struct {
- u32 sid; /* SID of the queue pair or MAD agent */
+ u32 sid; /* SID of the queue pair or MAD agent */
};
struct pkey_security_struct {
- u64 subnet_prefix; /* Port subnet prefix */
- u16 pkey; /* PKey number */
- u32 sid; /* SID of pkey */
+ u64 subnet_prefix; /* Port subnet prefix */
+ u16 pkey; /* PKey number */
+ u32 sid; /* SID of pkey */
};
struct bpf_security_struct {
- u32 sid; /* SID of bpf obj creator */
+ u32 sid; /* SID of bpf obj creator */
};
struct perf_event_security_struct {
- u32 sid; /* SID of perf_event obj creator */
+ u32 sid; /* SID of perf_event obj creator */
};
extern struct lsm_blob_sizes selinux_blob_sizes;
@@ -158,22 +159,22 @@ static inline struct file_security_struct *selinux_file(const struct file *file)
return file->f_security + selinux_blob_sizes.lbs_file;
}
-static inline struct inode_security_struct *selinux_inode(
- const struct inode *inode)
+static inline struct inode_security_struct *
+selinux_inode(const struct inode *inode)
{
if (unlikely(!inode->i_security))
return NULL;
return inode->i_security + selinux_blob_sizes.lbs_inode;
}
-static inline struct msg_security_struct *selinux_msg_msg(
- const struct msg_msg *msg_msg)
+static inline struct msg_security_struct *
+selinux_msg_msg(const struct msg_msg *msg_msg)
{
return msg_msg->security + selinux_blob_sizes.lbs_msg_msg;
}
-static inline struct ipc_security_struct *selinux_ipc(
- const struct kern_ipc_perm *ipc)
+static inline struct ipc_security_struct *
+selinux_ipc(const struct kern_ipc_perm *ipc)
{
return ipc->security + selinux_blob_sizes.lbs_ipc;
}
@@ -188,8 +189,8 @@ static inline u32 current_sid(void)
return tsec->sid;
}
-static inline struct superblock_security_struct *selinux_superblock(
- const struct super_block *superblock)
+static inline struct superblock_security_struct *
+selinux_superblock(const struct super_block *superblock)
{
return superblock->s_security + selinux_blob_sizes.lbs_superblock;
}
diff --git a/security/selinux/include/policycap.h b/security/selinux/include/policycap.h
index f35d3458e71d..dc3674eb29c1 100644
--- a/security/selinux/include/policycap.h
+++ b/security/selinux/include/policycap.h
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
#ifndef _SELINUX_POLICYCAP_H_
#define _SELINUX_POLICYCAP_H_
@@ -12,6 +13,7 @@ enum {
POLICYDB_CAP_NNP_NOSUID_TRANSITION,
POLICYDB_CAP_GENFS_SECLABEL_SYMLINKS,
POLICYDB_CAP_IOCTL_SKIP_CLOEXEC,
+ POLICYDB_CAP_USERSPACE_INITIAL_CONTEXT,
__POLICYDB_CAP_MAX
};
#define POLICYDB_CAP_MAX (__POLICYDB_CAP_MAX - 1)
diff --git a/security/selinux/include/policycap_names.h b/security/selinux/include/policycap_names.h
index 49bbe120d173..2cffcc1ce851 100644
--- a/security/selinux/include/policycap_names.h
+++ b/security/selinux/include/policycap_names.h
@@ -1,9 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
#ifndef _SELINUX_POLICYCAP_NAMES_H_
#define _SELINUX_POLICYCAP_NAMES_H_
#include "policycap.h"
+/* clang-format off */
/* Policy capability names */
const char *const selinux_policycap_names[__POLICYDB_CAP_MAX] = {
"network_peer_controls",
@@ -14,6 +16,8 @@ const char *const selinux_policycap_names[__POLICYDB_CAP_MAX] = {
"nnp_nosuid_transition",
"genfs_seclabel_symlinks",
"ioctl_skip_cloexec",
+ "userspace_initial_context",
};
+/* clang-format on */
#endif /* _SELINUX_POLICYCAP_NAMES_H_ */
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index a9de89af8fdc..289bf9233f71 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -21,57 +21,57 @@
#include "flask.h"
#include "policycap.h"
-#define SECSID_NULL 0x00000000 /* unspecified SID */
-#define SECSID_WILD 0xffffffff /* wildcard SID */
-#define SECCLASS_NULL 0x0000 /* no class */
+#define SECSID_NULL 0x00000000 /* unspecified SID */
+#define SECSID_WILD 0xffffffff /* wildcard SID */
+#define SECCLASS_NULL 0x0000 /* no class */
/* Identify specific policy version changes */
-#define POLICYDB_VERSION_BASE 15
-#define POLICYDB_VERSION_BOOL 16
-#define POLICYDB_VERSION_IPV6 17
-#define POLICYDB_VERSION_NLCLASS 18
-#define POLICYDB_VERSION_VALIDATETRANS 19
-#define POLICYDB_VERSION_MLS 19
-#define POLICYDB_VERSION_AVTAB 20
-#define POLICYDB_VERSION_RANGETRANS 21
-#define POLICYDB_VERSION_POLCAP 22
-#define POLICYDB_VERSION_PERMISSIVE 23
-#define POLICYDB_VERSION_BOUNDARY 24
-#define POLICYDB_VERSION_FILENAME_TRANS 25
-#define POLICYDB_VERSION_ROLETRANS 26
-#define POLICYDB_VERSION_NEW_OBJECT_DEFAULTS 27
-#define POLICYDB_VERSION_DEFAULT_TYPE 28
-#define POLICYDB_VERSION_CONSTRAINT_NAMES 29
-#define POLICYDB_VERSION_XPERMS_IOCTL 30
-#define POLICYDB_VERSION_INFINIBAND 31
-#define POLICYDB_VERSION_GLBLUB 32
-#define POLICYDB_VERSION_COMP_FTRANS 33 /* compressed filename transitions */
+#define POLICYDB_VERSION_BASE 15
+#define POLICYDB_VERSION_BOOL 16
+#define POLICYDB_VERSION_IPV6 17
+#define POLICYDB_VERSION_NLCLASS 18
+#define POLICYDB_VERSION_VALIDATETRANS 19
+#define POLICYDB_VERSION_MLS 19
+#define POLICYDB_VERSION_AVTAB 20
+#define POLICYDB_VERSION_RANGETRANS 21
+#define POLICYDB_VERSION_POLCAP 22
+#define POLICYDB_VERSION_PERMISSIVE 23
+#define POLICYDB_VERSION_BOUNDARY 24
+#define POLICYDB_VERSION_FILENAME_TRANS 25
+#define POLICYDB_VERSION_ROLETRANS 26
+#define POLICYDB_VERSION_NEW_OBJECT_DEFAULTS 27
+#define POLICYDB_VERSION_DEFAULT_TYPE 28
+#define POLICYDB_VERSION_CONSTRAINT_NAMES 29
+#define POLICYDB_VERSION_XPERMS_IOCTL 30
+#define POLICYDB_VERSION_INFINIBAND 31
+#define POLICYDB_VERSION_GLBLUB 32
+#define POLICYDB_VERSION_COMP_FTRANS 33 /* compressed filename transitions */
/* Range of policy versions we understand*/
-#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE
-#define POLICYDB_VERSION_MAX POLICYDB_VERSION_COMP_FTRANS
+#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE
+#define POLICYDB_VERSION_MAX POLICYDB_VERSION_COMP_FTRANS
/* Mask for just the mount related flags */
-#define SE_MNTMASK 0x0f
+#define SE_MNTMASK 0x0f
/* Super block security struct flags for mount options */
/* BE CAREFUL, these need to be the low order bits for selinux_get_mnt_opts */
#define CONTEXT_MNT 0x01
#define FSCONTEXT_MNT 0x02
-#define ROOTCONTEXT_MNT 0x04
+#define ROOTCONTEXT_MNT 0x04
#define DEFCONTEXT_MNT 0x08
#define SBLABEL_MNT 0x10
/* Non-mount related flags */
-#define SE_SBINITIALIZED 0x0100
-#define SE_SBPROC 0x0200
-#define SE_SBGENFS 0x0400
-#define SE_SBGENFS_XATTR 0x0800
-#define SE_SBNATIVE 0x1000
+#define SE_SBINITIALIZED 0x0100
+#define SE_SBPROC 0x0200
+#define SE_SBGENFS 0x0400
+#define SE_SBGENFS_XATTR 0x0800
+#define SE_SBNATIVE 0x1000
#define CONTEXT_STR "context"
#define FSCONTEXT_STR "fscontext"
-#define ROOTCONTEXT_STR "rootcontext"
+#define ROOTCONTEXT_STR "rootcontext"
#define DEFCONTEXT_STR "defcontext"
-#define SECLABEL_STR "seclabel"
+#define SECLABEL_STR "seclabel"
struct netlbl_lsm_secattr;
@@ -81,11 +81,11 @@ extern int selinux_enabled_boot;
* type_datum properties
* available at the kernel policy version >= POLICYDB_VERSION_BOUNDARY
*/
-#define TYPEDATUM_PROPERTY_PRIMARY 0x0001
-#define TYPEDATUM_PROPERTY_ATTRIBUTE 0x0002
+#define TYPEDATUM_PROPERTY_PRIMARY 0x0001
+#define TYPEDATUM_PROPERTY_ATTRIBUTE 0x0002
/* limitation of boundary depth */
-#define POLICYDB_BOUNDS_MAXDEPTH 4
+#define POLICYDB_BOUNDS_MAXDEPTH 4
struct selinux_policy;
@@ -189,6 +189,12 @@ static inline bool selinux_policycap_ioctl_skip_cloexec(void)
selinux_state.policycap[POLICYDB_CAP_IOCTL_SKIP_CLOEXEC]);
}
+static inline bool selinux_policycap_userspace_initial_context(void)
+{
+ return READ_ONCE(
+ selinux_state.policycap[POLICYDB_CAP_USERSPACE_INITIAL_CONTEXT]);
+}
+
struct selinux_policy_convert_data;
struct selinux_load_state {
@@ -214,12 +220,12 @@ struct av_decision {
u32 flags;
};
-#define XPERMS_ALLOWED 1
+#define XPERMS_ALLOWED 1
#define XPERMS_AUDITALLOW 2
-#define XPERMS_DONTAUDIT 4
+#define XPERMS_DONTAUDIT 4
-#define security_xperm_set(perms, x) ((perms)[(x) >> 5] |= 1 << ((x) & 0x1f))
-#define security_xperm_test(perms, x) (1 & ((perms)[(x) >> 5] >> ((x) & 0x1f)))
+#define security_xperm_set(perms, x) ((perms)[(x) >> 5] |= 1 << ((x)&0x1f))
+#define security_xperm_test(perms, x) (1 & ((perms)[(x) >> 5] >> ((x)&0x1f)))
struct extended_perms_data {
u32 p[8];
};
@@ -233,23 +239,22 @@ struct extended_perms_decision {
};
struct extended_perms {
- u16 len; /* length associated decision chain */
+ u16 len; /* length associated decision chain */
struct extended_perms_data drivers; /* flag drivers that are used */
};
/* definitions of av_decision.flags */
-#define AVD_FLAGS_PERMISSIVE 0x0001
+#define AVD_FLAGS_PERMISSIVE 0x0001
-void security_compute_av(u32 ssid, u32 tsid,
- u16 tclass, struct av_decision *avd,
+void security_compute_av(u32 ssid, u32 tsid, u16 tclass,
+ struct av_decision *avd,
struct extended_perms *xperms);
-void security_compute_xperms_decision(u32 ssid, u32 tsid, u16 tclass,
- u8 driver,
+void security_compute_xperms_decision(u32 ssid, u32 tsid, u16 tclass, u8 driver,
struct extended_perms_decision *xpermd);
-void security_compute_av_user(u32 ssid, u32 tsid,
- u16 tclass, struct av_decision *avd);
+void security_compute_av_user(u32 ssid, u32 tsid, u16 tclass,
+ struct av_decision *avd);
int security_transition_sid(u32 ssid, u32 tsid, u16 tclass,
const struct qstr *qstr, u32 *out_sid);
@@ -288,8 +293,7 @@ int security_ib_endport_sid(const char *dev_name, u8 port_num, u32 *out_sid);
int security_netif_sid(char *name, u32 *if_sid);
-int security_node_sid(u16 domain, void *addr, u32 addrlen,
- u32 *out_sid);
+int security_node_sid(u16 domain, void *addr, u32 addrlen, u32 *out_sid);
int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
u16 tclass);
@@ -301,50 +305,47 @@ int security_bounded_transition(u32 oldsid, u32 newsid);
int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid);
-int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
- u32 xfrm_sid,
+int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type, u32 xfrm_sid,
u32 *peer_sid);
-int security_get_classes(struct selinux_policy *policy,
- char ***classes, u32 *nclasses);
-int security_get_permissions(struct selinux_policy *policy,
- const char *class, char ***perms, u32 *nperms);
+int security_get_classes(struct selinux_policy *policy, char ***classes,
+ u32 *nclasses);
+int security_get_permissions(struct selinux_policy *policy, const char *class,
+ char ***perms, u32 *nperms);
int security_get_reject_unknown(void);
int security_get_allow_unknown(void);
-#define SECURITY_FS_USE_XATTR 1 /* use xattr */
-#define SECURITY_FS_USE_TRANS 2 /* use transition SIDs, e.g. devpts/tmpfs */
-#define SECURITY_FS_USE_TASK 3 /* use task SIDs, e.g. pipefs/sockfs */
-#define SECURITY_FS_USE_GENFS 4 /* use the genfs support */
-#define SECURITY_FS_USE_NONE 5 /* no labeling support */
-#define SECURITY_FS_USE_MNTPOINT 6 /* use mountpoint labeling */
-#define SECURITY_FS_USE_NATIVE 7 /* use native label support */
-#define SECURITY_FS_USE_MAX 7 /* Highest SECURITY_FS_USE_XXX */
+#define SECURITY_FS_USE_XATTR 1 /* use xattr */
+#define SECURITY_FS_USE_TRANS 2 /* use transition SIDs, e.g. devpts/tmpfs */
+#define SECURITY_FS_USE_TASK 3 /* use task SIDs, e.g. pipefs/sockfs */
+#define SECURITY_FS_USE_GENFS 4 /* use the genfs support */
+#define SECURITY_FS_USE_NONE 5 /* no labeling support */
+#define SECURITY_FS_USE_MNTPOINT 6 /* use mountpoint labeling */
+#define SECURITY_FS_USE_NATIVE 7 /* use native label support */
+#define SECURITY_FS_USE_MAX 7 /* Highest SECURITY_FS_USE_XXX */
int security_fs_use(struct super_block *sb);
int security_genfs_sid(const char *fstype, const char *path, u16 sclass,
u32 *sid);
-int selinux_policy_genfs_sid(struct selinux_policy *policy,
- const char *fstype, const char *path, u16 sclass,
- u32 *sid);
+int selinux_policy_genfs_sid(struct selinux_policy *policy, const char *fstype,
+ const char *path, u16 sclass, u32 *sid);
#ifdef CONFIG_NETLABEL
int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
u32 *sid);
-int security_netlbl_sid_to_secattr(u32 sid,
- struct netlbl_lsm_secattr *secattr);
+int security_netlbl_sid_to_secattr(u32 sid, struct netlbl_lsm_secattr *secattr);
#else
-static inline int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
- u32 *sid)
+static inline int
+security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr, u32 *sid)
{
return -EIDRM;
}
-static inline int security_netlbl_sid_to_secattr(u32 sid,
- struct netlbl_lsm_secattr *secattr)
+static inline int
+security_netlbl_sid_to_secattr(u32 sid, struct netlbl_lsm_secattr *secattr)
{
return -ENOENT;
}
@@ -357,13 +358,13 @@ const char *security_get_initial_sid_context(u32 sid);
*/
extern struct page *selinux_kernel_status_page(void);
-#define SELINUX_KERNEL_STATUS_VERSION 1
+#define SELINUX_KERNEL_STATUS_VERSION 1
struct selinux_kernel_status {
- u32 version; /* version number of the structure */
- u32 sequence; /* sequence number of seqlock logic */
- u32 enforcing; /* current setting of enforcing mode */
- u32 policyload; /* times of policy reloaded */
- u32 deny_unknown; /* current setting of deny_unknown */
+ u32 version; /* version number of the structure */
+ u32 sequence; /* sequence number of seqlock logic */
+ u32 enforcing; /* current setting of enforcing mode */
+ u32 policyload; /* times of policy reloaded */
+ u32 deny_unknown; /* current setting of deny_unknown */
/*
* The version > 0 supports above members.
*/
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index c75839860200..de485556ae29 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -5,6 +5,7 @@
* Author : Trent Jaeger, <jaegert@us.ibm.com>
* Updated : Venkat Yekkirala, <vyekkirala@TrustedCS.com>
*/
+
#ifndef _SELINUX_XFRM_H_
#define _SELINUX_XFRM_H_
@@ -13,8 +14,7 @@
#include <net/xfrm.h>
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *uctx,
- gfp_t gfp);
+ struct xfrm_user_sec_ctx *uctx, gfp_t gfp);
int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp);
void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 6c596ae7fef9..0619a1cbbfbe 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -336,12 +336,9 @@ static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
unsigned long *ino);
/* declaration for sel_make_policy_nodes */
-static struct dentry *sel_make_disconnected_dir(struct super_block *sb,
+static struct dentry *sel_make_swapover_dir(struct super_block *sb,
unsigned long *ino);
-/* declaration for sel_make_policy_nodes */
-static void sel_remove_entries(struct dentry *de);
-
static ssize_t sel_read_mls(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -508,13 +505,13 @@ static int sel_make_policy_nodes(struct selinux_fs_info *fsi,
struct selinux_policy *newpolicy)
{
int ret = 0;
- struct dentry *tmp_parent, *tmp_bool_dir, *tmp_class_dir, *old_dentry;
- unsigned int tmp_bool_num, old_bool_num;
- char **tmp_bool_names, **old_bool_names;
- int *tmp_bool_values, *old_bool_values;
+ struct dentry *tmp_parent, *tmp_bool_dir, *tmp_class_dir;
+ unsigned int bool_num = 0;
+ char **bool_names = NULL;
+ int *bool_values = NULL;
unsigned long tmp_ino = fsi->last_ino; /* Don't increment last_ino in this function */
- tmp_parent = sel_make_disconnected_dir(fsi->sb, &tmp_ino);
+ tmp_parent = sel_make_swapover_dir(fsi->sb, &tmp_ino);
if (IS_ERR(tmp_parent))
return PTR_ERR(tmp_parent);
@@ -532,8 +529,8 @@ static int sel_make_policy_nodes(struct selinux_fs_info *fsi,
goto out;
}
- ret = sel_make_bools(newpolicy, tmp_bool_dir, &tmp_bool_num,
- &tmp_bool_names, &tmp_bool_values);
+ ret = sel_make_bools(newpolicy, tmp_bool_dir, &bool_num,
+ &bool_names, &bool_values);
if (ret)
goto out;
@@ -542,38 +539,30 @@ static int sel_make_policy_nodes(struct selinux_fs_info *fsi,
if (ret)
goto out;
+ lock_rename(tmp_parent, fsi->sb->s_root);
+
/* booleans */
- old_dentry = fsi->bool_dir;
- lock_rename(tmp_bool_dir, old_dentry);
d_exchange(tmp_bool_dir, fsi->bool_dir);
- old_bool_num = fsi->bool_num;
- old_bool_names = fsi->bool_pending_names;
- old_bool_values = fsi->bool_pending_values;
-
- fsi->bool_num = tmp_bool_num;
- fsi->bool_pending_names = tmp_bool_names;
- fsi->bool_pending_values = tmp_bool_values;
-
- sel_remove_old_bool_data(old_bool_num, old_bool_names, old_bool_values);
+ swap(fsi->bool_num, bool_num);
+ swap(fsi->bool_pending_names, bool_names);
+ swap(fsi->bool_pending_values, bool_values);
fsi->bool_dir = tmp_bool_dir;
- unlock_rename(tmp_bool_dir, old_dentry);
/* classes */
- old_dentry = fsi->class_dir;
- lock_rename(tmp_class_dir, old_dentry);
d_exchange(tmp_class_dir, fsi->class_dir);
fsi->class_dir = tmp_class_dir;
- unlock_rename(tmp_class_dir, old_dentry);
+
+ unlock_rename(tmp_parent, fsi->sb->s_root);
out:
+ sel_remove_old_bool_data(bool_num, bool_names, bool_values);
/* Since the other temporary dirs are children of tmp_parent
* this will handle all the cleanup in the case of a failure before
* the swapover
*/
- sel_remove_entries(tmp_parent);
- dput(tmp_parent); /* d_genocide() only handles the children */
+ simple_recursive_removal(tmp_parent, NULL);
return ret;
}
@@ -1351,54 +1340,48 @@ static const struct file_operations sel_commit_bools_ops = {
.llseek = generic_file_llseek,
};
-static void sel_remove_entries(struct dentry *de)
-{
- d_genocide(de);
- shrink_dcache_parent(de);
-}
-
static int sel_make_bools(struct selinux_policy *newpolicy, struct dentry *bool_dir,
unsigned int *bool_num, char ***bool_pending_names,
int **bool_pending_values)
{
int ret;
- ssize_t len;
- struct dentry *dentry = NULL;
- struct inode *inode = NULL;
- struct inode_security_struct *isec;
- char **names = NULL, *page;
+ char **names, *page;
u32 i, num;
- int *values = NULL;
- u32 sid;
- ret = -ENOMEM;
page = (char *)get_zeroed_page(GFP_KERNEL);
if (!page)
- goto out;
+ return -ENOMEM;
- ret = security_get_bools(newpolicy, &num, &names, &values);
+ ret = security_get_bools(newpolicy, &num, &names, bool_pending_values);
if (ret)
goto out;
+ *bool_num = num;
+ *bool_pending_names = names;
+
for (i = 0; i < num; i++) {
- ret = -ENOMEM;
+ struct dentry *dentry;
+ struct inode *inode;
+ struct inode_security_struct *isec;
+ ssize_t len;
+ u32 sid;
+
+ len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
+ if (len >= PAGE_SIZE) {
+ ret = -ENAMETOOLONG;
+ break;
+ }
dentry = d_alloc_name(bool_dir, names[i]);
- if (!dentry)
- goto out;
+ if (!dentry) {
+ ret = -ENOMEM;
+ break;
+ }
- ret = -ENOMEM;
inode = sel_make_inode(bool_dir->d_sb, S_IFREG | S_IRUGO | S_IWUSR);
if (!inode) {
dput(dentry);
- goto out;
- }
-
- ret = -ENAMETOOLONG;
- len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
- if (len >= PAGE_SIZE) {
- dput(dentry);
- iput(inode);
- goto out;
+ ret = -ENOMEM;
+ break;
}
isec = selinux_inode(inode);
@@ -1416,23 +1399,8 @@ static int sel_make_bools(struct selinux_policy *newpolicy, struct dentry *bool_
inode->i_ino = i|SEL_BOOL_INO_OFFSET;
d_add(dentry, inode);
}
- *bool_num = num;
- *bool_pending_names = names;
- *bool_pending_values = values;
-
- free_page((unsigned long)page);
- return 0;
out:
free_page((unsigned long)page);
-
- if (names) {
- for (i = 0; i < num; i++)
- kfree(names[i]);
- kfree(names);
- }
- kfree(values);
- sel_remove_entries(bool_dir);
-
return ret;
}
@@ -1961,20 +1929,40 @@ static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
return dentry;
}
-static struct dentry *sel_make_disconnected_dir(struct super_block *sb,
+static int reject_all(struct mnt_idmap *idmap, struct inode *inode, int mask)
+{
+ return -EPERM; // no access for anyone, root or no root.
+}
+
+static const struct inode_operations swapover_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .permission = reject_all,
+};
+
+static struct dentry *sel_make_swapover_dir(struct super_block *sb,
unsigned long *ino)
{
- struct inode *inode = sel_make_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO);
+ struct dentry *dentry = d_alloc_name(sb->s_root, ".swapover");
+ struct inode *inode;
- if (!inode)
+ if (!dentry)
return ERR_PTR(-ENOMEM);
- inode->i_op = &simple_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
+ inode = sel_make_inode(sb, S_IFDIR);
+ if (!inode) {
+ dput(dentry);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ inode->i_op = &swapover_dir_inode_operations;
inode->i_ino = ++(*ino);
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
- return d_obtain_alias(inode);
+ inode_lock(sb->s_root->d_inode);
+ d_add(dentry, inode);
+ inc_nlink(sb->s_root->d_inode);
+ inode_unlock(sb->s_root->d_inode);
+ return dentry;
}
#define NULL_FILE_NAME "null"
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index 8751a602ead2..697eb4352439 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -96,12 +96,34 @@ avtab_insert_node(struct avtab *h, struct avtab_node **dst,
return newnode;
}
+static int avtab_node_cmp(const struct avtab_key *key1,
+ const struct avtab_key *key2)
+{
+ u16 specified = key1->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+
+ if (key1->source_type == key2->source_type &&
+ key1->target_type == key2->target_type &&
+ key1->target_class == key2->target_class &&
+ (specified & key2->specified))
+ return 0;
+ if (key1->source_type < key2->source_type)
+ return -1;
+ if (key1->source_type == key2->source_type &&
+ key1->target_type < key2->target_type)
+ return -1;
+ if (key1->source_type == key2->source_type &&
+ key1->target_type == key2->target_type &&
+ key1->target_class < key2->target_class)
+ return -1;
+ return 1;
+}
+
static int avtab_insert(struct avtab *h, const struct avtab_key *key,
const struct avtab_datum *datum)
{
u32 hvalue;
struct avtab_node *prev, *cur, *newnode;
- u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+ int cmp;
if (!h || !h->nslot || h->nel == U32_MAX)
return -EINVAL;
@@ -110,23 +132,11 @@ static int avtab_insert(struct avtab *h, const struct avtab_key *key,
for (prev = NULL, cur = h->htable[hvalue];
cur;
prev = cur, cur = cur->next) {
- if (key->source_type == cur->key.source_type &&
- key->target_type == cur->key.target_type &&
- key->target_class == cur->key.target_class &&
- (specified & cur->key.specified)) {
- /* extended perms may not be unique */
- if (specified & AVTAB_XPERMS)
- break;
+ cmp = avtab_node_cmp(key, &cur->key);
+ /* extended perms may not be unique */
+ if (cmp == 0 && !(key->specified & AVTAB_XPERMS))
return -EEXIST;
- }
- if (key->source_type < cur->key.source_type)
- break;
- if (key->source_type == cur->key.source_type &&
- key->target_type < cur->key.target_type)
- break;
- if (key->source_type == cur->key.source_type &&
- key->target_type == cur->key.target_type &&
- key->target_class < cur->key.target_class)
+ if (cmp <= 0)
break;
}
@@ -148,7 +158,7 @@ struct avtab_node *avtab_insert_nonunique(struct avtab *h,
{
u32 hvalue;
struct avtab_node *prev, *cur;
- u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+ int cmp;
if (!h || !h->nslot || h->nel == U32_MAX)
return NULL;
@@ -156,19 +166,8 @@ struct avtab_node *avtab_insert_nonunique(struct avtab *h,
for (prev = NULL, cur = h->htable[hvalue];
cur;
prev = cur, cur = cur->next) {
- if (key->source_type == cur->key.source_type &&
- key->target_type == cur->key.target_type &&
- key->target_class == cur->key.target_class &&
- (specified & cur->key.specified))
- break;
- if (key->source_type < cur->key.source_type)
- break;
- if (key->source_type == cur->key.source_type &&
- key->target_type < cur->key.target_type)
- break;
- if (key->source_type == cur->key.source_type &&
- key->target_type == cur->key.target_type &&
- key->target_class < cur->key.target_class)
+ cmp = avtab_node_cmp(key, &cur->key);
+ if (cmp <= 0)
break;
}
return avtab_insert_node(h, prev ? &prev->next : &h->htable[hvalue],
@@ -183,7 +182,7 @@ struct avtab_node *avtab_search_node(struct avtab *h,
{
u32 hvalue;
struct avtab_node *cur;
- u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+ int cmp;
if (!h || !h->nslot)
return NULL;
@@ -191,20 +190,10 @@ struct avtab_node *avtab_search_node(struct avtab *h,
hvalue = avtab_hash(key, h->mask);
for (cur = h->htable[hvalue]; cur;
cur = cur->next) {
- if (key->source_type == cur->key.source_type &&
- key->target_type == cur->key.target_type &&
- key->target_class == cur->key.target_class &&
- (specified & cur->key.specified))
+ cmp = avtab_node_cmp(key, &cur->key);
+ if (cmp == 0)
return cur;
-
- if (key->source_type < cur->key.source_type)
- break;
- if (key->source_type == cur->key.source_type &&
- key->target_type < cur->key.target_type)
- break;
- if (key->source_type == cur->key.source_type &&
- key->target_type == cur->key.target_type &&
- key->target_class < cur->key.target_class)
+ if (cmp < 0)
break;
}
return NULL;
@@ -213,27 +202,19 @@ struct avtab_node *avtab_search_node(struct avtab *h,
struct avtab_node*
avtab_search_node_next(struct avtab_node *node, u16 specified)
{
+ struct avtab_key tmp_key;
struct avtab_node *cur;
+ int cmp;
if (!node)
return NULL;
-
- specified &= ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+ tmp_key = node->key;
+ tmp_key.specified = specified;
for (cur = node->next; cur; cur = cur->next) {
- if (node->key.source_type == cur->key.source_type &&
- node->key.target_type == cur->key.target_type &&
- node->key.target_class == cur->key.target_class &&
- (specified & cur->key.specified))
+ cmp = avtab_node_cmp(&tmp_key, &cur->key);
+ if (cmp == 0)
return cur;
-
- if (node->key.source_type < cur->key.source_type)
- break;
- if (node->key.source_type == cur->key.source_type &&
- node->key.target_type < cur->key.target_type)
- break;
- if (node->key.source_type == cur->key.source_type &&
- node->key.target_type == cur->key.target_type &&
- node->key.target_class < cur->key.target_class)
+ if (cmp < 0)
break;
}
return NULL;
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 595a435ea9c8..3b19ad28c922 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -409,16 +409,9 @@ out:
static u32 filenametr_hash(const void *k)
{
const struct filename_trans_key *ft = k;
- unsigned long hash;
- unsigned int byte_num;
- unsigned char focus;
+ unsigned long salt = ft->ttype ^ ft->tclass;
- hash = ft->ttype ^ ft->tclass;
-
- byte_num = 0;
- while ((focus = ft->name[byte_num++]))
- hash = partial_name_hash(focus, hash);
- return hash;
+ return full_name_hash((void *)salt, ft->name, strlen(ft->name));
}
static int filenametr_cmp(const void *k1, const void *k2)
@@ -864,6 +857,8 @@ void policydb_destroy(struct policydb *p)
int policydb_load_isids(struct policydb *p, struct sidtab *s)
{
struct ocontext *head, *c;
+ bool isid_init_supported = ebitmap_get_bit(&p->policycaps,
+ POLICYDB_CAP_USERSPACE_INITIAL_CONTEXT);
int rc;
rc = sidtab_init(s);
@@ -887,6 +882,13 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
if (!name)
continue;
+ /*
+ * Also ignore SECINITSID_INIT if the policy doesn't declare
+ * support for it
+ */
+ if (sid == SECINITSID_INIT && !isid_init_supported)
+ continue;
+
rc = sidtab_set_initial(s, sid, &c->context[0]);
if (rc) {
pr_err("SELinux: unable to load initial SID %s.\n",
@@ -894,6 +896,24 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
sidtab_destroy(s);
return rc;
}
+
+ /*
+ * If the policy doesn't support the "userspace_initial_context"
+ * capability, set SECINITSID_INIT to the same context as
+ * SECINITSID_KERNEL. This ensures the same behavior as before
+ * the reintroduction of SECINITSID_INIT, where all tasks
+ * started before policy load would initially get the context
+ * corresponding to SECINITSID_KERNEL.
+ */
+ if (sid == SECINITSID_KERNEL && !isid_init_supported) {
+ rc = sidtab_set_initial(s, SECINITSID_INIT, &c->context[0]);
+ if (rc) {
+ pr_err("SELinux: unable to load initial SID %s.\n",
+ name);
+ sidtab_destroy(s);
+ return rc;
+ }
+ }
}
return 0;
}
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 1eeffc66ea7d..e88b1b6c4adb 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1322,8 +1322,19 @@ static int security_sid_to_context_core(u32 sid, char **scontext,
if (!selinux_initialized()) {
if (sid <= SECINITSID_NUM) {
char *scontextp;
- const char *s = initial_sid_to_string[sid];
+ const char *s;
+ /*
+ * Before the policy is loaded, translate
+ * SECINITSID_INIT to "kernel", because systemd and
+ * libselinux < 2.6 take a getcon_raw() result that is
+ * both non-null and not "kernel" to mean that a policy
+ * is already loaded.
+ */
+ if (sid == SECINITSID_INIT)
+ sid = SECINITSID_KERNEL;
+
+ s = initial_sid_to_string[sid];
if (!s)
return -EINVAL;
*scontext_len = strlen(s) + 1;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 65130a791f57..c126f6a16de4 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -43,6 +43,7 @@
#include <linux/fs_parser.h>
#include <linux/watch_queue.h>
#include <linux/io_uring.h>
+#include <uapi/linux/lsm.h>
#include "smack.h"
#define TRANS_TRUE "TRUE"
@@ -3626,6 +3627,35 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
}
/**
+ * smack_getselfattr - Smack current process attribute
+ * @attr: which attribute to fetch
+ * @ctx: buffer to receive the result
+ * @size: available size in, actual size out
+ * @flags: unused
+ *
+ * Fill the passed user space @ctx with the details of the requested
+ * attribute.
+ *
+ * Returns the number of attributes on success, an error code otherwise.
+ * There will only ever be one attribute.
+ */
+static int smack_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
+ size_t *size, u32 flags)
+{
+ int rc;
+ struct smack_known *skp;
+
+ if (attr != LSM_ATTR_CURRENT)
+ return -EOPNOTSUPP;
+
+ skp = smk_of_current();
+ rc = lsm_fill_user_ctx(ctx, size,
+ skp->smk_known, strlen(skp->smk_known) + 1,
+ LSM_ID_SMACK, 0);
+ return (!rc ? 1 : rc);
+}
+
+/**
* smack_getprocattr - Smack process attribute access
* @p: the object task
* @name: the name of the attribute in /proc/.../attr
@@ -3654,8 +3684,8 @@ static int smack_getprocattr(struct task_struct *p, const char *name, char **val
}
/**
- * smack_setprocattr - Smack process attribute setting
- * @name: the name of the attribute in /proc/.../attr
+ * do_setattr - Smack process attribute setting
+ * @attr: the ID of the attribute
* @value: the value to set
* @size: the size of the value
*
@@ -3664,7 +3694,7 @@ static int smack_getprocattr(struct task_struct *p, const char *name, char **val
*
* Returns the length of the smack label or an error code
*/
-static int smack_setprocattr(const char *name, void *value, size_t size)
+static int do_setattr(u64 attr, void *value, size_t size)
{
struct task_smack *tsp = smack_cred(current_cred());
struct cred *new;
@@ -3678,8 +3708,8 @@ static int smack_setprocattr(const char *name, void *value, size_t size)
if (value == NULL || size == 0 || size >= SMK_LONGLABEL)
return -EINVAL;
- if (strcmp(name, "current") != 0)
- return -EINVAL;
+ if (attr != LSM_ATTR_CURRENT)
+ return -EOPNOTSUPP;
skp = smk_import_entry(value, size);
if (IS_ERR(skp))
@@ -3719,6 +3749,49 @@ static int smack_setprocattr(const char *name, void *value, size_t size)
}
/**
+ * smack_setselfattr - Set a Smack process attribute
+ * @attr: which attribute to set
+ * @ctx: buffer containing the data
+ * @size: size of @ctx
+ * @flags: unused
+ *
+ * Set the Smack process attribute for the current task from the
+ * value supplied in @ctx.
+ *
+ * Returns 0 on success, an error code otherwise.
+ */
+static int smack_setselfattr(unsigned int attr, struct lsm_ctx *ctx,
+ size_t size, u32 flags)
+{
+ int rc;
+
+ rc = do_setattr(attr, ctx->ctx, ctx->ctx_len);
+ if (rc > 0)
+ return 0;
+ return rc;
+}
+
+/**
+ * smack_setprocattr - Smack process attribute setting
+ * @name: the name of the attribute in /proc/.../attr
+ * @value: the value to set
+ * @size: the size of the value
+ *
+ * Sets the Smack value of the task. Only setting self
+ * is permitted and only with privilege
+ *
+ * Returns the length of the smack label or an error code
+ */
+static int smack_setprocattr(const char *name, void *value, size_t size)
+{
+ int attr = lsm_name_to_attr(name);
+
+ if (attr != LSM_ATTR_UNDEF)
+ return do_setattr(attr, value, size);
+ return -EINVAL;
+}
+
+/**
* smack_unix_stream_connect - Smack access on UDS
* @sock: one sock
* @other: the other sock
@@ -4933,6 +5006,11 @@ struct lsm_blob_sizes smack_blob_sizes __ro_after_init = {
.lbs_xattr_count = SMACK_INODE_INIT_XATTRS,
};
+static const struct lsm_id smack_lsmid = {
+ .name = "smack",
+ .id = LSM_ID_SMACK,
+};
+
static struct security_hook_list smack_hooks[] __ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, smack_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, smack_ptrace_traceme),
@@ -4973,6 +5051,7 @@ static struct security_hook_list smack_hooks[] __ro_after_init = {
LSM_HOOK_INIT(file_alloc_security, smack_file_alloc_security),
LSM_HOOK_INIT(file_ioctl, smack_file_ioctl),
+ LSM_HOOK_INIT(file_ioctl_compat, smack_file_ioctl),
LSM_HOOK_INIT(file_lock, smack_file_lock),
LSM_HOOK_INIT(file_fcntl, smack_file_fcntl),
LSM_HOOK_INIT(mmap_file, smack_mmap_file),
@@ -5027,6 +5106,8 @@ static struct security_hook_list smack_hooks[] __ro_after_init = {
LSM_HOOK_INIT(d_instantiate, smack_d_instantiate),
+ LSM_HOOK_INIT(getselfattr, smack_getselfattr),
+ LSM_HOOK_INIT(setselfattr, smack_setselfattr),
LSM_HOOK_INIT(getprocattr, smack_getprocattr),
LSM_HOOK_INIT(setprocattr, smack_setprocattr),
@@ -5140,7 +5221,7 @@ static __init int smack_init(void)
/*
* Register with LSM
*/
- security_add_hooks(smack_hooks, ARRAY_SIZE(smack_hooks), "smack");
+ security_add_hooks(smack_hooks, ARRAY_SIZE(smack_hooks), &smack_lsmid);
smack_enabled = 1;
pr_info("Smack: Initializing.\n");
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 255f1b470295..3c3af149bf1c 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -6,6 +6,7 @@
*/
#include <linux/lsm_hooks.h>
+#include <uapi/linux/lsm.h>
#include "common.h"
/**
@@ -542,6 +543,11 @@ static void tomoyo_task_free(struct task_struct *task)
}
}
+static const struct lsm_id tomoyo_lsmid = {
+ .name = "tomoyo",
+ .id = LSM_ID_TOMOYO,
+};
+
/*
* tomoyo_security_ops is a "struct security_operations" which is used for
* registering TOMOYO.
@@ -568,6 +574,7 @@ static struct security_hook_list tomoyo_hooks[] __ro_after_init = {
LSM_HOOK_INIT(path_rename, tomoyo_path_rename),
LSM_HOOK_INIT(inode_getattr, tomoyo_inode_getattr),
LSM_HOOK_INIT(file_ioctl, tomoyo_file_ioctl),
+ LSM_HOOK_INIT(file_ioctl_compat, tomoyo_file_ioctl),
LSM_HOOK_INIT(path_chmod, tomoyo_path_chmod),
LSM_HOOK_INIT(path_chown, tomoyo_path_chown),
LSM_HOOK_INIT(path_chroot, tomoyo_path_chroot),
@@ -595,7 +602,8 @@ static int __init tomoyo_init(void)
struct tomoyo_task *s = tomoyo_task(current);
/* register ourselves with the security framework */
- security_add_hooks(tomoyo_hooks, ARRAY_SIZE(tomoyo_hooks), "tomoyo");
+ security_add_hooks(tomoyo_hooks, ARRAY_SIZE(tomoyo_hooks),
+ &tomoyo_lsmid);
pr_info("TOMOYO Linux initialized\n");
s->domain_info = &tomoyo_kernel_domain;
atomic_inc(&tomoyo_kernel_domain.users);
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 2503cf153d4a..49dc52b454ef 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -18,6 +18,7 @@
#include <linux/task_work.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
+#include <uapi/linux/lsm.h>
#define YAMA_SCOPE_DISABLED 0
#define YAMA_SCOPE_RELATIONAL 1
@@ -421,6 +422,11 @@ static int yama_ptrace_traceme(struct task_struct *parent)
return rc;
}
+static const struct lsm_id yama_lsmid = {
+ .name = "yama",
+ .id = LSM_ID_YAMA,
+};
+
static struct security_hook_list yama_hooks[] __ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, yama_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, yama_ptrace_traceme),
@@ -471,7 +477,7 @@ static inline void yama_init_sysctl(void) { }
static int __init yama_init(void)
{
pr_info("Yama: becoming mindful.\n");
- security_add_hooks(yama_hooks, ARRAY_SIZE(yama_hooks), "yama");
+ security_add_hooks(yama_hooks, ARRAY_SIZE(yama_hooks), &yama_lsmid);
yama_init_sysctl();
return 0;
}
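The Smack, TOMOYO and Yama hunks above all apply the same mechanical change: registration now passes a struct lsm_id instead of a bare name string. As a rough sketch (not from this patch; LSM_ID_EXAMPLE is a made-up identifier that would need a real allocation in uapi/linux/lsm.h), a minimal module now registers like this:

#include <linux/lsm_hooks.h>
#include <uapi/linux/lsm.h>

/* An lsm_id pairs the human-readable name with the stable userspace-visible ID. */
static const struct lsm_id example_lsmid = {
	.name = "example",
	.id = LSM_ID_EXAMPLE,		/* hypothetical, not a real LSM_ID_* value */
};

static struct security_hook_list example_hooks[] __ro_after_init = {
	/* LSM_HOOK_INIT(hook_name, example_hook_fn) entries go here */
};

static int __init example_init(void)
{
	/* security_add_hooks() now takes the lsm_id rather than a name string. */
	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
			   &example_lsmid);
	return 0;
}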
diff --git a/sound/pci/hda/cs35l41_hda_property.c b/sound/pci/hda/cs35l41_hda_property.c
index 194e1179a253..c1afb721b4c6 100644
--- a/sound/pci/hda/cs35l41_hda_property.c
+++ b/sound/pci/hda/cs35l41_hda_property.c
@@ -211,7 +211,6 @@ static int generic_dsd_config(struct cs35l41_hda *cs35l41, struct device *physde
if (cfg->bus == SPI) {
cs35l41->index = id;
-#if IS_ENABLED(CONFIG_SPI)
/*
* Manually set the Chip Select for the second amp <cs_gpio_index> in the node.
* This is only supported for systems with 2 amps, since we cannot expand the
@@ -220,7 +219,7 @@ static int generic_dsd_config(struct cs35l41_hda *cs35l41, struct device *physde
* uses a native chip select), to ensure the second amp does not clash with the
* first.
*/
- if (cfg->cs_gpio_index >= 0) {
+ if (IS_ENABLED(CONFIG_SPI) && cfg->cs_gpio_index >= 0) {
spi = to_spi_device(cs35l41->dev);
if (cfg->num_amps != 2) {
@@ -251,7 +250,6 @@ static int generic_dsd_config(struct cs35l41_hda *cs35l41, struct device *physde
spi_setup(spi);
}
}
-#endif
} else {
if (cfg->num_amps > 2)
/*
diff --git a/sound/pci/hda/cs35l56_hda_spi.c b/sound/pci/hda/cs35l56_hda_spi.c
index 27d7fbc56b4c..080426de9083 100644
--- a/sound/pci/hda/cs35l56_hda_spi.c
+++ b/sound/pci/hda/cs35l56_hda_spi.c
@@ -33,7 +33,7 @@ static int cs35l56_hda_spi_probe(struct spi_device *spi)
return ret;
}
- ret = cs35l56_hda_common_probe(cs35l56, spi->chip_select);
+ ret = cs35l56_hda_common_probe(cs35l56, spi_get_chipselect(spi, 0));
if (ret)
return ret;
ret = cs35l56_irq_request(&cs35l56->base, spi->irq);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c3a756528886..70b17b08d4ff 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9799,6 +9799,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
+ SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
@@ -9881,6 +9882,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8a0f, "HP Pavilion 14-ec1xxx", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
@@ -9925,6 +9927,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c
index 2fb1a7037c82..dfe281b57aa6 100644
--- a/sound/pci/hda/tas2781_hda_i2c.c
+++ b/sound/pci/hda/tas2781_hda_i2c.c
@@ -65,6 +65,15 @@ enum calib_data {
CALIB_MAX
};
+struct tas2781_hda {
+ struct device *dev;
+ struct tasdevice_priv *priv;
+ struct snd_kcontrol *dsp_prog_ctl;
+ struct snd_kcontrol *dsp_conf_ctl;
+ struct snd_kcontrol *prof_ctl;
+ struct snd_kcontrol *snd_ctls[3];
+};
+
static int tas2781_get_i2c_res(struct acpi_resource *ares, void *data)
{
struct tasdevice_priv *tas_priv = data;
@@ -125,26 +134,26 @@ err:
static void tas2781_hda_playback_hook(struct device *dev, int action)
{
- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
- dev_dbg(tas_priv->dev, "%s: action = %d\n", __func__, action);
+ dev_dbg(tas_hda->dev, "%s: action = %d\n", __func__, action);
switch (action) {
case HDA_GEN_PCM_ACT_OPEN:
pm_runtime_get_sync(dev);
- mutex_lock(&tas_priv->codec_lock);
- tasdevice_tuning_switch(tas_priv, 0);
- mutex_unlock(&tas_priv->codec_lock);
+ mutex_lock(&tas_hda->priv->codec_lock);
+ tasdevice_tuning_switch(tas_hda->priv, 0);
+ mutex_unlock(&tas_hda->priv->codec_lock);
break;
case HDA_GEN_PCM_ACT_CLOSE:
- mutex_lock(&tas_priv->codec_lock);
- tasdevice_tuning_switch(tas_priv, 1);
- mutex_unlock(&tas_priv->codec_lock);
+ mutex_lock(&tas_hda->priv->codec_lock);
+ tasdevice_tuning_switch(tas_hda->priv, 1);
+ mutex_unlock(&tas_hda->priv->codec_lock);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
break;
default:
- dev_dbg(tas_priv->dev, "Playback action not supported: %d\n",
+ dev_dbg(tas_hda->dev, "Playback action not supported: %d\n",
action);
break;
}
@@ -421,9 +430,9 @@ static void tas2781_apply_calib(struct tasdevice_priv *tas_priv)
}
}
-/* Update the calibrate data, including speaker impedance, f0, etc, into algo.
+/* Update the calibration data, including speaker impedance, f0, etc, into algo.
* Calibrate data is done by manufacturer in the factory. These data are used
- * by Algo for calucating the speaker temperature, speaker membrance excursion
+ * by Algo for calculating the speaker temperature, speaker membrane excursion
* and f0 in real time during playback.
*/
static int tas2781_save_calibration(struct tasdevice_priv *tas_priv)
@@ -477,9 +486,28 @@ static int tas2781_save_calibration(struct tasdevice_priv *tas_priv)
return 0;
}
+static void tas2781_hda_remove_controls(struct tas2781_hda *tas_hda)
+{
+ struct hda_codec *codec = tas_hda->priv->codec;
+
+ if (tas_hda->dsp_prog_ctl)
+ snd_ctl_remove(codec->card, tas_hda->dsp_prog_ctl);
+
+ if (tas_hda->dsp_conf_ctl)
+ snd_ctl_remove(codec->card, tas_hda->dsp_conf_ctl);
+
+ for (int i = ARRAY_SIZE(tas_hda->snd_ctls) - 1; i >= 0; i--)
+ if (tas_hda->snd_ctls[i])
+ snd_ctl_remove(codec->card, tas_hda->snd_ctls[i]);
+
+ if (tas_hda->prof_ctl)
+ snd_ctl_remove(codec->card, tas_hda->prof_ctl);
+}
+
static void tasdev_fw_ready(const struct firmware *fmw, void *context)
{
struct tasdevice_priv *tas_priv = context;
+ struct tas2781_hda *tas_hda = dev_get_drvdata(tas_priv->dev);
struct hda_codec *codec = tas_priv->codec;
int i, ret;
@@ -490,8 +518,8 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
if (ret)
goto out;
- ret = snd_ctl_add(codec->card,
- snd_ctl_new1(&tas2781_prof_ctrl, tas_priv));
+ tas_hda->prof_ctl = snd_ctl_new1(&tas2781_prof_ctrl, tas_priv);
+ ret = snd_ctl_add(codec->card, tas_hda->prof_ctl);
if (ret) {
dev_err(tas_priv->dev,
"Failed to add KControl %s = %d\n",
@@ -500,8 +528,9 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
}
for (i = 0; i < ARRAY_SIZE(tas2781_snd_controls); i++) {
- ret = snd_ctl_add(codec->card,
- snd_ctl_new1(&tas2781_snd_controls[i], tas_priv));
+ tas_hda->snd_ctls[i] = snd_ctl_new1(&tas2781_snd_controls[i],
+ tas_priv);
+ ret = snd_ctl_add(codec->card, tas_hda->snd_ctls[i]);
if (ret) {
dev_err(tas_priv->dev,
"Failed to add KControl %s = %d\n",
@@ -523,8 +552,9 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
goto out;
}
- ret = snd_ctl_add(codec->card,
- snd_ctl_new1(&tas2781_dsp_prog_ctrl, tas_priv));
+ tas_hda->dsp_prog_ctl = snd_ctl_new1(&tas2781_dsp_prog_ctrl,
+ tas_priv);
+ ret = snd_ctl_add(codec->card, tas_hda->dsp_prog_ctl);
if (ret) {
dev_err(tas_priv->dev,
"Failed to add KControl %s = %d\n",
@@ -532,8 +562,9 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
goto out;
}
- ret = snd_ctl_add(codec->card,
- snd_ctl_new1(&tas2781_dsp_conf_ctrl, tas_priv));
+ tas_hda->dsp_conf_ctl = snd_ctl_new1(&tas2781_dsp_conf_ctrl,
+ tas_priv);
+ ret = snd_ctl_add(codec->card, tas_hda->dsp_conf_ctl);
if (ret) {
dev_err(tas_priv->dev,
"Failed to add KControl %s = %d\n",
@@ -554,27 +585,27 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
tas2781_save_calibration(tas_priv);
out:
- mutex_unlock(&tas_priv->codec_lock);
+ mutex_unlock(&tas_hda->priv->codec_lock);
if (fmw)
release_firmware(fmw);
- pm_runtime_mark_last_busy(tas_priv->dev);
- pm_runtime_put_autosuspend(tas_priv->dev);
+ pm_runtime_mark_last_busy(tas_hda->dev);
+ pm_runtime_put_autosuspend(tas_hda->dev);
}
static int tas2781_hda_bind(struct device *dev, struct device *master,
void *master_data)
{
- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
struct hda_component *comps = master_data;
struct hda_codec *codec;
unsigned int subid;
int ret;
- if (!comps || tas_priv->index < 0 ||
- tas_priv->index >= HDA_MAX_COMPONENTS)
+ if (!comps || tas_hda->priv->index < 0 ||
+ tas_hda->priv->index >= HDA_MAX_COMPONENTS)
return -EINVAL;
- comps = &comps[tas_priv->index];
+ comps = &comps[tas_hda->priv->index];
if (comps->dev)
return -EBUSY;
@@ -583,10 +614,10 @@ static int tas2781_hda_bind(struct device *dev, struct device *master,
switch (subid) {
case 0x17aa:
- tas_priv->catlog_id = LENOVO;
+ tas_hda->priv->catlog_id = LENOVO;
break;
default:
- tas_priv->catlog_id = OTHERS;
+ tas_hda->priv->catlog_id = OTHERS;
break;
}
@@ -596,7 +627,7 @@ static int tas2781_hda_bind(struct device *dev, struct device *master,
strscpy(comps->name, dev_name(dev), sizeof(comps->name));
- ret = tascodec_init(tas_priv, codec, tasdev_fw_ready);
+ ret = tascodec_init(tas_hda->priv, codec, tasdev_fw_ready);
if (!ret)
comps->playback_hook = tas2781_hda_playback_hook;
@@ -609,9 +640,9 @@ static int tas2781_hda_bind(struct device *dev, struct device *master,
static void tas2781_hda_unbind(struct device *dev,
struct device *master, void *master_data)
{
- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
struct hda_component *comps = master_data;
- comps = &comps[tas_priv->index];
+ comps = &comps[tas_hda->priv->index];
if (comps->dev == dev) {
comps->dev = NULL;
@@ -619,10 +650,12 @@ static void tas2781_hda_unbind(struct device *dev,
comps->playback_hook = NULL;
}
- tasdevice_config_info_remove(tas_priv);
- tasdevice_dsp_remove(tas_priv);
+ tas2781_hda_remove_controls(tas_hda);
- tas_priv->fw_state = TASDEVICE_DSP_FW_PENDING;
+ tasdevice_config_info_remove(tas_hda->priv);
+ tasdevice_dsp_remove(tas_hda->priv);
+
+ tas_hda->priv->fw_state = TASDEVICE_DSP_FW_PENDING;
}
static const struct component_ops tas2781_hda_comp_ops = {
@@ -632,21 +665,21 @@ static const struct component_ops tas2781_hda_comp_ops = {
static void tas2781_hda_remove(struct device *dev)
{
- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
- pm_runtime_get_sync(tas_priv->dev);
- pm_runtime_disable(tas_priv->dev);
+ pm_runtime_get_sync(tas_hda->dev);
+ pm_runtime_disable(tas_hda->dev);
- component_del(tas_priv->dev, &tas2781_hda_comp_ops);
+ component_del(tas_hda->dev, &tas2781_hda_comp_ops);
- pm_runtime_put_noidle(tas_priv->dev);
+ pm_runtime_put_noidle(tas_hda->dev);
- tasdevice_remove(tas_priv);
+ tasdevice_remove(tas_hda->priv);
}
static int tas2781_hda_i2c_probe(struct i2c_client *clt)
{
- struct tasdevice_priv *tas_priv;
+ struct tas2781_hda *tas_hda;
const char *device_name;
int ret;
@@ -655,35 +688,42 @@ static int tas2781_hda_i2c_probe(struct i2c_client *clt)
else
return -ENODEV;
- tas_priv = tasdevice_kzalloc(clt);
- if (!tas_priv)
+ tas_hda = devm_kzalloc(&clt->dev, sizeof(*tas_hda), GFP_KERNEL);
+ if (!tas_hda)
return -ENOMEM;
- tas_priv->irq_info.irq = clt->irq;
- ret = tas2781_read_acpi(tas_priv, device_name);
+ dev_set_drvdata(&clt->dev, tas_hda);
+ tas_hda->dev = &clt->dev;
+
+ tas_hda->priv = tasdevice_kzalloc(clt);
+ if (!tas_hda->priv)
+ return -ENOMEM;
+
+ tas_hda->priv->irq_info.irq = clt->irq;
+ ret = tas2781_read_acpi(tas_hda->priv, device_name);
if (ret)
- return dev_err_probe(tas_priv->dev, ret,
+ return dev_err_probe(tas_hda->dev, ret,
"Platform not supported\n");
- ret = tasdevice_init(tas_priv);
+ ret = tasdevice_init(tas_hda->priv);
if (ret)
goto err;
- pm_runtime_set_autosuspend_delay(tas_priv->dev, 3000);
- pm_runtime_use_autosuspend(tas_priv->dev);
- pm_runtime_mark_last_busy(tas_priv->dev);
- pm_runtime_set_active(tas_priv->dev);
- pm_runtime_get_noresume(tas_priv->dev);
- pm_runtime_enable(tas_priv->dev);
+ pm_runtime_set_autosuspend_delay(tas_hda->dev, 3000);
+ pm_runtime_use_autosuspend(tas_hda->dev);
+ pm_runtime_mark_last_busy(tas_hda->dev);
+ pm_runtime_set_active(tas_hda->dev);
+ pm_runtime_get_noresume(tas_hda->dev);
+ pm_runtime_enable(tas_hda->dev);
- pm_runtime_put_autosuspend(tas_priv->dev);
+ pm_runtime_put_autosuspend(tas_hda->dev);
- tas2781_reset(tas_priv);
+ tas2781_reset(tas_hda->priv);
- ret = component_add(tas_priv->dev, &tas2781_hda_comp_ops);
+ ret = component_add(tas_hda->dev, &tas2781_hda_comp_ops);
if (ret) {
- dev_err(tas_priv->dev, "Register component failed: %d\n", ret);
- pm_runtime_disable(tas_priv->dev);
+ dev_err(tas_hda->dev, "Register component failed: %d\n", ret);
+ pm_runtime_disable(tas_hda->dev);
}
err:
@@ -699,81 +739,65 @@ static void tas2781_hda_i2c_remove(struct i2c_client *clt)
static int tas2781_runtime_suspend(struct device *dev)
{
- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
int i;
- dev_dbg(tas_priv->dev, "Runtime Suspend\n");
+ dev_dbg(tas_hda->dev, "Runtime Suspend\n");
- mutex_lock(&tas_priv->codec_lock);
+ mutex_lock(&tas_hda->priv->codec_lock);
- if (tas_priv->playback_started) {
- tasdevice_tuning_switch(tas_priv, 1);
- tas_priv->playback_started = false;
+ if (tas_hda->priv->playback_started) {
+ tasdevice_tuning_switch(tas_hda->priv, 1);
+ tas_hda->priv->playback_started = false;
}
- for (i = 0; i < tas_priv->ndev; i++) {
- tas_priv->tasdevice[i].cur_book = -1;
- tas_priv->tasdevice[i].cur_prog = -1;
- tas_priv->tasdevice[i].cur_conf = -1;
+ for (i = 0; i < tas_hda->priv->ndev; i++) {
+ tas_hda->priv->tasdevice[i].cur_book = -1;
+ tas_hda->priv->tasdevice[i].cur_prog = -1;
+ tas_hda->priv->tasdevice[i].cur_conf = -1;
}
- regcache_cache_only(tas_priv->regmap, true);
- regcache_mark_dirty(tas_priv->regmap);
-
- mutex_unlock(&tas_priv->codec_lock);
+ mutex_unlock(&tas_hda->priv->codec_lock);
return 0;
}
static int tas2781_runtime_resume(struct device *dev)
{
- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
unsigned long calib_data_sz =
- tas_priv->ndev * TASDEVICE_SPEAKER_CALIBRATION_SIZE;
- int ret;
-
- dev_dbg(tas_priv->dev, "Runtime Resume\n");
+ tas_hda->priv->ndev * TASDEVICE_SPEAKER_CALIBRATION_SIZE;
- mutex_lock(&tas_priv->codec_lock);
+ dev_dbg(tas_hda->dev, "Runtime Resume\n");
- regcache_cache_only(tas_priv->regmap, false);
- ret = regcache_sync(tas_priv->regmap);
- if (ret) {
- dev_err(tas_priv->dev,
- "Failed to restore register cache: %d\n", ret);
- goto out;
- }
+ mutex_lock(&tas_hda->priv->codec_lock);
- tasdevice_prmg_load(tas_priv, tas_priv->cur_prog);
+ tasdevice_prmg_load(tas_hda->priv, tas_hda->priv->cur_prog);
 /* If the calibration data is faulty, the DSP will still work with the
 * default calibration data built into the algo.
*/
- if (tas_priv->cali_data.total_sz > calib_data_sz)
- tas2781_apply_calib(tas_priv);
+ if (tas_hda->priv->cali_data.total_sz > calib_data_sz)
+ tas2781_apply_calib(tas_hda->priv);
-out:
- mutex_unlock(&tas_priv->codec_lock);
+ mutex_unlock(&tas_hda->priv->codec_lock);
- return ret;
+ return 0;
}
static int tas2781_system_suspend(struct device *dev)
{
- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
int ret;
- dev_dbg(tas_priv->dev, "System Suspend\n");
+ dev_dbg(tas_hda->priv->dev, "System Suspend\n");
ret = pm_runtime_force_suspend(dev);
if (ret)
return ret;
/* Shutdown chip before system suspend */
- regcache_cache_only(tas_priv->regmap, false);
- tasdevice_tuning_switch(tas_priv, 1);
- regcache_cache_only(tas_priv->regmap, true);
- regcache_mark_dirty(tas_priv->regmap);
+ tasdevice_tuning_switch(tas_hda->priv, 1);
/*
* Reset GPIO may be shared, so cannot reset here.
@@ -784,33 +808,33 @@ static int tas2781_system_suspend(struct device *dev)
static int tas2781_system_resume(struct device *dev)
{
- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
unsigned long calib_data_sz =
- tas_priv->ndev * TASDEVICE_SPEAKER_CALIBRATION_SIZE;
+ tas_hda->priv->ndev * TASDEVICE_SPEAKER_CALIBRATION_SIZE;
int i, ret;
- dev_dbg(tas_priv->dev, "System Resume\n");
+ dev_info(tas_hda->priv->dev, "System Resume\n");
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
- mutex_lock(&tas_priv->codec_lock);
+ mutex_lock(&tas_hda->priv->codec_lock);
- for (i = 0; i < tas_priv->ndev; i++) {
- tas_priv->tasdevice[i].cur_book = -1;
- tas_priv->tasdevice[i].cur_prog = -1;
- tas_priv->tasdevice[i].cur_conf = -1;
+ for (i = 0; i < tas_hda->priv->ndev; i++) {
+ tas_hda->priv->tasdevice[i].cur_book = -1;
+ tas_hda->priv->tasdevice[i].cur_prog = -1;
+ tas_hda->priv->tasdevice[i].cur_conf = -1;
}
- tas2781_reset(tas_priv);
- tasdevice_prmg_load(tas_priv, tas_priv->cur_prog);
+ tas2781_reset(tas_hda->priv);
+ tasdevice_prmg_load(tas_hda->priv, tas_hda->priv->cur_prog);
 /* If the calibration data is faulty, the DSP will still work with the
 * default calibration data built into the algo.
*/
- if (tas_priv->cali_data.total_sz > calib_data_sz)
- tas2781_apply_calib(tas_priv);
- mutex_unlock(&tas_priv->codec_lock);
+ if (tas_hda->priv->cali_data.total_sz > calib_data_sz)
+ tas2781_apply_calib(tas_hda->priv);
+ mutex_unlock(&tas_hda->priv->codec_lock);
return 0;
}
diff --git a/sound/soc/codecs/tas2781-comlib.c b/sound/soc/codecs/tas2781-comlib.c
index ffb26e4a7e2f..00e35169ae49 100644
--- a/sound/soc/codecs/tas2781-comlib.c
+++ b/sound/soc/codecs/tas2781-comlib.c
@@ -39,7 +39,7 @@ static const struct regmap_range_cfg tasdevice_ranges[] = {
static const struct regmap_config tasdevice_regmap = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_NONE,
.ranges = tasdevice_ranges,
.num_ranges = ARRAY_SIZE(tasdevice_ranges),
.max_register = 256 * 128,
@@ -316,8 +316,6 @@ int tasdevice_init(struct tasdevice_priv *tas_priv)
tas_priv->tasdevice[i].cur_conf = -1;
}
- dev_set_drvdata(tas_priv->dev, tas_priv);
-
mutex_init(&tas_priv->codec_lock);
out:
diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c
index 55cd5e3c23a5..917b1c15f71d 100644
--- a/sound/soc/codecs/tas2781-i2c.c
+++ b/sound/soc/codecs/tas2781-i2c.c
@@ -689,6 +689,8 @@ static int tasdevice_i2c_probe(struct i2c_client *i2c)
if (!tas_priv)
return -ENOMEM;
+ dev_set_drvdata(&i2c->dev, tas_priv);
+
if (ACPI_HANDLE(&i2c->dev)) {
acpi_id = acpi_match_device(i2c->dev.driver->acpi_match_table,
&i2c->dev);
diff --git a/sound/soc/fsl/fsl_rpmsg.c b/sound/soc/fsl/fsl_rpmsg.c
index 5c5c04ce9db7..00852f174a69 100644
--- a/sound/soc/fsl/fsl_rpmsg.c
+++ b/sound/soc/fsl/fsl_rpmsg.c
@@ -238,7 +238,7 @@ static int fsl_rpmsg_probe(struct platform_device *pdev)
ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
&fsl_rpmsg_dai, 1);
if (ret)
- return ret;
+ goto err_pm_disable;
rpmsg->card_pdev = platform_device_register_data(&pdev->dev,
"imx-audio-rpmsg",
@@ -248,16 +248,22 @@ static int fsl_rpmsg_probe(struct platform_device *pdev)
if (IS_ERR(rpmsg->card_pdev)) {
dev_err(&pdev->dev, "failed to register rpmsg card\n");
ret = PTR_ERR(rpmsg->card_pdev);
- return ret;
+ goto err_pm_disable;
}
return 0;
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
}
static void fsl_rpmsg_remove(struct platform_device *pdev)
{
struct fsl_rpmsg *rpmsg = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
+
if (rpmsg->card_pdev)
platform_device_unregister(rpmsg->card_pdev);
}
diff --git a/sound/soc/mediatek/mt8186/mt8186-dai-adda.c b/sound/soc/mediatek/mt8186/mt8186-dai-adda.c
index 85ae3f76d951..ad6d4b5cf697 100644
--- a/sound/soc/mediatek/mt8186/mt8186-dai-adda.c
+++ b/sound/soc/mediatek/mt8186/mt8186-dai-adda.c
@@ -499,7 +499,7 @@ static const struct snd_soc_dapm_widget mtk_dai_adda_widgets[] = {
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY_S("AUD_PAD_TOP", SUPPLY_SEQ_ADDA_AUD_PAD_TOP,
- 0, 0, 0,
+ AFE_AUD_PAD_TOP, RG_RX_FIFO_ON_SFT, 0,
mtk_adda_pad_top_event,
SND_SOC_DAPM_PRE_PMU),
SND_SOC_DAPM_SUPPLY_S("ADDA_MTKAIF_CFG", SUPPLY_SEQ_ADDA_MTKAIF_CFG,
diff --git a/sound/soc/meson/g12a-toacodec.c b/sound/soc/meson/g12a-toacodec.c
index 6c4503766fdc..531bb8707a3e 100644
--- a/sound/soc/meson/g12a-toacodec.c
+++ b/sound/soc/meson/g12a-toacodec.c
@@ -71,6 +71,9 @@ static int g12a_toacodec_mux_put_enum(struct snd_kcontrol *kcontrol,
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int mux, reg;
+ if (ucontrol->value.enumerated.item[0] >= e->items)
+ return -EINVAL;
+
mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
regmap_field_read(priv->field_dat_sel, &reg);
@@ -101,7 +104,7 @@ static int g12a_toacodec_mux_put_enum(struct snd_kcontrol *kcontrol,
snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
- return 0;
+ return 1;
}
static SOC_ENUM_SINGLE_DECL(g12a_toacodec_mux_enum, TOACODEC_CTRL0,
diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c
index f7ef9aa1eed8..b92434125fac 100644
--- a/sound/soc/meson/g12a-tohdmitx.c
+++ b/sound/soc/meson/g12a-tohdmitx.c
@@ -45,6 +45,9 @@ static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol,
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int mux, changed;
+ if (ucontrol->value.enumerated.item[0] >= e->items)
+ return -EINVAL;
+
mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
changed = snd_soc_component_test_bits(component, e->reg,
CTRL0_I2S_DAT_SEL,
@@ -93,6 +96,9 @@ static int g12a_tohdmitx_spdif_mux_put_enum(struct snd_kcontrol *kcontrol,
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int mux, changed;
+ if (ucontrol->value.enumerated.item[0] >= e->items)
+ return -EINVAL;
+
mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
changed = snd_soc_component_test_bits(component, TOHDMITX_CTRL0,
CTRL0_SPDIF_SEL,
@@ -112,7 +118,7 @@ static int g12a_tohdmitx_spdif_mux_put_enum(struct snd_kcontrol *kcontrol,
snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
- return 0;
+ return 1;
}
static SOC_ENUM_SINGLE_DECL(g12a_tohdmitx_spdif_mux_enum, TOHDMITX_CTRL0,
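Both meson changes above enforce the same control-put contract. The sketch below is illustrative only (example_priv and its cur field are hypothetical) and shows the convention ALSA expects from a .put handler: reject out-of-range items with -EINVAL, return 0 when nothing changed, and return 1 when the value changed so the core emits a notification.

static int example_mux_put_enum(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	struct example_priv *priv = snd_kcontrol_chip(kcontrol);	/* hypothetical */
	unsigned int item = ucontrol->value.enumerated.item[0];

	if (item >= e->items)
		return -EINVAL;		/* invalid selection from userspace */

	if (item == priv->cur)
		return 0;		/* unchanged: no notification */

	priv->cur = item;
	/* ... program the hardware mux here ... */
	return 1;			/* changed: core notifies userspace */
}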
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
index 28ecbebb4b84..9f84b0d287a5 100644
--- a/sound/soc/sof/intel/hda-codec.c
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -54,8 +54,16 @@ static int request_codec_module(struct hda_codec *codec)
static int hda_codec_load_module(struct hda_codec *codec)
{
- int ret = request_codec_module(codec);
+ int ret;
+
+ ret = snd_hdac_device_register(&codec->core);
+ if (ret) {
+ dev_err(&codec->core.dev, "failed to register hdac device\n");
+ put_device(&codec->core.dev);
+ return ret;
+ }
+ ret = request_codec_module(codec);
if (ret <= 0) {
codec->probe_id = HDA_CODEC_ID_GENERIC;
ret = request_codec_module(codec);
@@ -116,7 +124,6 @@ EXPORT_SYMBOL_NS_GPL(hda_codec_jack_check, SND_SOC_SOF_HDA_AUDIO_CODEC);
static struct hda_codec *hda_codec_device_init(struct hdac_bus *bus, int addr, int type)
{
struct hda_codec *codec;
- int ret;
codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "ehdaudio%dD%d", bus->idx, addr);
if (IS_ERR(codec)) {
@@ -126,13 +133,6 @@ static struct hda_codec *hda_codec_device_init(struct hdac_bus *bus, int addr, i
codec->core.type = type;
- ret = snd_hdac_device_register(&codec->core);
- if (ret) {
- dev_err(bus->dev, "failed to register hdac device\n");
- put_device(&codec->core.dev);
- return ERR_PTR(ret);
- }
-
return codec;
}
diff --git a/sound/usb/mixer_scarlett2.c b/sound/usb/mixer_scarlett2.c
index f7c57a2c3028..33a3d1161885 100644
--- a/sound/usb/mixer_scarlett2.c
+++ b/sound/usb/mixer_scarlett2.c
@@ -1966,7 +1966,7 @@ static int scarlett2_usb_get_meter_levels(struct usb_mixer_interface *mixer,
__le16 num_meters;
__le32 magic;
} __packed req;
- u32 resp[SCARLETT2_MAX_METERS];
+ __le32 resp[SCARLETT2_MAX_METERS];
int i, err;
req.pad = 0;
@@ -1979,7 +1979,7 @@ static int scarlett2_usb_get_meter_levels(struct usb_mixer_interface *mixer,
/* copy, convert to u16 */
for (i = 0; i < num_meters; i++)
- levels[i] = resp[i];
+ levels[i] = le32_to_cpu(resp[i]);
return 0;
}
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 4af140cf5719..f4542d2718f4 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -218,7 +218,7 @@
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_ZEN (7*32+28) /* "" CPU based on Zen microarchitecture */
+#define X86_FEATURE_ZEN ( 7*32+28) /* "" Generic flag for all Zen and newer */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
#define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
diff --git a/tools/cgroup/Makefile b/tools/cgroup/Makefile
deleted file mode 100644
index ffca068e4a76..000000000000
--- a/tools/cgroup/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Makefile for cgroup tools
-
-CFLAGS = -Wall -Wextra
-
-all: cgroup_event_listener
-%: %.c
- $(CC) $(CFLAGS) -o $@ $^
-
-clean:
- $(RM) cgroup_event_listener
diff --git a/tools/include/linux/rwsem.h b/tools/include/linux/rwsem.h
index 83971b3cbfce..f8bffd4a987c 100644
--- a/tools/include/linux/rwsem.h
+++ b/tools/include/linux/rwsem.h
@@ -37,4 +37,8 @@ static inline int up_write(struct rw_semaphore *sem)
{
return pthread_rwlock_unlock(&sem->lock);
}
+
+#define down_read_nested(sem, subclass) down_read(sem)
+#define down_write_nested(sem, subclass) down_write(sem)
+
#endif /* _TOOLS_RWSEM_H */
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h
index 622266b197d0..a6cdf25b6b9d 100644
--- a/tools/include/linux/spinlock.h
+++ b/tools/include/linux/spinlock.h
@@ -11,6 +11,7 @@
#define spin_lock_init(x) pthread_mutex_init(x, NULL)
#define spin_lock(x) pthread_mutex_lock(x)
+#define spin_lock_nested(x, subclass) pthread_mutex_lock(x)
#define spin_unlock(x) pthread_mutex_unlock(x)
#define spin_lock_bh(x) pthread_mutex_lock(x)
#define spin_unlock_bh(x) pthread_mutex_unlock(x)
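These shims exist so kernel code that takes two locks of the same class can build in the userspace test harness; the subclass argument only feeds lockdep, so the stubs simply drop it. A rough caller shape (illustrative; src and dst are made-up objects, and SINGLE_DEPTH_NESTING is the kernel's usual subclass constant):

	spin_lock(&src->lock);
	spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
	/* ... move entries from src to dst while both are held ... */
	spin_unlock(&dst->lock);
	spin_unlock(&src->lock);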
diff --git a/tools/include/perf/arm_pmuv3.h b/tools/include/perf/arm_pmuv3.h
index e822d49fb5b8..1e397d55384e 100644
--- a/tools/include/perf/arm_pmuv3.h
+++ b/tools/include/perf/arm_pmuv3.h
@@ -218,45 +218,54 @@
#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */
-#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
-#define ARMV8_PMU_PMCR_N_MASK 0x1f
-#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */
+#define ARMV8_PMU_PMCR_N GENMASK(15, 11) /* Number of counters supported */
+/* Mask for writable bits */
+#define ARMV8_PMU_PMCR_MASK (ARMV8_PMU_PMCR_E | ARMV8_PMU_PMCR_P | \
+ ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_D | \
+ ARMV8_PMU_PMCR_X | ARMV8_PMU_PMCR_DP | \
+ ARMV8_PMU_PMCR_LC | ARMV8_PMU_PMCR_LP)
/*
* PMOVSR: counters overflow flag status reg
*/
-#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */
-#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK
+#define ARMV8_PMU_OVSR_P GENMASK(30, 0)
+#define ARMV8_PMU_OVSR_C BIT(31)
+/* Mask for writable bits is both P and C fields */
+#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C)
/*
* PMXEVTYPER: Event selection reg
*/
-#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
-#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
+#define ARMV8_PMU_EVTYPE_EVENT GENMASK(15, 0) /* Mask for EVENT bits */
+#define ARMV8_PMU_EVTYPE_TH GENMASK(43, 32)
+#define ARMV8_PMU_EVTYPE_TC GENMASK(63, 61)
/*
* Event filters for PMUv3
*/
-#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31)
-#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30)
-#define ARMV8_PMU_INCLUDE_EL2 (1U << 27)
+#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31)
+#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30)
+#define ARMV8_PMU_EXCLUDE_NS_EL1 (1U << 29)
+#define ARMV8_PMU_EXCLUDE_NS_EL0 (1U << 28)
+#define ARMV8_PMU_INCLUDE_EL2 (1U << 27)
+#define ARMV8_PMU_EXCLUDE_EL3 (1U << 26)
/*
* PMUSERENR: user enable reg
*/
-#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */
#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
+/* Mask for writable bits */
+#define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \
+ ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER)
/* PMMIR_EL1.SLOTS mask */
-#define ARMV8_PMU_SLOTS_MASK 0xff
-
-#define ARMV8_PMU_BUS_SLOTS_SHIFT 8
-#define ARMV8_PMU_BUS_SLOTS_MASK 0xff
-#define ARMV8_PMU_BUS_WIDTH_SHIFT 16
-#define ARMV8_PMU_BUS_WIDTH_MASK 0xf
+#define ARMV8_PMU_SLOTS GENMASK(7, 0)
+#define ARMV8_PMU_BUS_SLOTS GENMASK(15, 8)
+#define ARMV8_PMU_BUS_WIDTH GENMASK(19, 16)
+#define ARMV8_PMU_THWIDTH GENMASK(23, 20)
/*
* This code is really good
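With the register fields expressed as GENMASK() masks, consumers extract values with FIELD_GET() rather than open-coded shift-and-mask. A small sketch, assuming <linux/bitfield.h>:

#include <linux/bitfield.h>

/* Number of event counters advertised in PMCR_EL0. */
static inline unsigned int pmcr_num_counters(u64 pmcr)
{
	return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
}

/* SLOTS field of PMMIR_EL1, formerly (pmmir & ARMV8_PMU_SLOTS_MASK). */
static inline unsigned int pmmir_slots(u64 pmmir)
{
	return FIELD_GET(ARMV8_PMU_SLOTS, pmmir);
}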
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
index da43810b7485..48ad69f7722e 100644
--- a/tools/include/uapi/linux/fs.h
+++ b/tools/include/uapi/linux/fs.h
@@ -316,6 +316,7 @@ typedef int __bitwise __kernel_rwf_t;
#define PAGE_IS_SWAPPED (1 << 4)
#define PAGE_IS_PFNZERO (1 << 5)
#define PAGE_IS_HUGE (1 << 6)
+#define PAGE_IS_SOFT_DIRTY (1 << 7)
/*
* struct page_region - Page region with flags
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index e94756e09ca9..548ec3cd7c00 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -291,7 +291,7 @@ static void init_insn_state(struct objtool_file *file, struct insn_state *state,
static struct cfi_state *cfi_alloc(void)
{
- struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
+ struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
if (!cfi) {
WARN("calloc failed");
exit(1);
diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
index 4c90cc176f81..2109690b0d5f 100644
--- a/tools/perf/Documentation/perf-intel-pt.txt
+++ b/tools/perf/Documentation/perf-intel-pt.txt
@@ -683,7 +683,7 @@ Buffer handling
~~~~~~~~~~~~~~~
There may be buffer limitations (i.e. single ToPa entry) which means that actual
-buffer sizes are limited to powers of 2 up to 4MiB (MAX_ORDER). In order to
+buffer sizes are limited to powers of 2 up to 4MiB (MAX_PAGE_ORDER). In order to
provide other sizes, and in particular an arbitrarily large size, multiple
buffers are logically concatenated. However an interrupt must be used to switch
between buffers. That has two potential problems:
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index 116ff501bf92..532b855df589 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -371,3 +371,8 @@
454 n64 futex_wake sys_futex_wake
455 n64 futex_wait sys_futex_wait
456 n64 futex_requeue sys_futex_requeue
+457 n64 statmount sys_statmount
+458 n64 listmount sys_listmount
+459 n64 lsm_get_self_attr sys_lsm_get_self_attr
+460 n64 lsm_set_self_attr sys_lsm_set_self_attr
+461 n64 lsm_list_modules sys_lsm_list_modules
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 7fab411378f2..17173b82ca21 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -543,3 +543,8 @@
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index 86fec9b080f6..095bb86339a7 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -459,3 +459,8 @@
454 common futex_wake sys_futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue sys_futex_requeue
+457 common statmount sys_statmount sys_statmount
+458 common listmount sys_listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 8cb8bf68721c..7e8d46f4147f 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -378,6 +378,11 @@
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
#
# Due to a historical design error, certain syscalls are numbered differently
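The five new rows are identical across the mips, powerpc, s390 and x86 tables above; only the numbers differ per ABI. As a hedged illustration of the last entry, userspace can enumerate the active LSMs roughly like this (461 is the x86_64 number from the table above; the ID-to-name mapping lives in <linux/lsm.h>):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/types.h>

#ifndef __NR_lsm_list_modules
#define __NR_lsm_list_modules 461	/* x86_64 value from the table above */
#endif

int main(void)
{
	__u64 ids[16];
	__u32 size = sizeof(ids);
	int i, count;

	/* Returns the number of IDs written to ids[], or a negative error. */
	count = syscall(__NR_lsm_list_modules, ids, &size, 0);
	if (count < 0) {
		perror("lsm_list_modules");
		return 1;
	}
	for (i = 0; i < count; i++)
		printf("active LSM id: %llu\n", (unsigned long long)ids[i]);
	return 0;
}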
diff --git a/tools/testing/memblock/linux/mmzone.h b/tools/testing/memblock/linux/mmzone.h
index 134f8eab0768..71546e15bdd3 100644
--- a/tools/testing/memblock/linux/mmzone.h
+++ b/tools/testing/memblock/linux/mmzone.h
@@ -17,10 +17,10 @@ enum zone_type {
};
#define MAX_NR_ZONES __MAX_NR_ZONES
-#define MAX_ORDER 10
-#define MAX_ORDER_NR_PAGES (1 << MAX_ORDER)
+#define MAX_PAGE_ORDER 10
+#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)
-#define pageblock_order MAX_ORDER
+#define pageblock_order MAX_PAGE_ORDER
#define pageblock_nr_pages BIT(pageblock_order)
#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index 61fe2601cb3a..4eb442206d01 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -93,13 +93,9 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
return p;
}
-void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
+void __kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
assert(objp);
- uatomic_dec(&nr_allocated);
- uatomic_dec(&cachep->nr_allocated);
- if (kmalloc_verbose)
- printf("Freeing %p to slab\n", objp);
if (cachep->nr_objs > 10 || cachep->align) {
memset(objp, POISON_FREE, cachep->size);
free(objp);
@@ -111,6 +107,15 @@ void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
}
}
+void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
+{
+ uatomic_dec(&nr_allocated);
+ uatomic_dec(&cachep->nr_allocated);
+ if (kmalloc_verbose)
+ printf("Freeing %p to slab\n", objp);
+ __kmem_cache_free_locked(cachep, objp);
+}
+
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
pthread_mutex_lock(&cachep->lock);
@@ -141,18 +146,17 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
if (kmalloc_verbose)
pr_debug("Bulk alloc %lu\n", size);
- if (!(gfp & __GFP_DIRECT_RECLAIM)) {
- if (cachep->non_kernel < size)
- return 0;
-
- cachep->non_kernel -= size;
- }
-
pthread_mutex_lock(&cachep->lock);
if (cachep->nr_objs >= size) {
struct radix_tree_node *node;
for (i = 0; i < size; i++) {
+ if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+ if (!cachep->non_kernel)
+ break;
+ cachep->non_kernel--;
+ }
+
node = cachep->objs;
cachep->nr_objs--;
cachep->objs = node->parent;
@@ -163,11 +167,19 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
} else {
pthread_mutex_unlock(&cachep->lock);
for (i = 0; i < size; i++) {
+ if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+ if (!cachep->non_kernel)
+ break;
+ cachep->non_kernel--;
+ }
+
if (cachep->align) {
posix_memalign(&p[i], cachep->align,
cachep->size);
} else {
p[i] = malloc(cachep->size);
+ if (!p[i])
+ break;
}
if (cachep->ctor)
cachep->ctor(p[i]);
@@ -176,6 +188,15 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
}
}
+ if (i < size) {
+ size = i;
+ pthread_mutex_lock(&cachep->lock);
+ for (i = 0; i < size; i++)
+ __kmem_cache_free_locked(cachep, p[i]);
+ pthread_mutex_unlock(&cachep->lock);
+ return 0;
+ }
+
for (i = 0; i < size; i++) {
uatomic_inc(&nr_allocated);
uatomic_inc(&cachep->nr_allocated);
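The reworked stub now honours the kernel's bulk-allocation contract: either every requested object is allocated, or whatever was obtained is rolled back and 0 is returned. A caller therefore only checks for all-or-nothing, as in this illustrative sketch:

static int example_fill(struct kmem_cache *cachep, void **objs, size_t n)
{
	/* Either fills all n slots and returns n, or rolls back and returns 0. */
	if (!kmem_cache_alloc_bulk(cachep, GFP_KERNEL, n, objs))
		return -ENOMEM;

	/* ... use the objects, then release them in one call ... */
	kmem_cache_free_bulk(cachep, n, objs);
	return 0;
}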
diff --git a/tools/testing/radix-tree/linux/maple_tree.h b/tools/testing/radix-tree/linux/maple_tree.h
index 7d8d1f445b89..06c89bdcc515 100644
--- a/tools/testing/radix-tree/linux/maple_tree.h
+++ b/tools/testing/radix-tree/linux/maple_tree.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0+ */
#define atomic_t int32_t
-#include "../../../../include/linux/maple_tree.h"
#define atomic_inc(x) uatomic_inc(x)
#define atomic_read(x) uatomic_read(x)
#define atomic_set(x, y) do {} while (0)
#define U8_MAX UCHAR_MAX
+#include "../../../../include/linux/maple_tree.h"
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index 76a8990bb14e..f1caf4bcf937 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -118,6 +118,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
MT_BUG_ON(mt, mas.alloc == NULL);
MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
mas_push_node(&mas, mn);
+ mas_reset(&mas);
mas_nomem(&mas, GFP_KERNEL); /* free */
mtree_unlock(mt);
@@ -141,7 +142,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
- mas.node = MAS_START;
+ mas.status = ma_start;
mas_nomem(&mas, GFP_KERNEL);
/* Allocate 3 nodes, will fail. */
mas_node_count(&mas, 3);
@@ -158,6 +159,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
/* Ensure we counted 3. */
MT_BUG_ON(mt, mas_allocated(&mas) != 3);
/* Free. */
+ mas_reset(&mas);
mas_nomem(&mas, GFP_KERNEL);
/* Set allocation request to 1. */
@@ -272,6 +274,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
ma_free_rcu(mn);
MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
}
+ mas_reset(&mas);
MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL));
}
@@ -294,6 +297,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
smn = smn->slot[0]; /* next. */
}
MT_BUG_ON(mt, mas_allocated(&mas) != total);
+ mas_reset(&mas);
mas_nomem(&mas, GFP_KERNEL); /* Free. */
MT_BUG_ON(mt, mas_allocated(&mas) != 0);
@@ -441,7 +445,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
mas.node = MA_ERROR(-ENOMEM);
mas_node_count(&mas, 10); /* Request */
mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mas.node = MAS_START;
+ mas.status = ma_start;
MT_BUG_ON(mt, mas_allocated(&mas) != 10);
mas_destroy(&mas);
@@ -452,7 +456,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
mas.node = MA_ERROR(-ENOMEM);
mas_node_count(&mas, 10 + MAPLE_ALLOC_SLOTS - 1); /* Request */
mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mas.node = MAS_START;
+ mas.status = ma_start;
MT_BUG_ON(mt, mas_allocated(&mas) != 10 + MAPLE_ALLOC_SLOTS - 1);
mas_destroy(&mas);
@@ -941,10 +945,11 @@ retry:
ret = mas_descend_walk(mas, range_min, range_max);
if (unlikely(mte_dead_node(mas->node))) {
- mas->node = MAS_START;
+ mas->status = ma_start;
goto retry;
}
+ mas->end = mas_data_end(mas);
return ret;
not_found:
@@ -960,17 +965,19 @@ static inline void *mas_range_load(struct ma_state *mas,
unsigned long index = mas->index;
if (mas_is_none(mas) || mas_is_paused(mas))
- mas->node = MAS_START;
+ mas->status = ma_start;
retry:
if (mas_tree_walk(mas, range_min, range_max))
- if (unlikely(mas->node == MAS_ROOT))
+ if (unlikely(mas->status == ma_root))
return mas_root(mas);
if (likely(mas->offset != MAPLE_NODE_SLOTS))
entry = mas_get_slot(mas, mas->offset);
- if (mas_dead_node(mas, index))
+ if (mas_is_active(mas) && mte_dead_node(mas->node)) {
+ mas_set(mas, index);
goto retry;
+ }
return entry;
}
@@ -34132,7 +34139,7 @@ STORE, 140501948112896, 140501948116991,
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
check_erase2_testset(mt, set27, ARRAY_SIZE(set27));
rcu_barrier();
- MT_BUG_ON(mt, 0 != mtree_load(mt, 140415537422336));
+ MT_BUG_ON(mt, NULL != mtree_load(mt, 140415537422336));
mt_set_non_kernel(0);
mt_validate(mt);
mtree_destroy(mt);
@@ -34256,7 +34263,7 @@ STORE, 140501948112896, 140501948116991,
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
check_erase2_testset(mt, set37, ARRAY_SIZE(set37));
rcu_barrier();
- MT_BUG_ON(mt, 0 != mtree_load(mt, 94637033459712));
+ MT_BUG_ON(mt, NULL != mtree_load(mt, 94637033459712));
mt_validate(mt);
mtree_destroy(mt);
@@ -34264,7 +34271,7 @@ STORE, 140501948112896, 140501948116991,
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
check_erase2_testset(mt, set38, ARRAY_SIZE(set38));
rcu_barrier();
- MT_BUG_ON(mt, 0 != mtree_load(mt, 94637033459712));
+ MT_BUG_ON(mt, NULL != mtree_load(mt, 94637033459712));
mt_validate(mt);
mtree_destroy(mt);
@@ -35336,7 +35343,7 @@ static void mas_dfs_preorder(struct ma_state *mas)
unsigned char end, slot = 0;
unsigned long *pivots;
- if (mas->node == MAS_START) {
+ if (mas->status == ma_start) {
mas_start(mas);
return;
}
@@ -35373,7 +35380,7 @@ walk_up:
return;
done:
- mas->node = MAS_NONE;
+ mas->status = ma_none;
}
@@ -35832,7 +35839,7 @@ static noinline void __init check_nomem(struct maple_tree *mt)
mas_store(&ms, &ms); /* insert 1 -> &ms, fails. */
MT_BUG_ON(mt, ms.node != MA_ERROR(-ENOMEM));
mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */
- MT_BUG_ON(mt, ms.node != MAS_START);
+ MT_BUG_ON(mt, ms.status != ma_start);
mtree_unlock(mt);
MT_BUG_ON(mt, mtree_insert(mt, 2, mt, GFP_KERNEL) != 0);
mtree_lock(mt);
@@ -35857,6 +35864,363 @@ static noinline void __init check_locky(struct maple_tree *mt)
mt_clear_in_rcu(mt);
}
+/*
+ * Compares two nodes except for the addresses stored in the nodes.
+ * Returns zero if they are the same, otherwise returns non-zero.
+ */
+static int __init compare_node(struct maple_enode *enode_a,
+ struct maple_enode *enode_b)
+{
+ struct maple_node *node_a, *node_b;
+ struct maple_node a, b;
+ void **slots_a, **slots_b; /* Do not use the rcu tag. */
+ enum maple_type type;
+ int i;
+
+ if (((unsigned long)enode_a & MAPLE_NODE_MASK) !=
+ ((unsigned long)enode_b & MAPLE_NODE_MASK)) {
+ pr_err("The lower 8 bits of enode are different.\n");
+ return -1;
+ }
+
+ type = mte_node_type(enode_a);
+ node_a = mte_to_node(enode_a);
+ node_b = mte_to_node(enode_b);
+ a = *node_a;
+ b = *node_b;
+
+ /* Do not compare addresses. */
+ if (ma_is_root(node_a) || ma_is_root(node_b)) {
+ a.parent = (struct maple_pnode *)((unsigned long)a.parent &
+ MA_ROOT_PARENT);
+ b.parent = (struct maple_pnode *)((unsigned long)b.parent &
+ MA_ROOT_PARENT);
+ } else {
+ a.parent = (struct maple_pnode *)((unsigned long)a.parent &
+ MAPLE_NODE_MASK);
+ b.parent = (struct maple_pnode *)((unsigned long)b.parent &
+ MAPLE_NODE_MASK);
+ }
+
+ if (a.parent != b.parent) {
+ pr_err("The lower 8 bits of parents are different. %p %p\n",
+ a.parent, b.parent);
+ return -1;
+ }
+
+ /*
+ * If it is a leaf node, the slots do not contain the node address, and
+ * no special processing of slots is required.
+ */
+ if (ma_is_leaf(type))
+ goto cmp;
+
+ slots_a = ma_slots(&a, type);
+ slots_b = ma_slots(&b, type);
+
+ for (i = 0; i < mt_slots[type]; i++) {
+ if (!slots_a[i] && !slots_b[i])
+ break;
+
+ if (!slots_a[i] || !slots_b[i]) {
+ pr_err("The number of slots is different.\n");
+ return -1;
+ }
+
+ /* Do not compare addresses in slots. */
+ ((unsigned long *)slots_a)[i] &= MAPLE_NODE_MASK;
+ ((unsigned long *)slots_b)[i] &= MAPLE_NODE_MASK;
+ }
+
+cmp:
+ /*
+ * Compare all contents of two nodes, including parent (except address),
+ * slots (except address), pivots, gaps and metadata.
+ */
+ return memcmp(&a, &b, sizeof(struct maple_node));
+}
+
+/*
+ * Compare two trees and return 0 if they are the same, non-zero otherwise.
+ */
+static int __init compare_tree(struct maple_tree *mt_a, struct maple_tree *mt_b)
+{
+ MA_STATE(mas_a, mt_a, 0, 0);
+ MA_STATE(mas_b, mt_b, 0, 0);
+
+ if (mt_a->ma_flags != mt_b->ma_flags) {
+ pr_err("The flags of the two trees are different.\n");
+ return -1;
+ }
+
+ mas_dfs_preorder(&mas_a);
+ mas_dfs_preorder(&mas_b);
+
+ if (mas_is_ptr(&mas_a) || mas_is_ptr(&mas_b)) {
+ if (!(mas_is_ptr(&mas_a) && mas_is_ptr(&mas_b))) {
+ pr_err("One is ma_root and the other is not.\n");
+ return -1;
+ }
+ return 0;
+ }
+
+ while (!mas_is_none(&mas_a) || !mas_is_none(&mas_b)) {
+
+ if (mas_is_none(&mas_a) || mas_is_none(&mas_b)) {
+ pr_err("One is ma_none and the other is not.\n");
+ return -1;
+ }
+
+ if (mas_a.min != mas_b.min ||
+ mas_a.max != mas_b.max) {
+ pr_err("mas->min, mas->max do not match.\n");
+ return -1;
+ }
+
+ if (compare_node(mas_a.node, mas_b.node)) {
+ pr_err("The contents of nodes %p and %p are different.\n",
+ mas_a.node, mas_b.node);
+ mt_dump(mt_a, mt_dump_dec);
+ mt_dump(mt_b, mt_dump_dec);
+ return -1;
+ }
+
+ mas_dfs_preorder(&mas_a);
+ mas_dfs_preorder(&mas_b);
+ }
+
+ return 0;
+}
+
+static __init void mas_subtree_max_range(struct ma_state *mas)
+{
+ unsigned long limit = mas->max;
+ MA_STATE(newmas, mas->tree, 0, 0);
+ void *entry;
+
+ mas_for_each(mas, entry, limit) {
+ if (mas->last - mas->index >=
+ newmas.last - newmas.index) {
+ newmas = *mas;
+ }
+ }
+
+ *mas = newmas;
+}
+
+/*
+ * build_full_tree() - Build a full tree.
+ * @mt: The tree to build.
+ * @flags: Use @flags to build the tree.
+ * @height: The height of the tree to build.
+ *
+ * Build a tree with full leaf nodes and internal nodes. Note that the height
+ * should not exceed 3, otherwise it will take a long time to build.
+ * Return: zero if the build is successful, non-zero if it fails.
+ */
+static __init int build_full_tree(struct maple_tree *mt, unsigned int flags,
+ int height)
+{
+ MA_STATE(mas, mt, 0, 0);
+ unsigned long step;
+ int ret = 0, cnt = 1;
+ enum maple_type type;
+
+ mt_init_flags(mt, flags);
+ mtree_insert_range(mt, 0, ULONG_MAX, xa_mk_value(5), GFP_KERNEL);
+
+ mtree_lock(mt);
+
+ while (1) {
+ mas_set(&mas, 0);
+ if (mt_height(mt) < height) {
+ mas.max = ULONG_MAX;
+ goto store;
+ }
+
+ while (1) {
+ mas_dfs_preorder(&mas);
+ if (mas_is_none(&mas))
+ goto unlock;
+
+ type = mte_node_type(mas.node);
+ if (mas_data_end(&mas) + 1 < mt_slots[type]) {
+ mas_set(&mas, mas.min);
+ goto store;
+ }
+ }
+store:
+ mas_subtree_max_range(&mas);
+ step = mas.last - mas.index;
+ if (step < 1) {
+ ret = -1;
+ goto unlock;
+ }
+
+ step /= 2;
+ mas.last = mas.index + step;
+ mas_store_gfp(&mas, xa_mk_value(5),
+ GFP_KERNEL);
+ ++cnt;
+ }
+unlock:
+ mtree_unlock(mt);
+
+ MT_BUG_ON(mt, mt_height(mt) != height);
+ /* pr_info("height:%u number of elements:%d\n", mt_height(mt), cnt); */
+ return ret;
+}
+
+static noinline void __init check_mtree_dup(struct maple_tree *mt)
+{
+ DEFINE_MTREE(new);
+ int i, j, ret, count = 0;
+ unsigned int rand_seed = 17, rand;
+
+ /* store a value at [0, 0] */
+ mt_init_flags(mt, 0);
+ mtree_store_range(mt, 0, 0, xa_mk_value(0), GFP_KERNEL);
+ ret = mtree_dup(mt, &new, GFP_KERNEL);
+ MT_BUG_ON(&new, ret);
+ mt_validate(&new);
+ if (compare_tree(mt, &new))
+ MT_BUG_ON(&new, 1);
+
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+
+ /* The two trees have different attributes. */
+ mt_init_flags(mt, 0);
+ mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE);
+ ret = mtree_dup(mt, &new, GFP_KERNEL);
+ MT_BUG_ON(&new, ret != -EINVAL);
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+
+ /* The new tree is not empty */
+ mt_init_flags(mt, 0);
+ mt_init_flags(&new, 0);
+ mtree_store(&new, 5, xa_mk_value(5), GFP_KERNEL);
+ ret = mtree_dup(mt, &new, GFP_KERNEL);
+ MT_BUG_ON(&new, ret != -EINVAL);
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+
+ /* Test for duplicating full trees. */
+ for (i = 1; i <= 3; i++) {
+ ret = build_full_tree(mt, 0, i);
+ MT_BUG_ON(mt, ret);
+ mt_init_flags(&new, 0);
+
+ ret = mtree_dup(mt, &new, GFP_KERNEL);
+ MT_BUG_ON(&new, ret);
+ mt_validate(&new);
+ if (compare_tree(mt, &new))
+ MT_BUG_ON(&new, 1);
+
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+ }
+
+ for (i = 1; i <= 3; i++) {
+ ret = build_full_tree(mt, MT_FLAGS_ALLOC_RANGE, i);
+ MT_BUG_ON(mt, ret);
+ mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE);
+
+ ret = mtree_dup(mt, &new, GFP_KERNEL);
+ MT_BUG_ON(&new, ret);
+ mt_validate(&new);
+ if (compare_tree(mt, &new))
+ MT_BUG_ON(&new, 1);
+
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+ }
+
+ /* Test for normal duplicating. */
+ for (i = 0; i < 1000; i += 3) {
+ if (i & 1) {
+ mt_init_flags(mt, 0);
+ mt_init_flags(&new, 0);
+ } else {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE);
+ }
+
+ for (j = 0; j < i; j++) {
+ mtree_store_range(mt, j * 10, j * 10 + 5,
+ xa_mk_value(j), GFP_KERNEL);
+ }
+
+ ret = mtree_dup(mt, &new, GFP_KERNEL);
+ MT_BUG_ON(&new, ret);
+ mt_validate(&new);
+ if (compare_tree(mt, &new))
+ MT_BUG_ON(&new, 1);
+
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+ }
+
+ /* Test memory allocation failed. */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i < 30; i += 3) {
+ mtree_store_range(mt, j * 10, j * 10 + 5,
+ xa_mk_value(j), GFP_KERNEL);
+ }
+
+ /* Failed at the first node. */
+ mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE);
+ mt_set_non_kernel(0);
+ ret = mtree_dup(mt, &new, GFP_NOWAIT);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(&new, ret != -ENOMEM);
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+
+ /* Random maple tree fails at a random node. */
+ for (i = 0; i < 1000; i += 3) {
+ if (i & 1) {
+ mt_init_flags(mt, 0);
+ mt_init_flags(&new, 0);
+ } else {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE);
+ }
+
+ for (j = 0; j < i; j++) {
+ mtree_store_range(mt, j * 10, j * 10 + 5,
+ xa_mk_value(j), GFP_KERNEL);
+ }
+ /*
+ * The rand() library function is not used, so we can generate
+ * the same random numbers on any platform.
+ */
+ rand_seed = rand_seed * 1103515245 + 12345;
+ rand = rand_seed / 65536 % 128;
+ mt_set_non_kernel(rand);
+
+ ret = mtree_dup(mt, &new, GFP_NOWAIT);
+ mt_set_non_kernel(0);
+ if (ret != 0) {
+ MT_BUG_ON(&new, ret != -ENOMEM);
+ count++;
+ mtree_destroy(mt);
+ continue;
+ }
+
+ mt_validate(&new);
+ if (compare_tree(mt, &new))
+ MT_BUG_ON(&new, 1);
+
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+ }
+
+ /* pr_info("mtree_dup() fail %d times\n", count); */
+ BUG_ON(!count);
+}
+
extern void test_kmem_cache_bulk(void);
void farmer_tests(void)
@@ -35904,6 +36268,10 @@ void farmer_tests(void)
check_null_expand(&tree);
mtree_destroy(&tree);
+ mt_init_flags(&tree, 0);
+ check_mtree_dup(&tree);
+ mtree_destroy(&tree);
+
/* RCU testing */
mt_init_flags(&tree, 0);
check_erase_testset(&tree);
@@ -35938,7 +36306,9 @@ void farmer_tests(void)
void maple_tree_tests(void)
{
+#if !defined(BENCH)
farmer_tests();
+#endif
maple_tree_seed();
maple_tree_harvest();
}
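check_mtree_dup() above exercises mtree_dup(), whose calling convention is small enough to show directly. This is a sketch rather than code from the patch: the destination tree must be empty and carry the same flags as the source, and a non-zero return (-EINVAL or -ENOMEM) means the copy failed.

static int example_clone_tree(struct maple_tree *src)
{
	DEFINE_MTREE(dst);	/* empty; default flags assumed to match src */
	int ret;

	ret = mtree_dup(src, &dst, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... use dst as an independent copy of src ... */
	mtree_destroy(&dst);
	return 0;
}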
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 8247a7c69c36..601049886963 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -26,6 +26,8 @@ TARGETS += filesystems
TARGETS += filesystems/binderfs
TARGETS += filesystems/epoll
TARGETS += filesystems/fat
+TARGETS += filesystems/overlayfs
+TARGETS += filesystems/statmount
TARGETS += firmware
TARGETS += fpu
TARGETS += ftrace
@@ -43,6 +45,7 @@ TARGETS += landlock
TARGETS += lib
TARGETS += livepatch
TARGETS += lkdtm
+TARGETS += lsm
TARGETS += membarrier
TARGETS += memfd
TARGETS += memory-hotplug
diff --git a/tools/testing/selftests/arm64/abi/tpidr2.c b/tools/testing/selftests/arm64/abi/tpidr2.c
index 351a098b503a..02ee3a91b780 100644
--- a/tools/testing/selftests/arm64/abi/tpidr2.c
+++ b/tools/testing/selftests/arm64/abi/tpidr2.c
@@ -254,6 +254,12 @@ static int write_clone_read(void)
putnum(++tests_run); \
putstr(" " #name "\n");
+#define skip_test(name) \
+ tests_skipped++; \
+ putstr("ok "); \
+ putnum(++tests_run); \
+ putstr(" # SKIP " #name "\n");
+
int main(int argc, char **argv)
{
int ret, i;
@@ -283,13 +289,11 @@ int main(int argc, char **argv)
} else {
putstr("# SME support not present\n");
- for (i = 0; i < EXPECTED_TESTS; i++) {
- putstr("ok ");
- putnum(i);
- putstr(" skipped, TPIDR2 not supported\n");
- }
-
- tests_skipped += EXPECTED_TESTS;
+ skip_test(default_value);
+ skip_test(write_read);
+ skip_test(write_sleep_read);
+ skip_test(write_fork_read);
+ skip_test(write_clone_read);
}
print_summary();
diff --git a/tools/testing/selftests/arm64/fp/sve-test.S b/tools/testing/selftests/arm64/fp/sve-test.S
index 547d077e3517..fff60e2a25ad 100644
--- a/tools/testing/selftests/arm64/fp/sve-test.S
+++ b/tools/testing/selftests/arm64/fp/sve-test.S
@@ -515,6 +515,10 @@ function barf
mov x11, x1 // actual data
mov x12, x2 // data size
+#ifdef SSVE
+ mrs x13, S3_3_C4_C2_2
+#endif
+
puts "Mismatch: PID="
mov x0, x20
bl putdec
@@ -534,6 +538,12 @@ function barf
bl dumphex
puts "]\n"
+#ifdef SSVE
+ puts "\tSVCR: "
+ mov x0, x13
+ bl putdecn
+#endif
+
mov x8, #__NR_getpid
svc #0
// fpsimd.c activity log dump hack
diff --git a/tools/testing/selftests/arm64/fp/vec-syscfg.c b/tools/testing/selftests/arm64/fp/vec-syscfg.c
index 5f648b97a06f..ea9c7d47790f 100644
--- a/tools/testing/selftests/arm64/fp/vec-syscfg.c
+++ b/tools/testing/selftests/arm64/fp/vec-syscfg.c
@@ -66,6 +66,11 @@ static struct vec_data vec_data[] = {
},
};
+static bool vec_type_supported(struct vec_data *data)
+{
+ return getauxval(data->hwcap_type) & data->hwcap;
+}
+
static int stdio_read_integer(FILE *f, const char *what, int *val)
{
int n = 0;
@@ -564,8 +569,11 @@ static void prctl_set_all_vqs(struct vec_data *data)
return;
}
- for (i = 0; i < ARRAY_SIZE(vec_data); i++)
+ for (i = 0; i < ARRAY_SIZE(vec_data); i++) {
+ if (!vec_type_supported(&vec_data[i]))
+ continue;
orig_vls[i] = vec_data[i].rdvl();
+ }
for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) {
vl = sve_vl_from_vq(vq);
@@ -594,7 +602,7 @@ static void prctl_set_all_vqs(struct vec_data *data)
if (&vec_data[i] == data)
continue;
- if (!(getauxval(vec_data[i].hwcap_type) & vec_data[i].hwcap))
+ if (!vec_type_supported(&vec_data[i]))
continue;
if (vec_data[i].rdvl() != orig_vls[i]) {
@@ -765,7 +773,7 @@ int main(void)
struct vec_data *data = &vec_data[i];
unsigned long supported;
- supported = getauxval(data->hwcap_type) & data->hwcap;
+ supported = vec_type_supported(data);
if (!supported)
all_supported = false;
diff --git a/tools/testing/selftests/arm64/fp/za-test.S b/tools/testing/selftests/arm64/fp/za-test.S
index 9dcd70911397..095b45531640 100644
--- a/tools/testing/selftests/arm64/fp/za-test.S
+++ b/tools/testing/selftests/arm64/fp/za-test.S
@@ -333,6 +333,9 @@ function barf
// mov w8, #__NR_exit
// svc #0
// end hack
+
+ mrs x13, S3_3_C4_C2_2
+
smstop
mov x10, x0 // expected data
mov x11, x1 // actual data
@@ -356,6 +359,9 @@ function barf
mov x1, x12
bl dumphex
puts "]\n"
+ puts "\tSVCR: "
+ mov x0, x13
+ bl putdecn
mov x8, #__NR_getpid
svc #0
diff --git a/tools/testing/selftests/arm64/fp/zt-test.S b/tools/testing/selftests/arm64/fp/zt-test.S
index d63286397638..b5c81e81a379 100644
--- a/tools/testing/selftests/arm64/fp/zt-test.S
+++ b/tools/testing/selftests/arm64/fp/zt-test.S
@@ -267,6 +267,8 @@ function barf
// mov w8, #__NR_exit
// svc #0
// end hack
+
+ mrs x13, S3_3_C4_C2_2
smstop
mov x10, x0 // expected data
mov x11, x1 // actual data
@@ -287,6 +289,9 @@ function barf
mov x1, x12
bl dumphex
puts "]\n"
+ puts "\tSVCR: "
+ mov x0, x13
+ bl putdecn
mov x8, #__NR_getpid
svc #0
diff --git a/tools/testing/selftests/cgroup/test_cpuset_prs.sh b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
index a6e9848189d6..b5eb1be2248c 100755
--- a/tools/testing/selftests/cgroup/test_cpuset_prs.sh
+++ b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
@@ -147,71 +147,6 @@ test_add_proc()
}
#
-# Testing the new "isolated" partition root type
-#
-test_isolated()
-{
- cd $CGROUP2/test
- echo 2-3 > cpuset.cpus
- TYPE=$(cat cpuset.cpus.partition)
- [[ $TYPE = member ]] || echo member > cpuset.cpus.partition
-
- console_msg "Change from member to root"
- test_partition root
-
- console_msg "Change from root to isolated"
- test_partition isolated
-
- console_msg "Change from isolated to member"
- test_partition member
-
- console_msg "Change from member to isolated"
- test_partition isolated
-
- console_msg "Change from isolated to root"
- test_partition root
-
- console_msg "Change from root to member"
- test_partition member
-
- #
- # Testing partition root with no cpu
- #
- console_msg "Distribute all cpus to child partition"
- echo +cpuset > cgroup.subtree_control
- test_partition root
-
- mkdir A1
- cd A1
- echo 2-3 > cpuset.cpus
- test_partition root
- test_effective_cpus 2-3
- cd ..
- test_effective_cpus ""
-
- console_msg "Moving task to partition test"
- test_add_proc "No space left"
- cd A1
- test_add_proc ""
- cd ..
-
- console_msg "Shrink and expand child partition"
- cd A1
- echo 2 > cpuset.cpus
- cd ..
- test_effective_cpus 3
- cd A1
- echo 2-3 > cpuset.cpus
- cd ..
- test_effective_cpus ""
-
- # Cleaning up
- console_msg "Cleaning up"
- echo $$ > $CGROUP2/cgroup.procs
- [[ -d A1 ]] && rmdir A1
-}
-
-#
# Cpuset controller state transition test matrix.
#
# Cgroup test hierarchy
@@ -297,14 +232,14 @@ TEST_MATRIX=(
" C0-3:S+ C1-3:S+ C2-3 C4-5 X2-3 X2-3:P1 P2 P1 0 A1:0-1,A2:,A3:2-3,B1:4-5 \
A1:P0,A2:P1,A3:P2,B1:P1 2-3"
" C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3:P1 P2 P1 0 A1:0-1,A2:,A3:2-3,B1:4 \
- A1:P0,A2:P1,A3:P2,B1:P1 2-4"
+ A1:P0,A2:P1,A3:P2,B1:P1 2-4,2-3"
" C0-3:S+ C1-3:S+ C3 C4 X2-3 X2-3:P1 P2 P1 0 A1:0-1,A2:2,A3:3,B1:4 \
- A1:P0,A2:P1,A3:P2,B1:P1 2-4"
+ A1:P0,A2:P1,A3:P2,B1:P1 2-4,3"
" C0-4:S+ C1-4:S+ C2-4 . X2-4 X2-4:P2 X4:P1 . 0 A1:0-1,A2:2-3,A3:4 \
- A1:P0,A2:P2,A3:P1 2-4"
+ A1:P0,A2:P2,A3:P1 2-4,2-3"
" C0-4:X2-4:S+ C1-4:X2-4:S+:P2 C2-4:X4:P1 \
. . X5 . . 0 A1:0-4,A2:1-4,A3:2-4 \
- A1:P0,A2:P-2,A3:P-1 ."
+ A1:P0,A2:P-2,A3:P-1"
" C0-4:X2-4:S+ C1-4:X2-4:S+:P2 C2-4:X4:P1 \
. . . X1 . 0 A1:0-1,A2:2-4,A3:2-4 \
A1:P0,A2:P2,A3:P-1 2-4"
@@ -313,7 +248,7 @@ TEST_MATRIX=(
" C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2:O2=0 . 0 A1:0-1,A2:1,A3:3 A1:P0,A3:P2 2-3"
" C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2:O2=0 O2=1 0 A1:0-1,A2:1,A3:2-3 A1:P0,A3:P2 2-3"
" C0-3:S+ C1-3:S+ C3 . X2-3 X2-3 P2:O3=0 . 0 A1:0-2,A2:1-2,A3: A1:P0,A3:P2 3"
- " C0-3:S+ C1-3:S+ C3 . X2-3 X2-3 T:P2:O3=0 . 0 A1:0-2,A2:1-2,A3:1-2 A1:P0,A3:P-2 3"
+ " C0-3:S+ C1-3:S+ C3 . X2-3 X2-3 T:P2:O3=0 . 0 A1:0-2,A2:1-2,A3:1-2 A1:P0,A3:P-2 3,"
# An invalidated remote partition cannot self-recover from hotplug
" C0-3:S+ C1-3:S+ C2 . X2-3 X2-3 T:P2:O2=0 O2=1 0 A1:0-3,A2:1-3,A3:2 A1:P0,A3:P-2"
@@ -347,10 +282,10 @@ TEST_MATRIX=(
# cpus_allowed/exclusive_cpus update tests
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3 \
. C4 . P2 . 0 A1:4,A2:4,XA2:,XA3:,A3:4 \
- A1:P0,A3:P-2 ."
+ A1:P0,A3:P-2"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3 \
. X1 . P2 . 0 A1:0-3,A2:1-3,XA1:1,XA2:,XA3:,A3:2-3 \
- A1:P0,A3:P-2 ."
+ A1:P0,A3:P-2"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3 \
. . C3 P2 . 0 A1:0-2,A2:0-2,XA2:3,XA3:3,A3:3 \
A1:P0,A3:P2 3"
@@ -359,13 +294,13 @@ TEST_MATRIX=(
A1:P0,A3:P2 3"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3:P2 \
. . X3 . . 0 A1:0-3,A2:1-3,XA2:3,XA3:3,A3:2-3 \
- A1:P0,A3:P-2 ."
+ A1:P0,A3:P-2"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3:P2 \
. . C3 . . 0 A1:0-3,A2:3,XA2:3,XA3:3,A3:3 \
- A1:P0,A3:P-2 ."
+ A1:P0,A3:P-2"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3:P2 \
. C4 . . . 0 A1:4,A2:4,A3:4,XA1:,XA2:,XA3 \
- A1:P0,A3:P-2 ."
+ A1:P0,A3:P-2"
# old-A1 old-A2 old-A3 old-B1 new-A1 new-A2 new-A3 new-B1 fail ECPUs Pstate ISOLCPUS
# ------ ------ ------ ------ ------ ------ ------ ------ ---- ----- ------ --------
@@ -441,7 +376,7 @@ write_cpu_online()
}
fi
echo $VAL > $CPUFILE
- pause 0.01
+ pause 0.05
}
#
@@ -573,12 +508,14 @@ dump_states()
XECPUS=$DIR/cpuset.cpus.exclusive.effective
PRS=$DIR/cpuset.cpus.partition
PCPUS=$DIR/.__DEBUG__.cpuset.cpus.subpartitions
+ ISCPUS=$DIR/cpuset.cpus.isolated
[[ -e $CPUS ]] && echo "$CPUS: $(cat $CPUS)"
[[ -e $XCPUS ]] && echo "$XCPUS: $(cat $XCPUS)"
[[ -e $ECPUS ]] && echo "$ECPUS: $(cat $ECPUS)"
[[ -e $XECPUS ]] && echo "$XECPUS: $(cat $XECPUS)"
[[ -e $PRS ]] && echo "$PRS: $(cat $PRS)"
[[ -e $PCPUS ]] && echo "$PCPUS: $(cat $PCPUS)"
+ [[ -e $ISCPUS ]] && echo "$ISCPUS: $(cat $ISCPUS)"
done
}
@@ -656,11 +593,17 @@ check_cgroup_states()
#
# Get isolated (including offline) CPUs by looking at
-# /sys/kernel/debug/sched/domains and compare that with the expected value.
+# /sys/kernel/debug/sched/domains and cpuset.cpus.isolated control file,
+# if available, and compare that with the expected value.
#
-# Note that a sched domain of just 1 CPU will be considered isolated.
+# Note that isolated CPUs from the sched/domains context include offline
+# CPUs as well as CPUs in non-isolated 1-CPU partitions. Those CPUs may
+# not be included in the cpuset.cpus.isolated control file, which contains
+# only CPUs in isolated partitions.
#
-# $1 - expected isolated cpu list
+# $1 - expected isolated cpu list(s) <isolcpus1>{,<isolcpus2>}
+# <isolcpus1> - expected sched/domains value
+# <isolcpus2> - cpuset.cpus.isolated value = <isolcpus1> if not defined
#
check_isolcpus()
{
@@ -668,8 +611,38 @@ check_isolcpus()
ISOLCPUS=
LASTISOLCPU=
SCHED_DOMAINS=/sys/kernel/debug/sched/domains
+ ISCPUS=${CGROUP2}/cpuset.cpus.isolated
+ if [[ $EXPECT_VAL = . ]]
+ then
+ EXPECT_VAL=
+ EXPECT_VAL2=
+ elif [[ $(expr $EXPECT_VAL : ".*,.*") -gt 0 ]]
+ then
+ set -- $(echo $EXPECT_VAL | sed -e "s/,/ /g")
+ EXPECT_VAL=$1
+ EXPECT_VAL2=$2
+ else
+ EXPECT_VAL2=$EXPECT_VAL
+ fi
+
+ #
+ # Check the debug isolated cpumask, if present
+ #
+ [[ -f $ISCPUS ]] && {
+ ISOLCPUS=$(cat $ISCPUS)
+ [[ "$EXPECT_VAL2" != "$ISOLCPUS" ]] && {
+ # Take a 50ms pause and try again
+ pause 0.05
+ ISOLCPUS=$(cat $ISCPUS)
+ }
+ [[ "$EXPECT_VAL2" != "$ISOLCPUS" ]] && return 1
+ ISOLCPUS=
+ }
+
+ #
+ # Use the sched domain in debugfs to check isolated CPUs, if available
+ #
[[ -d $SCHED_DOMAINS ]] || return 0
- [[ $EXPECT_VAL = . ]] && EXPECT_VAL=
for ((CPU=0; CPU < $NR_CPUS; CPU++))
do
@@ -714,6 +687,26 @@ test_fail()
}
#
+# Check to see if there are unexpected isolated CPUs left
+#
+null_isolcpus_check()
+{
+ [[ $VERBOSE -gt 0 ]] || return 0
+ # Retry a few times before printing error
+ RETRY=0
+ while [[ $RETRY -lt 5 ]]
+ do
+ pause 0.01
+ check_isolcpus "."
+ [[ $? -eq 0 ]] && return 0
+ ((RETRY++))
+ done
+ echo "Unexpected isolated CPUs: $ISOLCPUS"
+ dump_states
+ exit 1
+}
+
+#
# Run cpuset state transition test
# $1 - test matrix name
#
@@ -787,7 +780,7 @@ run_state_test()
#
NEWLIST=$(cat cpuset.cpus.effective)
RETRY=0
- while [[ $NEWLIST != $CPULIST && $RETRY -lt 5 ]]
+ while [[ $NEWLIST != $CPULIST && $RETRY -lt 8 ]]
do
# Wait a bit longer & recheck a few times
pause 0.01
@@ -798,6 +791,7 @@ run_state_test()
echo "Effective cpus changed to $NEWLIST after test $I!"
exit 1
}
+ null_isolcpus_check
[[ $VERBOSE -gt 0 ]] && echo "Test $I done."
((I++))
done
@@ -805,6 +799,72 @@ run_state_test()
}
#
+# Testing the new "isolated" partition root type
+#
+test_isolated()
+{
+ cd $CGROUP2/test
+ echo 2-3 > cpuset.cpus
+ TYPE=$(cat cpuset.cpus.partition)
+ [[ $TYPE = member ]] || echo member > cpuset.cpus.partition
+
+ console_msg "Change from member to root"
+ test_partition root
+
+ console_msg "Change from root to isolated"
+ test_partition isolated
+
+ console_msg "Change from isolated to member"
+ test_partition member
+
+ console_msg "Change from member to isolated"
+ test_partition isolated
+
+ console_msg "Change from isolated to root"
+ test_partition root
+
+ console_msg "Change from root to member"
+ test_partition member
+
+ #
+ # Testing partition root with no cpu
+ #
+ console_msg "Distribute all cpus to child partition"
+ echo +cpuset > cgroup.subtree_control
+ test_partition root
+
+ mkdir A1
+ cd A1
+ echo 2-3 > cpuset.cpus
+ test_partition root
+ test_effective_cpus 2-3
+ cd ..
+ test_effective_cpus ""
+
+ console_msg "Moving task to partition test"
+ test_add_proc "No space left"
+ cd A1
+ test_add_proc ""
+ cd ..
+
+ console_msg "Shrink and expand child partition"
+ cd A1
+ echo 2 > cpuset.cpus
+ cd ..
+ test_effective_cpus 3
+ cd A1
+ echo 2-3 > cpuset.cpus
+ cd ..
+ test_effective_cpus ""
+
+ # Cleaning up
+ console_msg "Cleaning up"
+ echo $$ > $CGROUP2/cgroup.procs
+ [[ -d A1 ]] && rmdir A1
+ null_isolcpus_check
+}
+
+#
# Wait for inotify event for the given file and read it
# $1: cgroup file to wait for
# $2: file to store the read result
diff --git a/tools/testing/selftests/cgroup/test_freezer.c b/tools/testing/selftests/cgroup/test_freezer.c
index ff519029f6f4..8845353aca53 100644
--- a/tools/testing/selftests/cgroup/test_freezer.c
+++ b/tools/testing/selftests/cgroup/test_freezer.c
@@ -740,7 +740,7 @@ static int test_cgfreezer_ptraced(const char *root)
/*
* cg_check_frozen(cgroup, true) will fail here,
- * because the task in in the TRACEd state.
+ * because the task is in the TRACEd state.
*/
if (cg_freeze_wait(cgroup, false))
goto cleanup;
diff --git a/tools/testing/selftests/cgroup/test_zswap.c b/tools/testing/selftests/cgroup/test_zswap.c
index c99d2adaca3f..47fdaa146443 100644
--- a/tools/testing/selftests/cgroup/test_zswap.c
+++ b/tools/testing/selftests/cgroup/test_zswap.c
@@ -50,9 +50,9 @@ static int get_zswap_stored_pages(size_t *value)
return read_int("/sys/kernel/debug/zswap/stored_pages", value);
}
-static int get_zswap_written_back_pages(size_t *value)
+static int get_cg_wb_count(const char *cg)
{
- return read_int("/sys/kernel/debug/zswap/written_back_pages", value);
+ return cg_read_key_long(cg, "memory.stat", "zswp_wb");
}
static long get_zswpout(const char *cgroup)
@@ -73,6 +73,24 @@ static int allocate_bytes(const char *cgroup, void *arg)
return 0;
}
+static char *setup_test_group_1M(const char *root, const char *name)
+{
+ char *group_name = cg_name(root, name);
+
+ if (!group_name)
+ return NULL;
+ if (cg_create(group_name))
+ goto fail;
+ if (cg_write(group_name, "memory.max", "1M")) {
+ cg_destroy(group_name);
+ goto fail;
+ }
+ return group_name;
+fail:
+ free(group_name);
+ return NULL;
+}
+
/*
* Sanity test to check that pages are written into zswap.
*/
@@ -117,43 +135,51 @@ out:
/*
* When trying to store a memcg page in zswap, if the memcg hits its memory
- * limit in zswap, writeback should not be triggered.
- *
- * This was fixed with commit 0bdf0efa180a("zswap: do not shrink if cgroup may
- * not zswap"). Needs to be revised when a per memcg writeback mechanism is
- * implemented.
+ * limit in zswap, writeback should affect only the zswapped pages of that
+ * memcg.
*/
static int test_no_invasive_cgroup_shrink(const char *root)
{
- size_t written_back_before, written_back_after;
int ret = KSFT_FAIL;
- char *test_group;
+ size_t control_allocation_size = MB(10);
+ char *control_allocation = NULL, *wb_group = NULL, *control_group = NULL;
/* Set up */
- test_group = cg_name(root, "no_shrink_test");
- if (!test_group)
- goto out;
- if (cg_create(test_group))
+ wb_group = setup_test_group_1M(root, "per_memcg_wb_test1");
+ if (!wb_group)
+ return KSFT_FAIL;
+ if (cg_write(wb_group, "memory.zswap.max", "10K"))
goto out;
- if (cg_write(test_group, "memory.max", "1M"))
+ control_group = setup_test_group_1M(root, "per_memcg_wb_test2");
+ if (!control_group)
goto out;
- if (cg_write(test_group, "memory.zswap.max", "10K"))
+
+ /* Push some control_group memory into zswap */
+ if (cg_enter_current(control_group))
goto out;
- if (get_zswap_written_back_pages(&written_back_before))
+ control_allocation = malloc(control_allocation_size);
+ if (!control_allocation)
+ goto out;
+ for (int i = 0; i < control_allocation_size; i += 4095)
+ control_allocation[i] = 'a';
+ if (cg_read_key_long(control_group, "memory.stat", "zswapped") < 1)
goto out;
- /* Allocate 10x memory.max to push memory into zswap */
- if (cg_run(test_group, allocate_bytes, (void *)MB(10)))
+ /* Allocate 10x memory.max to push wb_group memory into zswap and trigger wb */
+ if (cg_run(wb_group, allocate_bytes, (void *)MB(10)))
goto out;
- /* Verify that no writeback happened because of the memcg allocation */
- if (get_zswap_written_back_pages(&written_back_after))
- goto out;
- if (written_back_after == written_back_before)
+ /* Verify that only zswapped memory from wb_group has been written back */
+ if (get_cg_wb_count(wb_group) > 0 && get_cg_wb_count(control_group) == 0)
ret = KSFT_PASS;
out:
- cg_destroy(test_group);
- free(test_group);
+ cg_enter_current(root);
+ if (control_group) {
+ cg_destroy(control_group);
+ free(control_group);
+ }
+ cg_destroy(wb_group);
+ free(wb_group);
+ if (control_allocation)
+ free(control_allocation);
return ret;
}
diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile
index b71247ba7196..8a1cc2bf1864 100644
--- a/tools/testing/selftests/damon/Makefile
+++ b/tools/testing/selftests/damon/Makefile
@@ -2,6 +2,7 @@
# Makefile for damon selftests
TEST_GEN_FILES += huge_count_read_write
+TEST_GEN_FILES += access_memory
TEST_FILES = _chk_dependency.sh _debugfs_common.sh
TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh
@@ -9,6 +10,8 @@ TEST_PROGS += debugfs_empty_targets.sh debugfs_huge_count_read_write.sh
TEST_PROGS += debugfs_duplicate_context_creation.sh
TEST_PROGS += debugfs_rm_non_contexts.sh
TEST_PROGS += sysfs.sh sysfs_update_removed_scheme_dir.sh
+TEST_PROGS += sysfs_update_schemes_tried_regions_hang.py
+TEST_PROGS += sysfs_update_schemes_tried_regions_wss_estimation.py
TEST_PROGS += reclaim.sh lru_sort.sh
include ../lib.mk
diff --git a/tools/testing/selftests/damon/_damon_sysfs.py b/tools/testing/selftests/damon/_damon_sysfs.py
new file mode 100644
index 000000000000..e98cf4b6a4b7
--- /dev/null
+++ b/tools/testing/selftests/damon/_damon_sysfs.py
@@ -0,0 +1,322 @@
+# SPDX-License-Identifier: GPL-2.0
+
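+# Minimal wrappers for the DAMON sysfs interface under
+# /sys/kernel/mm/damon/admin, used by the DAMON selftests.
+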
+import os
+
+sysfs_root = '/sys/kernel/mm/damon/admin'
+
+def write_file(path, string):
+ "Returns error string if failed, or None otherwise"
+ string = '%s' % string
+ try:
+ with open(path, 'w') as f:
+ f.write(string)
+ except Exception as e:
+ return '%s' % e
+ return None
+
+def read_file(path):
+ '''Returns the read content and error string. The read content is None if
+ the reading failed'''
+ try:
+ with open(path, 'r') as f:
+ return f.read(), None
+ except Exception as e:
+ return None, '%s' % e
+
+class DamosAccessPattern:
+ size = None
+ nr_accesses = None
+ age = None
+ scheme = None
+
+ def __init__(self, size=None, nr_accesses=None, age=None):
+ self.size = size
+ self.nr_accesses = nr_accesses
+ self.age = age
+
+ if self.size == None:
+ self.size = [0, 2**64 - 1]
+ if self.nr_accesses == None:
+ self.nr_accesses = [0, 2**64 - 1]
+ if self.age == None:
+ self.age = [0, 2**64 - 1]
+
+ def sysfs_dir(self):
+ return os.path.join(self.scheme.sysfs_dir(), 'access_pattern')
+
+ def stage(self):
+ err = write_file(
+ os.path.join(self.sysfs_dir(), 'sz', 'min'), self.size[0])
+ if err != None:
+ return err
+ err = write_file(
+ os.path.join(self.sysfs_dir(), 'sz', 'max'), self.size[1])
+ if err != None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'nr_accesses', 'min'),
+ self.nr_accesses[0])
+ if err != None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'nr_accesses', 'max'),
+ self.nr_accesses[1])
+ if err != None:
+ return err
+ err = write_file(
+ os.path.join(self.sysfs_dir(), 'age', 'min'), self.age[0])
+ if err != None:
+ return err
+ err = write_file(
+ os.path.join(self.sysfs_dir(), 'age', 'max'), self.age[1])
+ if err != None:
+ return err
+
+class Damos:
+ action = None
+ access_pattern = None
+ # todo: Support quotas, watermarks, stats, tried_regions
+ idx = None
+ context = None
+ tried_bytes = None
+
+ def __init__(self, action='stat', access_pattern=DamosAccessPattern()):
+ self.action = action
+ self.access_pattern = access_pattern
+ self.access_pattern.scheme = self
+
+ def sysfs_dir(self):
+ return os.path.join(
+ self.context.sysfs_dir(), 'schemes', '%d' % self.idx)
+
+ def stage(self):
+ err = write_file(os.path.join(self.sysfs_dir(), 'action'), self.action)
+ if err != None:
+ return err
+ err = self.access_pattern.stage()
+ if err != None:
+ return err
+
+ # disable quotas
+ err = write_file(os.path.join(self.sysfs_dir(), 'quotas', 'ms'), '0')
+ if err != None:
+ return err
+ err = write_file(
+ os.path.join(self.sysfs_dir(), 'quotas', 'bytes'), '0')
+ if err != None:
+ return err
+
+ # disable watermarks
+ err = write_file(
+ os.path.join(self.sysfs_dir(), 'watermarks', 'metric'), 'none')
+ if err != None:
+ return err
+
+ # disable filters
+ err = write_file(
+ os.path.join(self.sysfs_dir(), 'filters', 'nr_filters'), '0')
+ if err != None:
+ return err
+
+class DamonTarget:
+ pid = None
+ # todo: Support target regions if test is made
+ idx = None
+ context = None
+
+ def __init__(self, pid):
+ self.pid = pid
+
+ def sysfs_dir(self):
+ return os.path.join(
+ self.context.sysfs_dir(), 'targets', '%d' % self.idx)
+
+ def stage(self):
+ err = write_file(
+ os.path.join(self.sysfs_dir(), 'regions', 'nr_regions'), '0')
+ if err != None:
+ return err
+ return write_file(
+ os.path.join(self.sysfs_dir(), 'pid_target'), self.pid)
+
+class DamonAttrs:
+ sample_us = None
+ aggr_us = None
+ update_us = None
+ min_nr_regions = None
+ max_nr_regions = None
+ context = None
+
+ def __init__(self, sample_us=5000, aggr_us=100000, update_us=1000000,
+ min_nr_regions=10, max_nr_regions=1000):
+ self.sample_us = sample_us
+ self.aggr_us = aggr_us
+ self.update_us = update_us
+ self.min_nr_regions = min_nr_regions
+ self.max_nr_regions = max_nr_regions
+
+ def interval_sysfs_dir(self):
+ return os.path.join(self.context.sysfs_dir(), 'monitoring_attrs',
+ 'intervals')
+
+ def nr_regions_range_sysfs_dir(self):
+ return os.path.join(self.context.sysfs_dir(), 'monitoring_attrs',
+ 'nr_regions')
+
+ def stage(self):
+ err = write_file(os.path.join(self.interval_sysfs_dir(), 'sample_us'),
+ self.sample_us)
+ if err != None:
+ return err
+ err = write_file(os.path.join(self.interval_sysfs_dir(), 'aggr_us'),
+ self.aggr_us)
+ if err != None:
+ return err
+ err = write_file(os.path.join(self.interval_sysfs_dir(), 'update_us'),
+ self.update_us)
+ if err != None:
+ return err
+
+ err = write_file(
+ os.path.join(self.nr_regions_range_sysfs_dir(), 'min'),
+ self.min_nr_regions)
+ if err != None:
+ return err
+
+ err = write_file(
+ os.path.join(self.nr_regions_range_sysfs_dir(), 'max'),
+ self.max_nr_regions)
+ if err != None:
+ return err
+
+class DamonCtx:
+ ops = None
+ monitoring_attrs = None
+ targets = None
+ schemes = None
+ kdamond = None
+ idx = None
+
+ def __init__(self, ops='paddr', monitoring_attrs=DamonAttrs(), targets=[],
+ schemes=[]):
+ self.ops = ops
+ self.monitoring_attrs = monitoring_attrs
+ self.monitoring_attrs.context = self
+
+ self.targets = targets
+ for idx, target in enumerate(self.targets):
+ target.idx = idx
+ target.context = self
+
+ self.schemes = schemes
+ for idx, scheme in enumerate(self.schemes):
+ scheme.idx = idx
+ scheme.context = self
+
+ def sysfs_dir(self):
+ return os.path.join(self.kdamond.sysfs_dir(), 'contexts',
+ '%d' % self.idx)
+
+ def stage(self):
+ err = write_file(
+ os.path.join(self.sysfs_dir(), 'operations'), self.ops)
+ if err != None:
+ return err
+ err = self.monitoring_attrs.stage()
+ if err != None:
+ return err
+
+ nr_targets_file = os.path.join(
+ self.sysfs_dir(), 'targets', 'nr_targets')
+ content, err = read_file(nr_targets_file)
+ if err != None:
+ return err
+ if int(content) != len(self.targets):
+ err = write_file(nr_targets_file, '%d' % len(self.targets))
+ if err != None:
+ return err
+ for target in self.targets:
+ err = target.stage()
+ if err != None:
+ return err
+
+ nr_schemes_file = os.path.join(
+ self.sysfs_dir(), 'schemes', 'nr_schemes')
+ content, err = read_file(nr_schemes_file)
+ if err != None:
+ return err
+ if int(content) != len(self.schemes):
+ err = write_file(nr_schemes_file, '%d' % len(self.schemes))
+ if err != None:
+ return err
+ for scheme in self.schemes:
+ err = scheme.stage()
+ if err != None:
+ return err
+ return None
+
+class Kdamond:
+ state = None
+ pid = None
+ contexts = None
+ idx = None # index of this kdamond between siblings
+ kdamonds = None # parent
+
+ def __init__(self, contexts=[]):
+ self.contexts = contexts
+ for idx, context in enumerate(self.contexts):
+ context.idx = idx
+ context.kdamond = self
+
+ def sysfs_dir(self):
+ return os.path.join(self.kdamonds.sysfs_dir(), '%d' % self.idx)
+
+ def start(self):
+ nr_contexts_file = os.path.join(self.sysfs_dir(),
+ 'contexts', 'nr_contexts')
+ content, err = read_file(nr_contexts_file)
+ if err != None:
+ return err
+ if int(content) != len(self.contexts):
+ err = write_file(nr_contexts_file, '%d' % len(self.contexts))
+ if err != None:
+ return err
+
+ for context in self.contexts:
+ err = context.stage()
+ if err != None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'state'), 'on')
+ return err
+
+ def update_schemes_tried_bytes(self):
+ err = write_file(os.path.join(self.sysfs_dir(), 'state'),
+ 'update_schemes_tried_bytes')
+ if err != None:
+ return err
+ for context in self.contexts:
+ for scheme in context.schemes:
+ content, err = read_file(os.path.join(scheme.sysfs_dir(),
+ 'tried_regions', 'total_bytes'))
+ if err != None:
+ return err
+ scheme.tried_bytes = int(content)
+
+class Kdamonds:
+ kdamonds = []
+
+ def __init__(self, kdamonds=[]):
+ self.kdamonds = kdamonds
+ for idx, kdamond in enumerate(self.kdamonds):
+ kdamond.idx = idx
+ kdamond.kdamonds = self
+
+ def sysfs_dir(self):
+ return os.path.join(sysfs_root, 'kdamonds')
+
+ def start(self):
+ err = write_file(os.path.join(self.sysfs_dir(), 'nr_kdamonds'),
+ '%s' % len(self.kdamonds))
+ if err != None:
+ return err
+ for kdamond in self.kdamonds:
+ err = kdamond.start()
+ if err != None:
+ return err
+ return None
diff --git a/tools/testing/selftests/damon/access_memory.c b/tools/testing/selftests/damon/access_memory.c
new file mode 100644
index 000000000000..585a2fa54329
--- /dev/null
+++ b/tools/testing/selftests/damon/access_memory.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Artificial memory access program for testing DAMON.
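+ *
+ * Usage: access_memory <nr_regions> <region size (bytes)> <access time (ms)>
+ *
+ * Allocates <nr_regions> regions of <region size> bytes each, then accesses
+ * the regions one by one, each for <access time> milliseconds.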
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+int main(int argc, char *argv[])
+{
+ char **regions;
+ clock_t start_clock;
+ int nr_regions;
+ int sz_region;
+ int access_time_ms;
+ int i;
+
+ if (argc != 4) {
+ printf("Usage: %s <number> <size (bytes)> <time (ms)>\n",
+ argv[0]);
+ return -1;
+ }
+
+ nr_regions = atoi(argv[1]);
+ sz_region = atoi(argv[2]);
+ access_time_ms = atoi(argv[3]);
+
+ regions = malloc(sizeof(*regions) * nr_regions);
+ for (i = 0; i < nr_regions; i++)
+ regions[i] = malloc(sz_region);
+
+ for (i = 0; i < nr_regions; i++) {
+ start_clock = clock();
+ while ((clock() - start_clock) * 1000 / CLOCKS_PER_SEC <
+ access_time_ms)
+ memset(regions[i], i, sz_region);
+ }
+ return 0;
+}
diff --git a/tools/testing/selftests/damon/sysfs.sh b/tools/testing/selftests/damon/sysfs.sh
index 56f0230a8b92..e9a976d296e2 100755
--- a/tools/testing/selftests/damon/sysfs.sh
+++ b/tools/testing/selftests/damon/sysfs.sh
@@ -150,6 +150,32 @@ test_weights()
ensure_file "$weights_dir/age_permil" "exist" "600"
}
+test_goal()
+{
+ goal_dir=$1
+ ensure_dir "$goal_dir" "exist"
+ ensure_file "$goal_dir/target_value" "exist" "600"
+ ensure_file "$goal_dir/current_value" "exist" "600"
+}
+
+test_goals()
+{
+ goals_dir=$1
+ ensure_dir "$goals_dir" "exist"
+ ensure_file "$goals_dir/nr_goals" "exist" "600"
+
+ ensure_write_succ "$goals_dir/nr_goals" "1" "valid input"
+ test_goal "$goals_dir/0"
+
+ ensure_write_succ "$goals_dir/nr_goals" "2" "valid input"
+ test_goal "$goals_dir/0"
+ test_goal "$goals_dir/1"
+
+ ensure_write_succ "$goals_dir/nr_goals" "0" "valid input"
+ ensure_dir "$goals_dir/0" "not_exist"
+ ensure_dir "$goals_dir/1" "not_exist"
+}
+
test_quotas()
{
quotas_dir=$1
@@ -158,6 +184,7 @@ test_quotas()
ensure_file "$quotas_dir/bytes" "exist" 600
ensure_file "$quotas_dir/reset_interval_ms" "exist" 600
test_weights "$quotas_dir/weights"
+ test_goals "$quotas_dir/goals"
}
test_access_pattern()
diff --git a/tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_hang.py b/tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_hang.py
new file mode 100644
index 000000000000..8c690ba1a573
--- /dev/null
+++ b/tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_hang.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+import subprocess
+import time
+
+import _damon_sysfs
+
+def main():
+ proc = subprocess.Popen(['sleep', '2'])
+ kdamonds = _damon_sysfs.Kdamonds([_damon_sysfs.Kdamond(
+ contexts=[_damon_sysfs.DamonCtx(
+ ops='vaddr',
+ targets=[_damon_sysfs.DamonTarget(pid=proc.pid)],
+ schemes=[_damon_sysfs.Damos(
+ access_pattern=_damon_sysfs.DamosAccessPattern(
+ nr_accesses=[200, 200]))] # schemes
+ )] # contexts
+ )]) # kdamonds
+
+ err = kdamonds.start()
+ if err != None:
+ print('kdamond start failed: %s' % err)
+ exit(1)
+
+ while proc.poll() == None:
+ err = kdamonds.kdamonds[0].update_schemes_tried_bytes()
+ if err != None:
+ print('tried bytes update failed: %s' % err)
+ exit(1)
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_wss_estimation.py b/tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_wss_estimation.py
new file mode 100644
index 000000000000..cdbf19b442c9
--- /dev/null
+++ b/tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_wss_estimation.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+import subprocess
+import time
+
+import _damon_sysfs
+
+def main():
+ # access two 10 MiB memory regions, 2 seconds each
+ sz_region = 10 * 1024 * 1024
+ proc = subprocess.Popen(['./access_memory', '2', '%d' % sz_region, '2000'])
+ kdamonds = _damon_sysfs.Kdamonds([_damon_sysfs.Kdamond(
+ contexts=[_damon_sysfs.DamonCtx(
+ ops='vaddr',
+ targets=[_damon_sysfs.DamonTarget(pid=proc.pid)],
+ schemes=[_damon_sysfs.Damos(
+ access_pattern=_damon_sysfs.DamosAccessPattern(
+ # >= 25% access rate, >= 200ms age
+ nr_accesses=[5, 20], age=[2, 2**64 - 1]))] # schemes
+ )] # contexts
+ )]) # kdamonds
+
+ err = kdamonds.start()
+ if err != None:
+ print('kdamond start failed: %s' % err)
+ exit(1)
+
+ wss_collected = []
+ while proc.poll() == None:
+ time.sleep(0.1)
+ err = kdamonds.kdamonds[0].update_schemes_tried_bytes()
+ if err != None:
+ print('tried bytes update failed: %s' % err)
+ exit(1)
+
+ wss_collected.append(
+ kdamonds.kdamonds[0].contexts[0].schemes[0].tried_bytes)
+
+ wss_collected.sort()
+ acceptable_error_rate = 0.2
+ for percentile in [50, 75]:
+ sample = wss_collected[int(len(wss_collected) * percentile / 100)]
+ error_rate = abs(sample - sz_region) / sz_region
+ print('%d-th percentile (%d) error %f' %
+ (percentile, sample, error_rate))
+ if error_rate > acceptable_error_rate:
+ print('the error rate is not acceptable (> %f)' %
+ acceptable_error_rate)
+ print('samples are as below')
+ print('\n'.join(['%d' % wss for wss in wss_collected]))
+ exit(1)
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
index 4917dbb35a44..5667febee328 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
@@ -30,16 +30,16 @@ ip netns exec server ip addr add ${server_ip4}/24 dev eth0
ip netns exec client ip link add dev bond0 down type bond mode 1 \
miimon 100 all_slaves_active 1
-ip netns exec client ip link set dev eth0 down master bond0
+ip netns exec client ip link set dev eth0 master bond0
ip netns exec client ip link set dev bond0 up
ip netns exec client ip addr add ${client_ip4}/24 dev bond0
ip netns exec client ping -c 5 $server_ip4 >/dev/null
-ip netns exec client ip link set dev eth0 down nomaster
+ip netns exec client ip link set dev eth0 nomaster
ip netns exec client ip link set dev bond0 down
ip netns exec client ip link set dev bond0 type bond mode 0 \
arp_interval 1000 arp_ip_target "+${server_ip4}"
-ip netns exec client ip link set dev eth0 down master bond0
+ip netns exec client ip link set dev eth0 master bond0
ip netns exec client ip link set dev bond0 up
ip netns exec client ping -c 5 $server_ip4 >/dev/null
diff --git a/tools/testing/selftests/filesystems/overlayfs/.gitignore b/tools/testing/selftests/filesystems/overlayfs/.gitignore
new file mode 100644
index 000000000000..52ae618fdd98
--- /dev/null
+++ b/tools/testing/selftests/filesystems/overlayfs/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+dev_in_maps
diff --git a/tools/testing/selftests/filesystems/overlayfs/Makefile b/tools/testing/selftests/filesystems/overlayfs/Makefile
new file mode 100644
index 000000000000..56b2b48a765b
--- /dev/null
+++ b/tools/testing/selftests/filesystems/overlayfs/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+TEST_GEN_PROGS := dev_in_maps
+
+CFLAGS := -Wall -Werror
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c b/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
new file mode 100644
index 000000000000..e19ab0e85709
--- /dev/null
+++ b/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+
+#include <inttypes.h>
+#include <unistd.h>
+#include <stdio.h>
+
+#include <linux/unistd.h>
+#include <linux/types.h>
+#include <linux/mount.h>
+#include <sys/syscall.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+#include <sys/mman.h>
+#include <sched.h>
+#include <fcntl.h>
+
+#include "../../kselftest.h"
+#include "log.h"
+
+static int sys_fsopen(const char *fsname, unsigned int flags)
+{
+ return syscall(__NR_fsopen, fsname, flags);
+}
+
+static int sys_fsconfig(int fd, unsigned int cmd, const char *key, const char *value, int aux)
+{
+ return syscall(__NR_fsconfig, fd, cmd, key, value, aux);
+}
+
+static int sys_fsmount(int fd, unsigned int flags, unsigned int attr_flags)
+{
+ return syscall(__NR_fsmount, fd, flags, attr_flags);
+}
+
+static int sys_move_mount(int from_dfd, const char *from_pathname,
+ int to_dfd, const char *to_pathname,
+ unsigned int flags)
+{
+ return syscall(__NR_move_mount, from_dfd, from_pathname, to_dfd, to_pathname, flags);
+}
+
+static long get_file_dev_and_inode(void *addr, struct statx *stx)
+{
+ char buf[4096];
+ FILE *mapf;
+
+ mapf = fopen("/proc/self/maps", "r");
+ if (mapf == NULL)
+ return pr_perror("fopen(/proc/self/maps)");
+
+ while (fgets(buf, sizeof(buf), mapf)) {
+ unsigned long start, end;
+ uint32_t maj, min;
+ __u64 ino;
+
+ if (sscanf(buf, "%lx-%lx %*s %*s %x:%x %llu",
+ &start, &end, &maj, &min, &ino) != 5)
+ return pr_perror("unable to parse: %s", buf);
+ if (start == (unsigned long)addr) {
+ stx->stx_dev_major = maj;
+ stx->stx_dev_minor = min;
+ stx->stx_ino = ino;
+ return 0;
+ }
+ }
+
+ return pr_err("unable to find the mapping");
+}
+
+static int ovl_mount(void)
+{
+ int tmpfs, fsfd, ovl;
+
+ fsfd = sys_fsopen("tmpfs", 0);
+ if (fsfd == -1)
+ return pr_perror("fsopen(tmpfs)");
+
+ if (sys_fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) == -1)
+ return pr_perror("FSCONFIG_CMD_CREATE");
+
+ tmpfs = sys_fsmount(fsfd, 0, 0);
+ if (tmpfs == -1)
+ return pr_perror("fsmount");
+
+ close(fsfd);
+
+ /* overlayfs can't be constructed on top of a detached mount. */
+ if (sys_move_mount(tmpfs, "", AT_FDCWD, "/tmp", MOVE_MOUNT_F_EMPTY_PATH))
+ return pr_perror("move_mount");
+ close(tmpfs);
+
+ if (mkdir("/tmp/w", 0755) == -1 ||
+ mkdir("/tmp/u", 0755) == -1 ||
+ mkdir("/tmp/l", 0755) == -1)
+ return pr_perror("mkdir");
+
+ fsfd = sys_fsopen("overlay", 0);
+ if (fsfd == -1)
+ return pr_perror("fsopen(overlay)");
+ if (sys_fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "test", 0) == -1 ||
+ sys_fsconfig(fsfd, FSCONFIG_SET_STRING, "lowerdir", "/tmp/l", 0) == -1 ||
+ sys_fsconfig(fsfd, FSCONFIG_SET_STRING, "upperdir", "/tmp/u", 0) == -1 ||
+ sys_fsconfig(fsfd, FSCONFIG_SET_STRING, "workdir", "/tmp/w", 0) == -1)
+ return pr_perror("fsconfig");
+ if (sys_fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) == -1)
+ return pr_perror("fsconfig");
+ ovl = sys_fsmount(fsfd, 0, 0);
+ if (ovl == -1)
+ return pr_perror("fsmount");
+
+ return ovl;
+}
+
+/*
+ * Check that the file device and inode shown in /proc/pid/maps match values
+ * returned by stat(2).
+ */
+static int test(void)
+{
+ struct statx stx, mstx;
+ int ovl, fd;
+ void *addr;
+
+ ovl = ovl_mount();
+ if (ovl == -1)
+ return -1;
+
+ fd = openat(ovl, "test", O_RDWR | O_CREAT, 0644);
+ if (fd == -1)
+ return pr_perror("openat");
+
+ addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_FILE | MAP_SHARED, fd, 0);
+ if (addr == MAP_FAILED)
+ return pr_perror("mmap");
+
+ if (get_file_dev_and_inode(addr, &mstx))
+ return -1;
+ if (statx(fd, "", AT_EMPTY_PATH | AT_STATX_SYNC_AS_STAT, STATX_INO, &stx))
+ return pr_perror("statx");
+
+ if (stx.stx_dev_major != mstx.stx_dev_major ||
+ stx.stx_dev_minor != mstx.stx_dev_minor ||
+ stx.stx_ino != mstx.stx_ino)
+ return pr_fail("unmatched dev:ino %x:%x:%llx (expected %x:%x:%llx)\n",
+ mstx.stx_dev_major, mstx.stx_dev_minor, mstx.stx_ino,
+ stx.stx_dev_major, stx.stx_dev_minor, stx.stx_ino);
+
+ ksft_test_result_pass("devices are matched\n");
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int fsfd;
+
+ fsfd = sys_fsopen("overlay", 0);
+ if (fsfd == -1) {
+ ksft_test_result_skip("unable to create overlay mount\n");
+ return 1;
+ }
+ close(fsfd);
+
+ /* Create a new mount namespace so that test mounts are cleaned up automatically. */
+ if (unshare(CLONE_NEWNS) == -1) {
+ ksft_test_result_skip("unable to create a new mount namespace\n");
+ return 1;
+ }
+
+ if (mount(NULL, "/", NULL, MS_SLAVE | MS_REC, NULL) == -1) {
+ pr_perror("mount");
+ return 1;
+ }
+
+ ksft_set_plan(1);
+
+ if (test())
+ return 1;
+
+ ksft_exit_pass();
+ return 0;
+}
diff --git a/tools/testing/selftests/filesystems/overlayfs/log.h b/tools/testing/selftests/filesystems/overlayfs/log.h
new file mode 100644
index 000000000000..db64df2a8483
--- /dev/null
+++ b/tools/testing/selftests/filesystems/overlayfs/log.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __SELFTEST_OVERLAYFS_LOG_H__
+#define __SELFTEST_OVERLAYFS_LOG_H__
+
+#define pr_msg(fmt, lvl, ...) \
+ ksft_print_msg("[%s] (%s:%d)\t" fmt "\n", \
+ lvl, __FILE__, __LINE__, ##__VA_ARGS__)
+
+#define pr_p(func, fmt, ...) func(fmt ": %m", ##__VA_ARGS__)
+
+#define pr_err(fmt, ...) \
+ ({ \
+ ksft_test_result_error(fmt "\n", ##__VA_ARGS__); \
+ -1; \
+ })
+
+#define pr_fail(fmt, ...) \
+ ({ \
+ ksft_test_result_fail(fmt, ##__VA_ARGS__); \
+ -1; \
+ })
+
+#define pr_perror(fmt, ...) pr_p(pr_err, fmt, ##__VA_ARGS__)
+
+#endif
diff --git a/tools/testing/selftests/filesystems/statmount/.gitignore b/tools/testing/selftests/filesystems/statmount/.gitignore
new file mode 100644
index 000000000000..82a4846cbc4b
--- /dev/null
+++ b/tools/testing/selftests/filesystems/statmount/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/*_test
diff --git a/tools/testing/selftests/filesystems/statmount/Makefile b/tools/testing/selftests/filesystems/statmount/Makefile
new file mode 100644
index 000000000000..07a0d5b545ca
--- /dev/null
+++ b/tools/testing/selftests/filesystems/statmount/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+CFLAGS += -Wall -O2 -g $(KHDR_INCLUDES)
+TEST_GEN_PROGS := statmount_test
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/statmount/statmount_test.c b/tools/testing/selftests/filesystems/statmount/statmount_test.c
new file mode 100644
index 000000000000..3eafd7da58e2
--- /dev/null
+++ b/tools/testing/selftests/filesystems/statmount/statmount_test.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#define _GNU_SOURCE
+
+#include <assert.h>
+#include <stdint.h>
+#include <sched.h>
+#include <fcntl.h>
+#include <sys/param.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/statfs.h>
+#include <linux/mount.h>
+#include <linux/stat.h>
+#include <asm/unistd.h>
+
+#include "../../kselftest.h"
+
+static const char *const known_fs[] = {
+ "9p", "adfs", "affs", "afs", "aio", "anon_inodefs", "apparmorfs",
+ "autofs", "bcachefs", "bdev", "befs", "bfs", "binder", "binfmt_misc",
+ "bpf", "btrfs", "btrfs_test_fs", "ceph", "cgroup", "cgroup2", "cifs",
+ "coda", "configfs", "cpuset", "cramfs", "cxl", "dax", "debugfs",
+ "devpts", "devtmpfs", "dmabuf", "drm", "ecryptfs", "efivarfs", "efs",
+ "erofs", "exfat", "ext2", "ext3", "ext4", "f2fs", "functionfs",
+ "fuse", "fuseblk", "fusectl", "gadgetfs", "gfs2", "gfs2meta", "hfs",
+ "hfsplus", "hostfs", "hpfs", "hugetlbfs", "ibmasmfs", "iomem",
+ "ipathfs", "iso9660", "jffs2", "jfs", "minix", "mqueue", "msdos",
+ "nfs", "nfs4", "nfsd", "nilfs2", "nsfs", "ntfs", "ntfs3", "ocfs2",
+ "ocfs2_dlmfs", "ocxlflash", "omfs", "openpromfs", "overlay", "pipefs",
+ "proc", "pstore", "pvfs2", "qnx4", "qnx6", "ramfs", "reiserfs",
+ "resctrl", "romfs", "rootfs", "rpc_pipefs", "s390_hypfs", "secretmem",
+ "securityfs", "selinuxfs", "smackfs", "smb3", "sockfs", "spufs",
+ "squashfs", "sysfs", "sysv", "tmpfs", "tracefs", "ubifs", "udf",
+ "ufs", "v7", "vboxsf", "vfat", "virtiofs", "vxfs", "xenfs", "xfs",
+ "zonefs", NULL };
+
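+/*
+ * Thin wrapper for the statmount() syscall: fills "buf" with information
+ * about the mount identified by "mnt_id"; returns 0 or -1 with errno set.
+ */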
+static int statmount(uint64_t mnt_id, uint64_t mask, struct statmount *buf,
+ size_t bufsize, unsigned int flags)
+{
+ struct mnt_id_req req = {
+ .size = MNT_ID_REQ_SIZE_VER0,
+ .mnt_id = mnt_id,
+ .param = mask,
+ };
+
+ return syscall(__NR_statmount, &req, buf, bufsize, flags);
+}
+
+static struct statmount *statmount_alloc(uint64_t mnt_id, uint64_t mask, unsigned int flags)
+{
+ size_t bufsize = 1 << 15;
+ struct statmount *buf = NULL, *tmp = alloca(bufsize);
+ int tofree = 0;
+ int ret;
+
+ for (;;) {
+ ret = statmount(mnt_id, mask, tmp, bufsize, flags);
+ if (ret != -1)
+ break;
+ if (tofree)
+ free(tmp);
+ if (errno != EOVERFLOW)
+ return NULL;
+ bufsize <<= 1;
+ tofree = 1;
+ tmp = malloc(bufsize);
+ if (!tmp)
+ return NULL;
+ }
+ buf = malloc(tmp->size);
+ if (buf)
+ memcpy(buf, tmp, tmp->size);
+ if (tofree)
+ free(tmp);
+
+ return buf;
+}
+
+static void write_file(const char *path, const char *val)
+{
+ int fd = open(path, O_WRONLY);
+ size_t len = strlen(val);
+ int ret;
+
+ if (fd == -1)
+ ksft_exit_fail_msg("opening %s for write: %s\n", path, strerror(errno));
+
+ ret = write(fd, val, len);
+ if (ret == -1)
+ ksft_exit_fail_msg("writing to %s: %s\n", path, strerror(errno));
+ if (ret != len)
+ ksft_exit_fail_msg("short write to %s\n", path);
+
+ ret = close(fd);
+ if (ret == -1)
+ ksft_exit_fail_msg("closing %s\n", path);
+}
+
+static uint64_t get_mnt_id(const char *name, const char *path, uint64_t mask)
+{
+ struct statx sx;
+ int ret;
+
+ ret = statx(AT_FDCWD, path, 0, mask, &sx);
+ if (ret == -1)
+ ksft_exit_fail_msg("retrieving %s mount ID for %s: %s\n",
+ mask & STATX_MNT_ID_UNIQUE ? "unique" : "old",
+ name, strerror(errno));
+ if (!(sx.stx_mask & mask))
+ ksft_exit_fail_msg("no %s mount ID available for %s\n",
+ mask & STATX_MNT_ID_UNIQUE ? "unique" : "old",
+ name);
+
+ return sx.stx_mnt_id;
+}
+
+
+static char root_mntpoint[] = "/tmp/statmount_test_root.XXXXXX";
+static int orig_root;
+static uint64_t root_id, parent_id;
+static uint32_t old_root_id, old_parent_id;
+
+
+static void cleanup_namespace(void)
+{
+ fchdir(orig_root);
+ chroot(".");
+ umount2(root_mntpoint, MNT_DETACH);
+ rmdir(root_mntpoint);
+}
+
+static void setup_namespace(void)
+{
+ int ret;
+ char buf[32];
+ uid_t uid = getuid();
+ gid_t gid = getgid();
+
+ ret = unshare(CLONE_NEWNS|CLONE_NEWUSER);
+ if (ret == -1)
+ ksft_exit_fail_msg("unsharing mountns and userns: %s\n",
+ strerror(errno));
+
+ sprintf(buf, "0 %d 1", uid);
+ write_file("/proc/self/uid_map", buf);
+ write_file("/proc/self/setgroups", "deny");
+ sprintf(buf, "0 %d 1", gid);
+ write_file("/proc/self/gid_map", buf);
+
+ ret = mount("", "/", NULL, MS_REC|MS_PRIVATE, NULL);
+ if (ret == -1)
+ ksft_exit_fail_msg("making mount tree private: %s\n",
+ strerror(errno));
+
+ if (!mkdtemp(root_mntpoint))
+ ksft_exit_fail_msg("creating temporary directory %s: %s\n",
+ root_mntpoint, strerror(errno));
+
+ old_parent_id = get_mnt_id("parent", root_mntpoint, STATX_MNT_ID);
+ parent_id = get_mnt_id("parent", root_mntpoint, STATX_MNT_ID_UNIQUE);
+
+ orig_root = open("/", O_PATH);
+ if (orig_root == -1)
+ ksft_exit_fail_msg("opening root directory: %s",
+ strerror(errno));
+
+ atexit(cleanup_namespace);
+
+ ret = mount(root_mntpoint, root_mntpoint, NULL, MS_BIND, NULL);
+ if (ret == -1)
+ ksft_exit_fail_msg("mounting temp root %s: %s\n",
+ root_mntpoint, strerror(errno));
+
+ ret = chroot(root_mntpoint);
+ if (ret == -1)
+ ksft_exit_fail_msg("chroot to temp root %s: %s\n",
+ root_mntpoint, strerror(errno));
+
+ ret = chdir("/");
+ if (ret == -1)
+ ksft_exit_fail_msg("chdir to root: %s\n", strerror(errno));
+
+ old_root_id = get_mnt_id("root", "/", STATX_MNT_ID);
+ root_id = get_mnt_id("root", "/", STATX_MNT_ID_UNIQUE);
+}
+
+static int setup_mount_tree(int log2_num)
+{
+ int ret, i;
+
+ ret = mount("", "/", NULL, MS_REC|MS_SHARED, NULL);
+ if (ret == -1) {
+ ksft_test_result_fail("making mount tree shared: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ for (i = 0; i < log2_num; i++) {
+ ret = mount("/", "/", NULL, MS_BIND, NULL);
+ if (ret == -1) {
+ ksft_test_result_fail("mounting submount %s: %s\n",
+ root_mntpoint, strerror(errno));
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static ssize_t listmount(uint64_t mnt_id, uint64_t last_mnt_id,
+ uint64_t list[], size_t num, unsigned int flags)
+{
+ struct mnt_id_req req = {
+ .size = MNT_ID_REQ_SIZE_VER0,
+ .mnt_id = mnt_id,
+ .param = last_mnt_id,
+ };
+
+ return syscall(__NR_listmount, &req, list, num, flags);
+}
+
+static void test_listmount_empty_root(void)
+{
+ ssize_t res;
+ const unsigned int size = 32;
+ uint64_t list[size];
+
+ res = listmount(LSMT_ROOT, 0, list, size, 0);
+ if (res == -1) {
+ ksft_test_result_fail("listmount: %s\n", strerror(errno));
+ return;
+ }
+ if (res != 1) {
+ ksft_test_result_fail("listmount result is %zi != 1\n", res);
+ return;
+ }
+
+ if (list[0] != root_id) {
+ ksft_test_result_fail("listmount ID doesn't match 0x%llx != 0x%llx\n",
+ (unsigned long long) list[0],
+ (unsigned long long) root_id);
+ return;
+ }
+
+ ksft_test_result_pass("listmount empty root\n");
+}
+
+static void test_statmount_zero_mask(void)
+{
+ struct statmount sm;
+ int ret;
+
+ ret = statmount(root_id, 0, &sm, sizeof(sm), 0);
+ if (ret == -1) {
+ ksft_test_result_fail("statmount zero mask: %s\n",
+ strerror(errno));
+ return;
+ }
+ if (sm.size != sizeof(sm)) {
+ ksft_test_result_fail("unexpected size: %u != %u\n",
+ sm.size, (uint32_t) sizeof(sm));
+ return;
+ }
+ if (sm.mask != 0) {
+ ksft_test_result_fail("unexpected mask: 0x%llx != 0x0\n",
+ (unsigned long long) sm.mask);
+ return;
+ }
+
+ ksft_test_result_pass("statmount zero mask\n");
+}
+
+static void test_statmount_mnt_basic(void)
+{
+ struct statmount sm;
+ int ret;
+ uint64_t mask = STATMOUNT_MNT_BASIC;
+
+ ret = statmount(root_id, mask, &sm, sizeof(sm), 0);
+ if (ret == -1) {
+ ksft_test_result_fail("statmount mnt basic: %s\n",
+ strerror(errno));
+ return;
+ }
+ if (sm.size != sizeof(sm)) {
+ ksft_test_result_fail("unexpected size: %u != %u\n",
+ sm.size, (uint32_t) sizeof(sm));
+ return;
+ }
+ if (sm.mask != mask) {
+ ksft_test_result_skip("statmount mnt basic unavailable\n");
+ return;
+ }
+
+ if (sm.mnt_id != root_id) {
+ ksft_test_result_fail("unexpected root ID: 0x%llx != 0x%llx\n",
+ (unsigned long long) sm.mnt_id,
+ (unsigned long long) root_id);
+ return;
+ }
+
+ if (sm.mnt_id_old != old_root_id) {
+ ksft_test_result_fail("unexpected old root ID: %u != %u\n",
+ sm.mnt_id_old, old_root_id);
+ return;
+ }
+
+ if (sm.mnt_parent_id != parent_id) {
+ ksft_test_result_fail("unexpected parent ID: 0x%llx != 0x%llx\n",
+ (unsigned long long) sm.mnt_parent_id,
+ (unsigned long long) parent_id);
+ return;
+ }
+
+ if (sm.mnt_parent_id_old != old_parent_id) {
+ ksft_test_result_fail("unexpected old parent ID: %u != %u\n",
+ sm.mnt_parent_id_old, old_parent_id);
+ return;
+ }
+
+ if (sm.mnt_propagation != MS_PRIVATE) {
+ ksft_test_result_fail("unexpected propagation: 0x%llx\n",
+ (unsigned long long) sm.mnt_propagation);
+ return;
+ }
+
+ ksft_test_result_pass("statmount mnt basic\n");
+}
+
+
+static void test_statmount_sb_basic(void)
+{
+ struct statmount sm;
+ int ret;
+ uint64_t mask = STATMOUNT_SB_BASIC;
+ struct statx sx;
+ struct statfs sf;
+
+ ret = statmount(root_id, mask, &sm, sizeof(sm), 0);
+ if (ret == -1) {
+ ksft_test_result_fail("statmount sb basic: %s\n",
+ strerror(errno));
+ return;
+ }
+ if (sm.size != sizeof(sm)) {
+ ksft_test_result_fail("unexpected size: %u != %u\n",
+ sm.size, (uint32_t) sizeof(sm));
+ return;
+ }
+ if (sm.mask != mask) {
+ ksft_test_result_skip("statmount sb basic unavailable\n");
+ return;
+ }
+
+ ret = statx(AT_FDCWD, "/", 0, 0, &sx);
+ if (ret == -1) {
+ ksft_test_result_fail("stat root failed: %s\n",
+ strerror(errno));
+ return;
+ }
+
+ if (sm.sb_dev_major != sx.stx_dev_major ||
+ sm.sb_dev_minor != sx.stx_dev_minor) {
+ ksft_test_result_fail("unexpected sb dev %u:%u != %u:%u\n",
+ sm.sb_dev_major, sm.sb_dev_minor,
+ sx.stx_dev_major, sx.stx_dev_minor);
+ return;
+ }
+
+ ret = statfs("/", &sf);
+ if (ret == -1) {
+ ksft_test_result_fail("statfs root failed: %s\n",
+ strerror(errno));
+ return;
+ }
+
+ if (sm.sb_magic != sf.f_type) {
+ ksft_test_result_fail("unexpected sb magic: 0x%llx != 0x%lx\n",
+ (unsigned long long) sm.sb_magic,
+ sf.f_type);
+ return;
+ }
+
+ ksft_test_result_pass("statmount sb basic\n");
+}
+
+static void test_statmount_mnt_point(void)
+{
+ struct statmount *sm;
+
+ sm = statmount_alloc(root_id, STATMOUNT_MNT_POINT, 0);
+ if (!sm) {
+ ksft_test_result_fail("statmount mount point: %s\n",
+ strerror(errno));
+ return;
+ }
+
+ if (strcmp(sm->str + sm->mnt_point, "/") != 0) {
+ ksft_test_result_fail("unexpected mount point: '%s' != '/'\n",
+ sm->str + sm->mnt_point);
+ goto out;
+ }
+ ksft_test_result_pass("statmount mount point\n");
+out:
+ free(sm);
+}
+
+static void test_statmount_mnt_root(void)
+{
+ struct statmount *sm;
+ const char *mnt_root, *last_dir, *last_root;
+
+ last_dir = strrchr(root_mntpoint, '/');
+ assert(last_dir);
+ last_dir++;
+
+ sm = statmount_alloc(root_id, STATMOUNT_MNT_ROOT, 0);
+ if (!sm) {
+ ksft_test_result_fail("statmount mount root: %s\n",
+ strerror(errno));
+ return;
+ }
+ mnt_root = sm->str + sm->mnt_root;
+ last_root = strrchr(mnt_root, '/');
+ if (last_root)
+ last_root++;
+ else
+ last_root = mnt_root;
+
+ if (strcmp(last_dir, last_root) != 0) {
+ ksft_test_result_fail("unexpected mount root last component: '%s' != '%s'\n",
+ last_root, last_dir);
+ goto out;
+ }
+ ksft_test_result_pass("statmount mount root\n");
+out:
+ free(sm);
+}
+
+static void test_statmount_fs_type(void)
+{
+ struct statmount *sm;
+ const char *fs_type;
+ const char *const *s;
+
+ sm = statmount_alloc(root_id, STATMOUNT_FS_TYPE, 0);
+ if (!sm) {
+ ksft_test_result_fail("statmount fs type: %s\n",
+ strerror(errno));
+ return;
+ }
+ fs_type = sm->str + sm->fs_type;
+ for (s = known_fs; *s; s++) {
+ if (strcmp(fs_type, *s) == 0)
+ break;
+ }
+ if (!*s)
+ ksft_print_msg("unknown filesystem type: %s\n", fs_type);
+
+ ksft_test_result_pass("statmount fs type\n");
+ free(sm);
+}
+
+static void test_statmount_string(uint64_t mask, size_t off, const char *name)
+{
+ struct statmount *sm;
+ size_t len, shortsize, exactsize;
+ uint32_t start, i;
+ int ret;
+
+ sm = statmount_alloc(root_id, mask, 0);
+ if (!sm) {
+ ksft_test_result_fail("statmount %s: %s\n", name,
+ strerror(errno));
+ goto out;
+ }
+ if (sm->size < sizeof(*sm)) {
+ ksft_test_result_fail("unexpected size: %u < %u\n",
+ sm->size, (uint32_t) sizeof(*sm));
+ goto out;
+ }
+ if (sm->mask != mask) {
+ ksft_test_result_skip("statmount %s unavailable\n", name);
+ goto out;
+ }
+ len = sm->size - sizeof(*sm);
+ start = ((uint32_t *) sm)[off];
+
+ for (i = start;; i++) {
+ if (i >= len) {
+ ksft_test_result_fail("string out of bounds\n");
+ goto out;
+ }
+ if (!sm->str[i])
+ break;
+ }
+ exactsize = sm->size;
+ shortsize = sizeof(*sm) + i;
+
+ ret = statmount(root_id, mask, sm, exactsize, 0);
+ if (ret == -1) {
+ ksft_test_result_fail("statmount exact size: %s\n",
+ strerror(errno));
+ goto out;
+ }
+ errno = 0;
+ ret = statmount(root_id, mask, sm, shortsize, 0);
+ if (ret != -1 || errno != EOVERFLOW) {
+ ksft_test_result_fail("should have failed with EOVERFLOW: %s\n",
+ strerror(errno));
+ goto out;
+ }
+
+ ksft_test_result_pass("statmount string %s\n", name);
+out:
+ free(sm);
+}
+
+static void test_listmount_tree(void)
+{
+ ssize_t res;
+ const unsigned int log2_num = 4;
+ const unsigned int step = 3;
+ const unsigned int size = (1 << log2_num) + step + 1;
+ size_t num, expect = 1 << log2_num;
+ uint64_t list[size];
+ uint64_t list2[size];
+ size_t i;
+
+
+ res = setup_mount_tree(log2_num);
+ if (res == -1)
+ return;
+
+ num = res = listmount(LSMT_ROOT, 0, list, size, 0);
+ if (res == -1) {
+ ksft_test_result_fail("listmount: %s\n", strerror(errno));
+ return;
+ }
+ if (num != expect) {
+ ksft_test_result_fail("listmount result is %zi != %zi\n",
+ res, expect);
+ return;
+ }
+
+ for (i = 0; i < size - step;) {
+ res = listmount(LSMT_ROOT, i ? list2[i - 1] : 0, list2 + i, step, 0);
+ if (res == -1)
+ ksft_test_result_fail("short listmount: %s\n",
+ strerror(errno));
+ i += res;
+ if (res < step)
+ break;
+ }
+ if (i != num) {
+ ksft_test_result_fail("different number of entries: %zu != %zu\n",
+ i, num);
+ return;
+ }
+ for (i = 0; i < num; i++) {
+ if (list2[i] != list[i]) {
+ ksft_test_result_fail("different value for entry %zu: 0x%llx != 0x%llx\n",
+ i,
+ (unsigned long long) list2[i],
+ (unsigned long long) list[i]);
+ }
+ }
+
+ ksft_test_result_pass("listmount tree\n");
+}
+
+#define str_off(memb) (offsetof(struct statmount, memb) / sizeof(uint32_t))
+
+int main(void)
+{
+ int ret;
+ uint64_t all_mask = STATMOUNT_SB_BASIC | STATMOUNT_MNT_BASIC |
+ STATMOUNT_PROPAGATE_FROM | STATMOUNT_MNT_ROOT |
+ STATMOUNT_MNT_POINT | STATMOUNT_FS_TYPE;
+
+ ksft_print_header();
+
+ ret = statmount(0, 0, NULL, 0, 0);
+ assert(ret == -1);
+ if (errno == ENOSYS)
+ ksft_exit_skip("statmount() syscall not supported\n");
+
+ setup_namespace();
+
+ ksft_set_plan(14);
+ test_listmount_empty_root();
+ test_statmount_zero_mask();
+ test_statmount_mnt_basic();
+ test_statmount_sb_basic();
+ test_statmount_mnt_root();
+ test_statmount_mnt_point();
+ test_statmount_fs_type();
+ test_statmount_string(STATMOUNT_MNT_ROOT, str_off(mnt_root), "mount root");
+ test_statmount_string(STATMOUNT_MNT_POINT, str_off(mnt_point), "mount point");
+ test_statmount_string(STATMOUNT_FS_TYPE, str_off(fs_type), "fs type");
+ test_statmount_string(all_mask, str_off(mnt_root), "mount root & all");
+ test_statmount_string(all_mask, str_off(mnt_point), "mount point & all");
+ test_statmount_string(all_mask, str_off(fs_type), "fs type & all");
+
+ test_listmount_tree();
+
+
+ if (ksft_get_fail_cnt() + ksft_get_error_cnt() > 0)
+ ksft_exit_fail();
+ else
+ ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
index 5ea78986e665..9d51b5691349 100644
--- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
+++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
@@ -42,13 +42,12 @@ struct pmreg_sets {
static uint64_t get_pmcr_n(uint64_t pmcr)
{
- return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+ return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
}
static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
{
- *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
- *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
+ u64p_replace_bits((__u64 *) pmcr, pmcr_n, ARMV8_PMU_PMCR_N);
}
static uint64_t get_counters_mask(uint64_t n)
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
index 18e1f86a6234..50818904397c 100644
--- a/tools/testing/selftests/landlock/fs_test.c
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -589,7 +589,7 @@ TEST_F_FORK(layout1, file_and_dir_access_rights)
ASSERT_EQ(0, close(ruleset_fd));
}
-TEST_F_FORK(layout0, unknown_access_rights)
+TEST_F_FORK(layout0, ruleset_with_unknown_access)
{
__u64 access_mask;
@@ -605,6 +605,67 @@ TEST_F_FORK(layout0, unknown_access_rights)
}
}
+TEST_F_FORK(layout0, rule_with_unknown_access)
+{
+ __u64 access;
+ struct landlock_path_beneath_attr path_beneath = {};
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = ACCESS_ALL,
+ };
+ const int ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+
+ ASSERT_LE(0, ruleset_fd);
+
+ path_beneath.parent_fd =
+ open(TMP_DIR, O_PATH | O_DIRECTORY | O_CLOEXEC);
+ ASSERT_LE(0, path_beneath.parent_fd);
+
+ for (access = 1ULL << 63; access != ACCESS_LAST; access >>= 1) {
+ path_beneath.allowed_access = access;
+ EXPECT_EQ(-1, landlock_add_rule(ruleset_fd,
+ LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0));
+ EXPECT_EQ(EINVAL, errno);
+ }
+ ASSERT_EQ(0, close(path_beneath.parent_fd));
+ ASSERT_EQ(0, close(ruleset_fd));
+}
+
+TEST_F_FORK(layout1, rule_with_unhandled_access)
+{
+ struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = LANDLOCK_ACCESS_FS_EXECUTE,
+ };
+ struct landlock_path_beneath_attr path_beneath = {};
+ int ruleset_fd;
+ __u64 access;
+
+ ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ path_beneath.parent_fd = open(file1_s1d2, O_PATH | O_CLOEXEC);
+ ASSERT_LE(0, path_beneath.parent_fd);
+
+ for (access = 1; access > 0; access <<= 1) {
+ int err;
+
+ path_beneath.allowed_access = access;
+ err = landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0);
+ if (access == ruleset_attr.handled_access_fs) {
+ EXPECT_EQ(0, err);
+ } else {
+ EXPECT_EQ(-1, err);
+ EXPECT_EQ(EINVAL, errno);
+ }
+ }
+
+ EXPECT_EQ(0, close(path_beneath.parent_fd));
+ EXPECT_EQ(0, close(ruleset_fd));
+}
+
static void add_path_beneath(struct __test_metadata *const _metadata,
const int ruleset_fd, const __u64 allowed_access,
const char *const path)
@@ -3627,7 +3688,7 @@ FIXTURE_TEARDOWN(ftruncate)
FIXTURE_VARIANT(ftruncate)
{
const __u64 handled;
- const __u64 permitted;
+ const __u64 allowed;
const int expected_open_result;
const int expected_ftruncate_result;
};
@@ -3636,7 +3697,7 @@ FIXTURE_VARIANT(ftruncate)
FIXTURE_VARIANT_ADD(ftruncate, w_w) {
/* clang-format on */
.handled = LANDLOCK_ACCESS_FS_WRITE_FILE,
- .permitted = LANDLOCK_ACCESS_FS_WRITE_FILE,
+ .allowed = LANDLOCK_ACCESS_FS_WRITE_FILE,
.expected_open_result = 0,
.expected_ftruncate_result = 0,
};
@@ -3645,7 +3706,7 @@ FIXTURE_VARIANT_ADD(ftruncate, w_w) {
FIXTURE_VARIANT_ADD(ftruncate, t_t) {
/* clang-format on */
.handled = LANDLOCK_ACCESS_FS_TRUNCATE,
- .permitted = LANDLOCK_ACCESS_FS_TRUNCATE,
+ .allowed = LANDLOCK_ACCESS_FS_TRUNCATE,
.expected_open_result = 0,
.expected_ftruncate_result = 0,
};
@@ -3654,7 +3715,7 @@ FIXTURE_VARIANT_ADD(ftruncate, t_t) {
FIXTURE_VARIANT_ADD(ftruncate, wt_w) {
/* clang-format on */
.handled = LANDLOCK_ACCESS_FS_WRITE_FILE | LANDLOCK_ACCESS_FS_TRUNCATE,
- .permitted = LANDLOCK_ACCESS_FS_WRITE_FILE,
+ .allowed = LANDLOCK_ACCESS_FS_WRITE_FILE,
.expected_open_result = 0,
.expected_ftruncate_result = EACCES,
};
@@ -3663,8 +3724,7 @@ FIXTURE_VARIANT_ADD(ftruncate, wt_w) {
FIXTURE_VARIANT_ADD(ftruncate, wt_wt) {
/* clang-format on */
.handled = LANDLOCK_ACCESS_FS_WRITE_FILE | LANDLOCK_ACCESS_FS_TRUNCATE,
- .permitted = LANDLOCK_ACCESS_FS_WRITE_FILE |
- LANDLOCK_ACCESS_FS_TRUNCATE,
+ .allowed = LANDLOCK_ACCESS_FS_WRITE_FILE | LANDLOCK_ACCESS_FS_TRUNCATE,
.expected_open_result = 0,
.expected_ftruncate_result = 0,
};
@@ -3673,7 +3733,7 @@ FIXTURE_VARIANT_ADD(ftruncate, wt_wt) {
FIXTURE_VARIANT_ADD(ftruncate, wt_t) {
/* clang-format on */
.handled = LANDLOCK_ACCESS_FS_WRITE_FILE | LANDLOCK_ACCESS_FS_TRUNCATE,
- .permitted = LANDLOCK_ACCESS_FS_TRUNCATE,
+ .allowed = LANDLOCK_ACCESS_FS_TRUNCATE,
.expected_open_result = EACCES,
};
@@ -3683,7 +3743,7 @@ TEST_F_FORK(ftruncate, open_and_ftruncate)
const struct rule rules[] = {
{
.path = path,
- .access = variant->permitted,
+ .access = variant->allowed,
},
{},
};
@@ -3724,7 +3784,7 @@ TEST_F_FORK(ftruncate, open_and_ftruncate_in_different_processes)
const struct rule rules[] = {
{
.path = path,
- .access = variant->permitted,
+ .access = variant->allowed,
},
{},
};
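
Both new rule_with_* tests walk access bits with plain shifts: the ascending loop (access = 1; access > 0; access <<= 1) visits every bit of the __u64 exactly once and terminates when the shift overflows to zero, while the descending loop from 1ULL << 63 down to ACCESS_LAST covers only the bits above the highest defined right. A standalone sketch of the ascending walk:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t access;
    int visited = 0;

    /* Visits 1, 2, 4, ... 1ULL << 63; the next shift wraps to 0 and stops. */
    for (access = 1; access > 0; access <<= 1)
        visited++;
    printf("visited %d single-bit masks\n", visited); /* prints 64 */
    return 0;
}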
diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c
index 929e21c4db05..ea5f727dd257 100644
--- a/tools/testing/selftests/landlock/net_test.c
+++ b/tools/testing/selftests/landlock/net_test.c
@@ -1260,7 +1260,7 @@ TEST_F(mini, network_access_rights)
}
/* Checks invalid attribute, out of landlock network access range. */
-TEST_F(mini, unknown_access_rights)
+TEST_F(mini, ruleset_with_unknown_access)
{
__u64 access_mask;
@@ -1276,6 +1276,63 @@ TEST_F(mini, unknown_access_rights)
}
}
+TEST_F(mini, rule_with_unknown_access)
+{
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = ACCESS_ALL,
+ };
+ struct landlock_net_port_attr net_port = {
+ .port = sock_port_start,
+ };
+ int ruleset_fd;
+ __u64 access;
+
+ ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ for (access = 1ULL << 63; access != ACCESS_LAST; access >>= 1) {
+ net_port.allowed_access = access;
+ EXPECT_EQ(-1,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &net_port, 0));
+ EXPECT_EQ(EINVAL, errno);
+ }
+ EXPECT_EQ(0, close(ruleset_fd));
+}
+
+TEST_F(mini, rule_with_unhandled_access)
+{
+ struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP,
+ };
+ struct landlock_net_port_attr net_port = {
+ .port = sock_port_start,
+ };
+ int ruleset_fd;
+ __u64 access;
+
+ ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ for (access = 1; access > 0; access <<= 1) {
+ int err;
+
+ net_port.allowed_access = access;
+ err = landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &net_port, 0);
+ if (access == ruleset_attr.handled_access_net) {
+ EXPECT_EQ(0, err);
+ } else {
+ EXPECT_EQ(-1, err);
+ EXPECT_EQ(EINVAL, errno);
+ }
+ }
+
+ EXPECT_EQ(0, close(ruleset_fd));
+}
+
TEST_F(mini, inval)
{
const struct landlock_ruleset_attr ruleset_attr = {
diff --git a/tools/testing/selftests/lsm/.gitignore b/tools/testing/selftests/lsm/.gitignore
new file mode 100644
index 000000000000..bd68f6c3fd07
--- /dev/null
+++ b/tools/testing/selftests/lsm/.gitignore
@@ -0,0 +1 @@
+/*_test
diff --git a/tools/testing/selftests/lsm/Makefile b/tools/testing/selftests/lsm/Makefile
new file mode 100644
index 000000000000..3f80c0bc093d
--- /dev/null
+++ b/tools/testing/selftests/lsm/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# First run: make -C ../../../.. headers_install
+
+CFLAGS += -Wall -O2 $(KHDR_INCLUDES)
+LOCAL_HDRS += common.h
+
+TEST_GEN_PROGS := lsm_get_self_attr_test lsm_list_modules_test \
+ lsm_set_self_attr_test
+
+include ../lib.mk
+
+$(OUTPUT)/lsm_get_self_attr_test: lsm_get_self_attr_test.c common.c
+$(OUTPUT)/lsm_set_self_attr_test: lsm_set_self_attr_test.c common.c
+$(OUTPUT)/lsm_list_modules_test: lsm_list_modules_test.c common.c
+
+EXTRA_CLEAN = $(OUTPUT)/common.o
diff --git a/tools/testing/selftests/lsm/common.c b/tools/testing/selftests/lsm/common.c
new file mode 100644
index 000000000000..9ad258912646
--- /dev/null
+++ b/tools/testing/selftests/lsm/common.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux Security Module infrastructure tests
+ *
+ * Copyright © 2023 Casey Schaufler <casey@schaufler-ca.com>
+ */
+
+#define _GNU_SOURCE
+#include <linux/lsm.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include "common.h"
+
+#define PROCATTR "/proc/self/attr/"
+
+int read_proc_attr(const char *attr, char *value, size_t size)
+{
+ int fd;
+ int len;
+ char *path;
+
+ len = strlen(PROCATTR) + strlen(attr) + 1;
+ path = calloc(len, 1);
+ if (path == NULL)
+ return -1;
+ sprintf(path, "%s%s", PROCATTR, attr);
+
+ fd = open(path, O_RDONLY);
+ free(path);
+
+ if (fd < 0)
+ return -1;
+ len = read(fd, value, size);
+
+ close(fd);
+
+ /* Ensure value is terminated */
+ if (len <= 0 || len == size)
+ return -1;
+ value[len] = '\0';
+
+ path = strchr(value, '\n');
+ if (path)
+ *path = '\0';
+
+ return 0;
+}
+
+int read_sysfs_lsms(char *lsms, size_t size)
+{
+ FILE *fp;
+ size_t red;
+
+ fp = fopen("/sys/kernel/security/lsm", "r");
+ if (fp == NULL)
+ return -1;
+ red = fread(lsms, 1, size, fp);
+ fclose(fp);
+
+ if (red <= 0 || red == size)
+ return -1;
+ lsms[red] = '\0';
+ return 0;
+}
+
+int attr_lsm_count(void)
+{
+ char *names = calloc(sysconf(_SC_PAGESIZE), 1);
+ int count = 0;
+
+ if (!names)
+ return 0;
+
+ if (read_sysfs_lsms(names, sysconf(_SC_PAGESIZE)))
+ return 0;
+
+ if (strstr(names, "selinux"))
+ count++;
+ if (strstr(names, "smack"))
+ count++;
+ if (strstr(names, "apparmor"))
+ count++;
+
+ return count;
+}
diff --git a/tools/testing/selftests/lsm/common.h b/tools/testing/selftests/lsm/common.h
new file mode 100644
index 000000000000..d404329e5eeb
--- /dev/null
+++ b/tools/testing/selftests/lsm/common.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module infrastructure tests
+ *
+ * Copyright © 2023 Casey Schaufler <casey@schaufler-ca.com>
+ */
+
+#ifndef lsm_get_self_attr
+static inline int lsm_get_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+ size_t *size, __u32 flags)
+{
+ return syscall(__NR_lsm_get_self_attr, attr, ctx, size, flags);
+}
+#endif
+
+#ifndef lsm_set_self_attr
+static inline int lsm_set_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+ size_t size, __u32 flags)
+{
+ return syscall(__NR_lsm_set_self_attr, attr, ctx, size, flags);
+}
+#endif
+
+#ifndef lsm_list_modules
+static inline int lsm_list_modules(__u64 *ids, size_t *size, __u32 flags)
+{
+ return syscall(__NR_lsm_list_modules, ids, size, flags);
+}
+#endif
+
+extern int read_proc_attr(const char *attr, char *value, size_t size);
+extern int read_sysfs_lsms(char *lsms, size_t size);
+int attr_lsm_count(void);
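
The #ifndef-guarded wrappers above let the tests build even where libc provides no stubs for the new syscalls; the __NR_* numbers come from the installed uapi headers, hence the headers_install note in the Makefile. A hedged usage sketch that queries the calling thread's "current" attribute through the wrapper:

#define _GNU_SOURCE
#include <linux/lsm.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include "common.h"

int main(void)
{
    size_t size = sysconf(_SC_PAGESIZE);
    struct lsm_ctx *ctx = calloc(size, 1);
    int count;

    if (!ctx)
        return 1;
    /* On success the return value is the number of lsm_ctx entries filled in. */
    count = lsm_get_self_attr(LSM_ATTR_CURRENT, ctx, &size, 0);
    if (count < 0) {
        perror("lsm_get_self_attr");
        free(ctx);
        return 1;
    }
    printf("%d context(s); first from LSM id %llu: \"%s\"\n",
           count, (unsigned long long)ctx->id, (char *)ctx->ctx);
    free(ctx);
    return 0;
}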
diff --git a/tools/testing/selftests/lsm/config b/tools/testing/selftests/lsm/config
new file mode 100644
index 000000000000..1c0c4c020f9c
--- /dev/null
+++ b/tools/testing/selftests/lsm/config
@@ -0,0 +1,3 @@
+CONFIG_SYSFS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
diff --git a/tools/testing/selftests/lsm/lsm_get_self_attr_test.c b/tools/testing/selftests/lsm/lsm_get_self_attr_test.c
new file mode 100644
index 000000000000..e0e313d9047a
--- /dev/null
+++ b/tools/testing/selftests/lsm/lsm_get_self_attr_test.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux Security Module infrastructure tests
+ * Tests for the lsm_get_self_attr system call
+ *
+ * Copyright © 2022 Casey Schaufler <casey@schaufler-ca.com>
+ */
+
+#define _GNU_SOURCE
+#include <linux/lsm.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include "../kselftest_harness.h"
+#include "common.h"
+
+static struct lsm_ctx *next_ctx(struct lsm_ctx *ctxp)
+{
+ void *vp;
+
+ vp = (void *)ctxp + sizeof(*ctxp) + ctxp->ctx_len;
+ return (struct lsm_ctx *)vp;
+}
+
+TEST(size_null_lsm_get_self_attr)
+{
+ const long page_size = sysconf(_SC_PAGESIZE);
+ struct lsm_ctx *ctx = calloc(page_size, 1);
+
+ ASSERT_NE(NULL, ctx);
+ errno = 0;
+ ASSERT_EQ(-1, lsm_get_self_attr(LSM_ATTR_CURRENT, ctx, NULL, 0));
+ ASSERT_EQ(EINVAL, errno);
+
+ free(ctx);
+}
+
+TEST(ctx_null_lsm_get_self_attr)
+{
+ const long page_size = sysconf(_SC_PAGESIZE);
+ size_t size = page_size;
+ int rc;
+
+ rc = lsm_get_self_attr(LSM_ATTR_CURRENT, NULL, &size, 0);
+
+ if (attr_lsm_count()) {
+ ASSERT_NE(-1, rc);
+ ASSERT_NE(1, size);
+ } else {
+ ASSERT_EQ(-1, rc);
+ }
+}
+
+TEST(size_too_small_lsm_get_self_attr)
+{
+ const long page_size = sysconf(_SC_PAGESIZE);
+ struct lsm_ctx *ctx = calloc(page_size, 1);
+ size_t size = 1;
+
+ ASSERT_NE(NULL, ctx);
+ errno = 0;
+ ASSERT_EQ(-1, lsm_get_self_attr(LSM_ATTR_CURRENT, ctx, &size, 0));
+ if (attr_lsm_count()) {
+ ASSERT_EQ(E2BIG, errno);
+ } else {
+ ASSERT_EQ(EOPNOTSUPP, errno);
+ }
+ ASSERT_NE(1, size);
+
+ free(ctx);
+}
+
+TEST(flags_zero_lsm_get_self_attr)
+{
+ const long page_size = sysconf(_SC_PAGESIZE);
+ struct lsm_ctx *ctx = calloc(page_size, 1);
+ __u64 *syscall_lsms = calloc(page_size, 1);
+ size_t size;
+ int lsmcount;
+ int i;
+
+ ASSERT_NE(NULL, ctx);
+ errno = 0;
+ size = page_size;
+ ASSERT_EQ(-1, lsm_get_self_attr(LSM_ATTR_CURRENT, ctx, &size,
+ LSM_FLAG_SINGLE));
+ ASSERT_EQ(EINVAL, errno);
+ ASSERT_EQ(page_size, size);
+
+ lsmcount = syscall(__NR_lsm_list_modules, syscall_lsms, &size, 0);
+ ASSERT_LE(1, lsmcount);
+ ASSERT_NE(NULL, syscall_lsms);
+
+ for (i = 0; i < lsmcount; i++) {
+ errno = 0;
+ size = page_size;
+ ctx->id = syscall_lsms[i];
+
+ if (syscall_lsms[i] == LSM_ID_SELINUX ||
+ syscall_lsms[i] == LSM_ID_SMACK ||
+ syscall_lsms[i] == LSM_ID_APPARMOR) {
+ ASSERT_EQ(1, lsm_get_self_attr(LSM_ATTR_CURRENT, ctx,
+ &size, LSM_FLAG_SINGLE));
+ } else {
+ ASSERT_EQ(-1, lsm_get_self_attr(LSM_ATTR_CURRENT, ctx,
+ &size,
+ LSM_FLAG_SINGLE));
+ }
+ }
+
+ free(ctx);
+}
+
+TEST(flags_overset_lsm_get_self_attr)
+{
+ const long page_size = sysconf(_SC_PAGESIZE);
+ struct lsm_ctx *ctx = calloc(page_size, 1);
+ size_t size;
+
+ ASSERT_NE(NULL, ctx);
+
+ errno = 0;
+ size = page_size;
+ ASSERT_EQ(-1, lsm_get_self_attr(LSM_ATTR_CURRENT | LSM_ATTR_PREV, ctx,
+ &size, 0));
+ ASSERT_EQ(EOPNOTSUPP, errno);
+
+ errno = 0;
+ size = page_size;
+ ASSERT_EQ(-1, lsm_get_self_attr(LSM_ATTR_CURRENT, ctx, &size,
+ LSM_FLAG_SINGLE |
+ (LSM_FLAG_SINGLE << 1)));
+ ASSERT_EQ(EINVAL, errno);
+
+ free(ctx);
+}
+
+TEST(basic_lsm_get_self_attr)
+{
+ const long page_size = sysconf(_SC_PAGESIZE);
+ size_t size = page_size;
+ struct lsm_ctx *ctx = calloc(page_size, 1);
+ struct lsm_ctx *tctx = NULL;
+ __u64 *syscall_lsms = calloc(page_size, 1);
+ char *attr = calloc(page_size, 1);
+ int cnt_current = 0;
+ int cnt_exec = 0;
+ int cnt_fscreate = 0;
+ int cnt_keycreate = 0;
+ int cnt_prev = 0;
+ int cnt_sockcreate = 0;
+ int lsmcount;
+ int count;
+ int i;
+
+ ASSERT_NE(NULL, ctx);
+ ASSERT_NE(NULL, syscall_lsms);
+
+ lsmcount = syscall(__NR_lsm_list_modules, syscall_lsms, &size, 0);
+ ASSERT_LE(1, lsmcount);
+
+ for (i = 0; i < lsmcount; i++) {
+ switch (syscall_lsms[i]) {
+ case LSM_ID_SELINUX:
+ cnt_current++;
+ cnt_exec++;
+ cnt_fscreate++;
+ cnt_keycreate++;
+ cnt_prev++;
+ cnt_sockcreate++;
+ break;
+ case LSM_ID_SMACK:
+ cnt_current++;
+ break;
+ case LSM_ID_APPARMOR:
+ cnt_current++;
+ cnt_exec++;
+ cnt_prev++;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (cnt_current) {
+ size = page_size;
+ count = lsm_get_self_attr(LSM_ATTR_CURRENT, ctx, &size, 0);
+ ASSERT_EQ(cnt_current, count);
+ tctx = ctx;
+ ASSERT_EQ(0, read_proc_attr("current", attr, page_size));
+ ASSERT_EQ(0, strcmp((char *)tctx->ctx, attr));
+ for (i = 1; i < count; i++) {
+ tctx = next_ctx(tctx);
+ ASSERT_NE(0, strcmp((char *)tctx->ctx, attr));
+ }
+ }
+ if (cnt_exec) {
+ size = page_size;
+ count = lsm_get_self_attr(LSM_ATTR_EXEC, ctx, &size, 0);
+ ASSERT_GE(cnt_exec, count);
+ if (count > 0) {
+ tctx = ctx;
+ if (read_proc_attr("exec", attr, page_size) == 0)
+ ASSERT_EQ(0, strcmp((char *)tctx->ctx, attr));
+ }
+ for (i = 1; i < count; i++) {
+ tctx = next_ctx(tctx);
+ ASSERT_NE(0, strcmp((char *)tctx->ctx, attr));
+ }
+ }
+ if (cnt_fscreate) {
+ size = page_size;
+ count = lsm_get_self_attr(LSM_ATTR_FSCREATE, ctx, &size, 0);
+ ASSERT_GE(cnt_fscreate, count);
+ if (count > 0) {
+ tctx = ctx;
+ if (read_proc_attr("fscreate", attr, page_size) == 0)
+ ASSERT_EQ(0, strcmp((char *)tctx->ctx, attr));
+ }
+ for (i = 1; i < count; i++) {
+ tctx = next_ctx(tctx);
+ ASSERT_NE(0, strcmp((char *)tctx->ctx, attr));
+ }
+ }
+ if (cnt_keycreate) {
+ size = page_size;
+ count = lsm_get_self_attr(LSM_ATTR_KEYCREATE, ctx, &size, 0);
+ ASSERT_GE(cnt_keycreate, count);
+ if (count > 0) {
+ tctx = ctx;
+ if (read_proc_attr("keycreate", attr, page_size) == 0)
+ ASSERT_EQ(0, strcmp((char *)tctx->ctx, attr));
+ }
+ for (i = 1; i < count; i++) {
+ tctx = next_ctx(tctx);
+ ASSERT_NE(0, strcmp((char *)tctx->ctx, attr));
+ }
+ }
+ if (cnt_prev) {
+ size = page_size;
+ count = lsm_get_self_attr(LSM_ATTR_PREV, ctx, &size, 0);
+ ASSERT_GE(cnt_prev, count);
+ if (count > 0) {
+ tctx = ctx;
+ ASSERT_EQ(0, read_proc_attr("prev", attr, page_size));
+ ASSERT_EQ(0, strcmp((char *)tctx->ctx, attr));
+ for (i = 1; i < count; i++) {
+ tctx = next_ctx(tctx);
+ ASSERT_NE(0, strcmp((char *)tctx->ctx, attr));
+ }
+ }
+ }
+ if (cnt_sockcreate) {
+ size = page_size;
+ count = lsm_get_self_attr(LSM_ATTR_SOCKCREATE, ctx, &size, 0);
+ ASSERT_GE(cnt_sockcreate, count);
+ if (count > 0) {
+ tctx = ctx;
+ if (read_proc_attr("sockcreate", attr, page_size) == 0)
+ ASSERT_EQ(0, strcmp((char *)tctx->ctx, attr));
+ }
+ for (i = 1; i < count; i++) {
+ tctx = next_ctx(tctx);
+ ASSERT_NE(0, strcmp((char *)tctx->ctx, attr));
+ }
+ }
+
+ free(ctx);
+ free(attr);
+ free(syscall_lsms);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/lsm/lsm_list_modules_test.c b/tools/testing/selftests/lsm/lsm_list_modules_test.c
new file mode 100644
index 000000000000..9df29b1e3497
--- /dev/null
+++ b/tools/testing/selftests/lsm/lsm_list_modules_test.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux Security Module infrastructure tests
+ * Tests for the lsm_list_modules system call
+ *
+ * Copyright © 2022 Casey Schaufler <casey@schaufler-ca.com>
+ */
+
+#define _GNU_SOURCE
+#include <linux/lsm.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include "../kselftest_harness.h"
+#include "common.h"
+
+TEST(size_null_lsm_list_modules)
+{
+ const long page_size = sysconf(_SC_PAGESIZE);
+ __u64 *syscall_lsms = calloc(page_size, 1);
+
+ ASSERT_NE(NULL, syscall_lsms);
+ errno = 0;
+ ASSERT_EQ(-1, lsm_list_modules(syscall_lsms, NULL, 0));
+ ASSERT_EQ(EFAULT, errno);
+
+ free(syscall_lsms);
+}
+
+TEST(ids_null_lsm_list_modules)
+{
+ const long page_size = sysconf(_SC_PAGESIZE);
+ size_t size = page_size;
+
+ errno = 0;
+ ASSERT_EQ(-1, lsm_list_modules(NULL, &size, 0));
+ ASSERT_EQ(EFAULT, errno);
+ ASSERT_NE(1, size);
+}
+
+TEST(size_too_small_lsm_list_modules)
+{
+ const long page_size = sysconf(_SC_PAGESIZE);
+ __u64 *syscall_lsms = calloc(page_size, 1);
+ size_t size = 1;
+
+ ASSERT_NE(NULL, syscall_lsms);
+ errno = 0;
+ ASSERT_EQ(-1, lsm_list_modules(syscall_lsms, &size, 0));
+ ASSERT_EQ(E2BIG, errno);
+ ASSERT_NE(1, size);
+
+ free(syscall_lsms);
+}
+
+TEST(flags_set_lsm_list_modules)
+{
+ const long page_size = sysconf(_SC_PAGESIZE);
+ __u64 *syscall_lsms = calloc(page_size, 1);
+ size_t size = page_size;
+
+ ASSERT_NE(NULL, syscall_lsms);
+ errno = 0;
+ ASSERT_EQ(-1, lsm_list_modules(syscall_lsms, &size, 7));
+ ASSERT_EQ(EINVAL, errno);
+ ASSERT_EQ(page_size, size);
+
+ free(syscall_lsms);
+}
+
+TEST(correct_lsm_list_modules)
+{
+ const long page_size = sysconf(_SC_PAGESIZE);
+ size_t size = page_size;
+ __u64 *syscall_lsms = calloc(page_size, 1);
+ char *sysfs_lsms = calloc(page_size, 1);
+ char *name;
+ char *cp;
+ int count;
+ int i;
+
+ ASSERT_NE(NULL, sysfs_lsms);
+ ASSERT_NE(NULL, syscall_lsms);
+ ASSERT_EQ(0, read_sysfs_lsms(sysfs_lsms, page_size));
+
+ count = lsm_list_modules(syscall_lsms, &size, 0);
+ ASSERT_LE(1, count);
+ cp = sysfs_lsms;
+ for (i = 0; i < count; i++) {
+ switch (syscall_lsms[i]) {
+ case LSM_ID_CAPABILITY:
+ name = "capability";
+ break;
+ case LSM_ID_SELINUX:
+ name = "selinux";
+ break;
+ case LSM_ID_SMACK:
+ name = "smack";
+ break;
+ case LSM_ID_TOMOYO:
+ name = "tomoyo";
+ break;
+ case LSM_ID_APPARMOR:
+ name = "apparmor";
+ break;
+ case LSM_ID_YAMA:
+ name = "yama";
+ break;
+ case LSM_ID_LOADPIN:
+ name = "loadpin";
+ break;
+ case LSM_ID_SAFESETID:
+ name = "safesetid";
+ break;
+ case LSM_ID_LOCKDOWN:
+ name = "lockdown";
+ break;
+ case LSM_ID_BPF:
+ name = "bpf";
+ break;
+ case LSM_ID_LANDLOCK:
+ name = "landlock";
+ break;
+ default:
+ name = "INVALID";
+ break;
+ }
+ ASSERT_EQ(0, strncmp(cp, name, strlen(name)));
+ cp += strlen(name) + 1;
+ }
+
+ free(sysfs_lsms);
+ free(syscall_lsms);
+}
+
+TEST_HARNESS_MAIN
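
The correctness check above relies on lsm_list_modules() reporting ids in the same order as the comma-separated list in /sys/kernel/security/lsm. A minimal sketch that simply enumerates the ids, using the wrapper from common.h:

#define _GNU_SOURCE
#include <linux/lsm.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include "common.h"

int main(void)
{
    size_t size = sysconf(_SC_PAGESIZE);
    __u64 *ids = calloc(size, 1);
    int i, count;

    if (!ids)
        return 1;
    count = lsm_list_modules(ids, &size, 0);
    if (count < 0) {
        perror("lsm_list_modules");
        free(ids);
        return 1;
    }
    for (i = 0; i < count; i++)
        printf("active LSM id: %llu\n", (unsigned long long)ids[i]);
    free(ids);
    return 0;
}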
diff --git a/tools/testing/selftests/lsm/lsm_set_self_attr_test.c b/tools/testing/selftests/lsm/lsm_set_self_attr_test.c
new file mode 100644
index 000000000000..e9712c6cf596
--- /dev/null
+++ b/tools/testing/selftests/lsm/lsm_set_self_attr_test.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux Security Module infrastructure tests
+ * Tests for the lsm_set_self_attr system call
+ *
+ * Copyright © 2022 Casey Schaufler <casey@schaufler-ca.com>
+ */
+
+#define _GNU_SOURCE
+#include <linux/lsm.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include "../kselftest_harness.h"
+#include "common.h"
+
+TEST(ctx_null_lsm_set_self_attr)
+{
+ ASSERT_EQ(-1, lsm_set_self_attr(LSM_ATTR_CURRENT, NULL,
+ sizeof(struct lsm_ctx), 0));
+}
+
+TEST(size_too_small_lsm_set_self_attr)
+{
+ const long page_size = sysconf(_SC_PAGESIZE);
+ struct lsm_ctx *ctx = calloc(page_size, 1);
+ size_t size = page_size;
+
+ ASSERT_NE(NULL, ctx);
+ if (attr_lsm_count()) {
+ ASSERT_LE(1, lsm_get_self_attr(LSM_ATTR_CURRENT, ctx, &size,
+ 0));
+ }
+ ASSERT_EQ(-1, lsm_set_self_attr(LSM_ATTR_CURRENT, ctx, 1, 0));
+
+ free(ctx);
+}
+
+TEST(flags_zero_lsm_set_self_attr)
+{
+ const long page_size = sysconf(_SC_PAGESIZE);
+ struct lsm_ctx *ctx = calloc(page_size, 1);
+ size_t size = page_size;
+
+ ASSERT_NE(NULL, ctx);
+ if (attr_lsm_count()) {
+ ASSERT_LE(1, lsm_get_self_attr(LSM_ATTR_CURRENT, ctx, &size,
+ 0));
+ }
+ ASSERT_EQ(-1, lsm_set_self_attr(LSM_ATTR_CURRENT, ctx, size, 1));
+
+ free(ctx);
+}
+
+TEST(flags_overset_lsm_set_self_attr)
+{
+ const long page_size = sysconf(_SC_PAGESIZE);
+ char *ctx = calloc(page_size, 1);
+ size_t size = page_size;
+ struct lsm_ctx *tctx = (struct lsm_ctx *)ctx;
+
+ ASSERT_NE(NULL, ctx);
+ if (attr_lsm_count()) {
+ ASSERT_LE(1, lsm_get_self_attr(LSM_ATTR_CURRENT, tctx, &size,
+ 0));
+ }
+ ASSERT_EQ(-1, lsm_set_self_attr(LSM_ATTR_CURRENT | LSM_ATTR_PREV, tctx,
+ size, 0));
+
+ free(ctx);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile
index dede0bcf97a3..2453add65d12 100644
--- a/tools/testing/selftests/mm/Makefile
+++ b/tools/testing/selftests/mm/Makefile
@@ -117,8 +117,8 @@ TEST_FILES += va_high_addr_switch.sh
include ../lib.mk
-$(TEST_GEN_PROGS): vm_util.c
-$(TEST_GEN_FILES): vm_util.c
+$(TEST_GEN_PROGS): vm_util.c thp_settings.c
+$(TEST_GEN_FILES): vm_util.c thp_settings.c
$(OUTPUT)/uffd-stress: uffd-common.c
$(OUTPUT)/uffd-unit-tests: uffd-common.c
diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c
index 9b420140ba2b..656afba02dbc 100644
--- a/tools/testing/selftests/mm/compaction_test.c
+++ b/tools/testing/selftests/mm/compaction_test.c
@@ -33,7 +33,7 @@ int read_memory_info(unsigned long *memfree, unsigned long *hugepagesize)
FILE *cmdfile = popen(cmd, "r");
if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
- perror("Failed to read meminfo\n");
+ ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
return -1;
}
@@ -44,7 +44,7 @@ int read_memory_info(unsigned long *memfree, unsigned long *hugepagesize)
cmdfile = popen(cmd, "r");
if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
- perror("Failed to read meminfo\n");
+ ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
return -1;
}
@@ -62,14 +62,14 @@ int prereq(void)
fd = open("/proc/sys/vm/compact_unevictable_allowed",
O_RDONLY | O_NONBLOCK);
if (fd < 0) {
- perror("Failed to open\n"
- "/proc/sys/vm/compact_unevictable_allowed\n");
+ ksft_print_msg("Failed to open /proc/sys/vm/compact_unevictable_allowed: %s\n",
+ strerror(errno));
return -1;
}
if (read(fd, &allowed, sizeof(char)) != sizeof(char)) {
- perror("Failed to read from\n"
- "/proc/sys/vm/compact_unevictable_allowed\n");
+ ksft_print_msg("Failed to read from /proc/sys/vm/compact_unevictable_allowed: %s\n",
+ strerror(errno));
close(fd);
return -1;
}
@@ -78,12 +78,13 @@ int prereq(void)
if (allowed == '1')
return 0;
+ ksft_print_msg("Compaction isn't allowed\n");
return -1;
}
int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
{
- int fd;
+ int fd, ret = -1;
int compaction_index = 0;
char initial_nr_hugepages[10] = {0};
char nr_hugepages[10] = {0};
@@ -94,18 +95,21 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
if (fd < 0) {
- perror("Failed to open /proc/sys/vm/nr_hugepages");
+ ksft_test_result_fail("Failed to open /proc/sys/vm/nr_hugepages: %s\n",
+ strerror(errno));
return -1;
}
if (read(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) <= 0) {
- perror("Failed to read from /proc/sys/vm/nr_hugepages");
+ ksft_test_result_fail("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
+ strerror(errno));
goto close_fd;
}
/* Start with the initial condition of 0 huge pages*/
if (write(fd, "0", sizeof(char)) != sizeof(char)) {
- perror("Failed to write 0 to /proc/sys/vm/nr_hugepages\n");
+ ksft_test_result_fail("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
+ strerror(errno));
goto close_fd;
}
@@ -114,14 +118,16 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
/* Request a large number of huge pages. The Kernel will allocate
as much as it can */
if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
- perror("Failed to write 100000 to /proc/sys/vm/nr_hugepages\n");
+ ksft_test_result_fail("Failed to write 100000 to /proc/sys/vm/nr_hugepages: %s\n",
+ strerror(errno));
goto close_fd;
}
lseek(fd, 0, SEEK_SET);
if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
- perror("Failed to re-read from /proc/sys/vm/nr_hugepages\n");
+ ksft_test_result_fail("Failed to re-read from /proc/sys/vm/nr_hugepages: %s\n",
+ strerror(errno));
goto close_fd;
}
@@ -129,67 +135,58 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
huge pages */
compaction_index = mem_free/(atoi(nr_hugepages) * hugepage_size);
- if (compaction_index > 3) {
- printf("No of huge pages allocated = %d\n",
- (atoi(nr_hugepages)));
- fprintf(stderr, "ERROR: Less that 1/%d of memory is available\n"
- "as huge pages\n", compaction_index);
- goto close_fd;
- }
-
- printf("No of huge pages allocated = %d\n",
- (atoi(nr_hugepages)));
-
lseek(fd, 0, SEEK_SET);
if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
!= strlen(initial_nr_hugepages)) {
- perror("Failed to write value to /proc/sys/vm/nr_hugepages\n");
+ ksft_test_result_fail("Failed to write value to /proc/sys/vm/nr_hugepages: %s\n",
+ strerror(errno));
goto close_fd;
}
- close(fd);
- return 0;
+ if (compaction_index > 3) {
+ ksft_print_msg("ERROR: Less that 1/%d of memory is available\n"
+ "as huge pages\n", compaction_index);
+ ksft_test_result_fail("No of huge pages allocated = %d\n", (atoi(nr_hugepages)));
+ goto close_fd;
+ }
+
+ ksft_test_result_pass("Memory compaction succeeded. No of huge pages allocated = %d\n",
+ (atoi(nr_hugepages)));
+ ret = 0;
close_fd:
close(fd);
- printf("Not OK. Compaction test failed.");
- return -1;
+ return ret;
}
int main(int argc, char **argv)
{
struct rlimit lim;
- struct map_list *list, *entry;
+ struct map_list *list = NULL, *entry;
size_t page_size, i;
void *map = NULL;
unsigned long mem_free = 0;
unsigned long hugepage_size = 0;
long mem_fragmentable_MB = 0;
- if (prereq() != 0) {
- printf("Either the sysctl compact_unevictable_allowed is not\n"
- "set to 1 or couldn't read the proc file.\n"
- "Skipping the test\n");
- return KSFT_SKIP;
- }
+ ksft_print_header();
+
+ if (prereq() || geteuid())
+ return ksft_exit_pass();
+
+ ksft_set_plan(1);
lim.rlim_cur = RLIM_INFINITY;
lim.rlim_max = RLIM_INFINITY;
- if (setrlimit(RLIMIT_MEMLOCK, &lim)) {
- perror("Failed to set rlimit:\n");
- return -1;
- }
+ if (setrlimit(RLIMIT_MEMLOCK, &lim))
+ ksft_exit_fail_msg("Failed to set rlimit: %s\n", strerror(errno));
page_size = getpagesize();
- list = NULL;
-
- if (read_memory_info(&mem_free, &hugepage_size) != 0) {
- printf("ERROR: Cannot read meminfo\n");
- return -1;
- }
+ if (read_memory_info(&mem_free, &hugepage_size) != 0)
+ ksft_exit_fail_msg("Failed to get meminfo\n");
mem_fragmentable_MB = mem_free * 0.8 / 1024;
@@ -225,7 +222,7 @@ int main(int argc, char **argv)
}
if (check_compaction(mem_free, hugepage_size) == 0)
- return 0;
+ return ksft_exit_pass();
- return -1;
+ return ksft_exit_fail();
}
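
The conversion above trades perror()/printf() and raw exit codes for the kselftest TAP helpers, so the harness can count passes, failures and skips. A minimal sketch of the reporting flow the file now follows (the include path assumes the test sits one directory below kselftest.h, as in this tree):

#include "../kselftest.h"

int main(void)
{
    int ok;

    ksft_print_header();        /* TAP header */
    ksft_set_plan(1);           /* number of results we will report */

    ok = 1;                     /* run the real check here */
    ksft_test_result(ok, "example check\n");

    if (ksft_get_fail_cnt())
        return ksft_exit_fail();
    return ksft_exit_pass();
}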
diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c
index 6f2f83990441..363bf5f801be 100644
--- a/tools/testing/selftests/mm/cow.c
+++ b/tools/testing/selftests/mm/cow.c
@@ -29,15 +29,49 @@
#include "../../../../mm/gup_test.h"
#include "../kselftest.h"
#include "vm_util.h"
+#include "thp_settings.h"
static size_t pagesize;
static int pagemap_fd;
-static size_t thpsize;
+static size_t pmdsize;
+static int nr_thpsizes;
+static size_t thpsizes[20];
static int nr_hugetlbsizes;
static size_t hugetlbsizes[10];
static int gup_fd;
static bool has_huge_zeropage;
+static int sz2ord(size_t size)
+{
+ return __builtin_ctzll(size / pagesize);
+}
+
+static int detect_thp_sizes(size_t sizes[], int max)
+{
+ int count = 0;
+ unsigned long orders;
+ size_t kb;
+ int i;
+
+ /* thp not supported at all. */
+ if (!pmdsize)
+ return 0;
+
+ orders = 1UL << sz2ord(pmdsize);
+ orders |= thp_supported_orders();
+
+ for (i = 0; orders && count < max; i++) {
+ if (!(orders & (1UL << i)))
+ continue;
+ orders &= ~(1UL << i);
+ kb = (pagesize >> 10) << i;
+ sizes[count++] = kb * 1024;
+ ksft_print_msg("[INFO] detected THP size: %zu KiB\n", kb);
+ }
+
+ return count;
+}
+
static void detect_huge_zeropage(void)
{
int fd = open("/sys/kernel/mm/transparent_hugepage/use_zero_page",
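
detect_thp_sizes() converts each supported order into a byte size with kb = (pagesize >> 10) << i, i.e. size = pagesize << order. A small worked sketch of that conversion, assuming a 4 KiB base page (so order 9 is the usual 2 MiB PMD-sized THP):

#include <stdio.h>

int main(void)
{
    unsigned long pagesize = 4096; /* assumed base page size */
    int order;

    for (order = 0; order <= 9; order++)
        printf("order %d -> %lu KiB\n", order, (pagesize << order) / 1024);
    return 0;
}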
@@ -734,7 +768,7 @@ enum thp_run {
THP_RUN_PARTIAL_SHARED,
};
-static void do_run_with_thp(test_fn fn, enum thp_run thp_run)
+static void do_run_with_thp(test_fn fn, enum thp_run thp_run, size_t thpsize)
{
char *mem, *mmap_mem, *tmp, *mremap_mem = MAP_FAILED;
size_t size, mmap_size, mremap_size;
@@ -759,11 +793,11 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run)
}
/*
- * Try to populate a THP. Touch the first sub-page and test if we get
- * another sub-page populated automatically.
+ * Try to populate a THP. Touch the first sub-page and test if
+ * we get the last sub-page populated automatically.
*/
mem[0] = 0;
- if (!pagemap_is_populated(pagemap_fd, mem + pagesize)) {
+ if (!pagemap_is_populated(pagemap_fd, mem + thpsize - pagesize)) {
ksft_test_result_skip("Did not get a THP populated\n");
goto munmap;
}
@@ -773,12 +807,14 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run)
switch (thp_run) {
case THP_RUN_PMD:
case THP_RUN_PMD_SWAPOUT:
+ assert(thpsize == pmdsize);
break;
case THP_RUN_PTE:
case THP_RUN_PTE_SWAPOUT:
/*
* Trigger PTE-mapping the THP by temporarily mapping a single
- * subpage R/O.
+ * subpage R/O. This is a noop if the THP is not pmdsize (and
+ * therefore already PTE-mapped).
*/
ret = mprotect(mem + pagesize, pagesize, PROT_READ);
if (ret) {
@@ -875,52 +911,60 @@ munmap:
munmap(mremap_mem, mremap_size);
}
-static void run_with_thp(test_fn fn, const char *desc)
+static void run_with_thp(test_fn fn, const char *desc, size_t size)
{
- ksft_print_msg("[RUN] %s ... with THP\n", desc);
- do_run_with_thp(fn, THP_RUN_PMD);
+ ksft_print_msg("[RUN] %s ... with THP (%zu kB)\n",
+ desc, size / 1024);
+ do_run_with_thp(fn, THP_RUN_PMD, size);
}
-static void run_with_thp_swap(test_fn fn, const char *desc)
+static void run_with_thp_swap(test_fn fn, const char *desc, size_t size)
{
- ksft_print_msg("[RUN] %s ... with swapped-out THP\n", desc);
- do_run_with_thp(fn, THP_RUN_PMD_SWAPOUT);
+ ksft_print_msg("[RUN] %s ... with swapped-out THP (%zu kB)\n",
+ desc, size / 1024);
+ do_run_with_thp(fn, THP_RUN_PMD_SWAPOUT, size);
}
-static void run_with_pte_mapped_thp(test_fn fn, const char *desc)
+static void run_with_pte_mapped_thp(test_fn fn, const char *desc, size_t size)
{
- ksft_print_msg("[RUN] %s ... with PTE-mapped THP\n", desc);
- do_run_with_thp(fn, THP_RUN_PTE);
+ ksft_print_msg("[RUN] %s ... with PTE-mapped THP (%zu kB)\n",
+ desc, size / 1024);
+ do_run_with_thp(fn, THP_RUN_PTE, size);
}
-static void run_with_pte_mapped_thp_swap(test_fn fn, const char *desc)
+static void run_with_pte_mapped_thp_swap(test_fn fn, const char *desc, size_t size)
{
- ksft_print_msg("[RUN] %s ... with swapped-out, PTE-mapped THP\n", desc);
- do_run_with_thp(fn, THP_RUN_PTE_SWAPOUT);
+ ksft_print_msg("[RUN] %s ... with swapped-out, PTE-mapped THP (%zu kB)\n",
+ desc, size / 1024);
+ do_run_with_thp(fn, THP_RUN_PTE_SWAPOUT, size);
}
-static void run_with_single_pte_of_thp(test_fn fn, const char *desc)
+static void run_with_single_pte_of_thp(test_fn fn, const char *desc, size_t size)
{
- ksft_print_msg("[RUN] %s ... with single PTE of THP\n", desc);
- do_run_with_thp(fn, THP_RUN_SINGLE_PTE);
+ ksft_print_msg("[RUN] %s ... with single PTE of THP (%zu kB)\n",
+ desc, size / 1024);
+ do_run_with_thp(fn, THP_RUN_SINGLE_PTE, size);
}
-static void run_with_single_pte_of_thp_swap(test_fn fn, const char *desc)
+static void run_with_single_pte_of_thp_swap(test_fn fn, const char *desc, size_t size)
{
- ksft_print_msg("[RUN] %s ... with single PTE of swapped-out THP\n", desc);
- do_run_with_thp(fn, THP_RUN_SINGLE_PTE_SWAPOUT);
+ ksft_print_msg("[RUN] %s ... with single PTE of swapped-out THP (%zu kB)\n",
+ desc, size / 1024);
+ do_run_with_thp(fn, THP_RUN_SINGLE_PTE_SWAPOUT, size);
}
-static void run_with_partial_mremap_thp(test_fn fn, const char *desc)
+static void run_with_partial_mremap_thp(test_fn fn, const char *desc, size_t size)
{
- ksft_print_msg("[RUN] %s ... with partially mremap()'ed THP\n", desc);
- do_run_with_thp(fn, THP_RUN_PARTIAL_MREMAP);
+ ksft_print_msg("[RUN] %s ... with partially mremap()'ed THP (%zu kB)\n",
+ desc, size / 1024);
+ do_run_with_thp(fn, THP_RUN_PARTIAL_MREMAP, size);
}
-static void run_with_partial_shared_thp(test_fn fn, const char *desc)
+static void run_with_partial_shared_thp(test_fn fn, const char *desc, size_t size)
{
- ksft_print_msg("[RUN] %s ... with partially shared THP\n", desc);
- do_run_with_thp(fn, THP_RUN_PARTIAL_SHARED);
+ ksft_print_msg("[RUN] %s ... with partially shared THP (%zu kB)\n",
+ desc, size / 1024);
+ do_run_with_thp(fn, THP_RUN_PARTIAL_SHARED, size);
}
static void run_with_hugetlb(test_fn fn, const char *desc, size_t hugetlbsize)
@@ -1091,15 +1135,27 @@ static void run_anon_test_case(struct test_case const *test_case)
run_with_base_page(test_case->fn, test_case->desc);
run_with_base_page_swap(test_case->fn, test_case->desc);
- if (thpsize) {
- run_with_thp(test_case->fn, test_case->desc);
- run_with_thp_swap(test_case->fn, test_case->desc);
- run_with_pte_mapped_thp(test_case->fn, test_case->desc);
- run_with_pte_mapped_thp_swap(test_case->fn, test_case->desc);
- run_with_single_pte_of_thp(test_case->fn, test_case->desc);
- run_with_single_pte_of_thp_swap(test_case->fn, test_case->desc);
- run_with_partial_mremap_thp(test_case->fn, test_case->desc);
- run_with_partial_shared_thp(test_case->fn, test_case->desc);
+ for (i = 0; i < nr_thpsizes; i++) {
+ size_t size = thpsizes[i];
+ struct thp_settings settings = *thp_current_settings();
+
+ settings.hugepages[sz2ord(pmdsize)].enabled = THP_NEVER;
+ settings.hugepages[sz2ord(size)].enabled = THP_ALWAYS;
+ thp_push_settings(&settings);
+
+ if (size == pmdsize) {
+ run_with_thp(test_case->fn, test_case->desc, size);
+ run_with_thp_swap(test_case->fn, test_case->desc, size);
+ }
+
+ run_with_pte_mapped_thp(test_case->fn, test_case->desc, size);
+ run_with_pte_mapped_thp_swap(test_case->fn, test_case->desc, size);
+ run_with_single_pte_of_thp(test_case->fn, test_case->desc, size);
+ run_with_single_pte_of_thp_swap(test_case->fn, test_case->desc, size);
+ run_with_partial_mremap_thp(test_case->fn, test_case->desc, size);
+ run_with_partial_shared_thp(test_case->fn, test_case->desc, size);
+
+ thp_pop_settings();
}
for (i = 0; i < nr_hugetlbsizes; i++)
run_with_hugetlb(test_case->fn, test_case->desc,
@@ -1120,8 +1176,9 @@ static int tests_per_anon_test_case(void)
{
int tests = 2 + nr_hugetlbsizes;
- if (thpsize)
- tests += 8;
+ tests += 6 * nr_thpsizes;
+ if (pmdsize)
+ tests += 2;
return tests;
}
@@ -1329,7 +1386,7 @@ static void run_anon_thp_test_cases(void)
{
int i;
- if (!thpsize)
+ if (!pmdsize)
return;
ksft_print_msg("[INFO] Anonymous THP tests\n");
@@ -1338,13 +1395,13 @@ static void run_anon_thp_test_cases(void)
struct test_case const *test_case = &anon_thp_test_cases[i];
ksft_print_msg("[RUN] %s\n", test_case->desc);
- do_run_with_thp(test_case->fn, THP_RUN_PMD);
+ do_run_with_thp(test_case->fn, THP_RUN_PMD, pmdsize);
}
}
static int tests_per_anon_thp_test_case(void)
{
- return thpsize ? 1 : 0;
+ return pmdsize ? 1 : 0;
}
typedef void (*non_anon_test_fn)(char *mem, const char *smem, size_t size);
@@ -1419,7 +1476,7 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
}
/* For alignment purposes, we need twice the thp size. */
- mmap_size = 2 * thpsize;
+ mmap_size = 2 * pmdsize;
mmap_mem = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mmap_mem == MAP_FAILED) {
@@ -1434,11 +1491,11 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
}
/* We need a THP-aligned memory area. */
- mem = (char *)(((uintptr_t)mmap_mem + thpsize) & ~(thpsize - 1));
- smem = (char *)(((uintptr_t)mmap_smem + thpsize) & ~(thpsize - 1));
+ mem = (char *)(((uintptr_t)mmap_mem + pmdsize) & ~(pmdsize - 1));
+ smem = (char *)(((uintptr_t)mmap_smem + pmdsize) & ~(pmdsize - 1));
- ret = madvise(mem, thpsize, MADV_HUGEPAGE);
- ret |= madvise(smem, thpsize, MADV_HUGEPAGE);
+ ret = madvise(mem, pmdsize, MADV_HUGEPAGE);
+ ret |= madvise(smem, pmdsize, MADV_HUGEPAGE);
if (ret) {
ksft_test_result_fail("MADV_HUGEPAGE failed\n");
goto munmap;
@@ -1457,7 +1514,7 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
goto munmap;
}
- fn(mem, smem, thpsize);
+ fn(mem, smem, pmdsize);
munmap:
munmap(mmap_mem, mmap_size);
if (mmap_smem != MAP_FAILED)
@@ -1650,7 +1707,7 @@ static void run_non_anon_test_case(struct non_anon_test_case const *test_case)
run_with_zeropage(test_case->fn, test_case->desc);
run_with_memfd(test_case->fn, test_case->desc);
run_with_tmpfile(test_case->fn, test_case->desc);
- if (thpsize)
+ if (pmdsize)
run_with_huge_zeropage(test_case->fn, test_case->desc);
for (i = 0; i < nr_hugetlbsizes; i++)
run_with_memfd_hugetlb(test_case->fn, test_case->desc,
@@ -1671,7 +1728,7 @@ static int tests_per_non_anon_test_case(void)
{
int tests = 3 + nr_hugetlbsizes;
- if (thpsize)
+ if (pmdsize)
tests += 1;
return tests;
}
@@ -1679,14 +1736,23 @@ static int tests_per_non_anon_test_case(void)
int main(int argc, char **argv)
{
int err;
+ struct thp_settings default_settings;
ksft_print_header();
pagesize = getpagesize();
- thpsize = read_pmd_pagesize();
- if (thpsize)
- ksft_print_msg("[INFO] detected THP size: %zu KiB\n",
- thpsize / 1024);
+ pmdsize = read_pmd_pagesize();
+ if (pmdsize) {
+ /* Only if THP is supported. */
+ thp_read_settings(&default_settings);
+ default_settings.hugepages[sz2ord(pmdsize)].enabled = THP_INHERIT;
+ thp_save_settings();
+ thp_push_settings(&default_settings);
+
+ ksft_print_msg("[INFO] detected PMD size: %zu KiB\n",
+ pmdsize / 1024);
+ nr_thpsizes = detect_thp_sizes(thpsizes, ARRAY_SIZE(thpsizes));
+ }
nr_hugetlbsizes = detect_hugetlb_page_sizes(hugetlbsizes,
ARRAY_SIZE(hugetlbsizes));
detect_huge_zeropage();
@@ -1704,6 +1770,11 @@ int main(int argc, char **argv)
run_anon_thp_test_cases();
run_non_anon_test_cases();
+ if (pmdsize) {
+ /* Only if THP is supported. */
+ thp_restore_settings();
+ }
+
err = ksft_get_fail_cnt();
if (err)
ksft_exit_fail_msg("%d out of %d tests failed\n",
diff --git a/tools/testing/selftests/mm/gup_test.c b/tools/testing/selftests/mm/gup_test.c
index ec2229136384..cbe99594d319 100644
--- a/tools/testing/selftests/mm/gup_test.c
+++ b/tools/testing/selftests/mm/gup_test.c
@@ -50,39 +50,41 @@ static char *cmd_to_str(unsigned long cmd)
void *gup_thread(void *data)
{
struct gup_test gup = *(struct gup_test *)data;
- int i;
+ int i, status;
/* Only report timing information on the *_BENCHMARK commands: */
if ((cmd == PIN_FAST_BENCHMARK) || (cmd == GUP_FAST_BENCHMARK) ||
(cmd == PIN_LONGTERM_BENCHMARK)) {
for (i = 0; i < repeats; i++) {
gup.size = size;
- if (ioctl(gup_fd, cmd, &gup))
- perror("ioctl"), exit(1);
+ status = ioctl(gup_fd, cmd, &gup);
+ if (status)
+ break;
pthread_mutex_lock(&print_mutex);
- printf("%s: Time: get:%lld put:%lld us",
- cmd_to_str(cmd), gup.get_delta_usec,
- gup.put_delta_usec);
+ ksft_print_msg("%s: Time: get:%lld put:%lld us",
+ cmd_to_str(cmd), gup.get_delta_usec,
+ gup.put_delta_usec);
if (gup.size != size)
- printf(", truncated (size: %lld)", gup.size);
- printf("\n");
+ ksft_print_msg(", truncated (size: %lld)", gup.size);
+ ksft_print_msg("\n");
pthread_mutex_unlock(&print_mutex);
}
} else {
gup.size = size;
- if (ioctl(gup_fd, cmd, &gup)) {
- perror("ioctl");
- exit(1);
- }
+ status = ioctl(gup_fd, cmd, &gup);
+ if (status)
+ goto return_;
pthread_mutex_lock(&print_mutex);
- printf("%s: done\n", cmd_to_str(cmd));
+ ksft_print_msg("%s: done\n", cmd_to_str(cmd));
if (gup.size != size)
- printf("Truncated (size: %lld)\n", gup.size);
+ ksft_print_msg("Truncated (size: %lld)\n", gup.size);
pthread_mutex_unlock(&print_mutex);
}
+return_:
+ ksft_test_result(!status, "ioctl status %d\n", status);
return NULL;
}
@@ -170,7 +172,7 @@ int main(int argc, char **argv)
touch = 1;
break;
default:
- return -1;
+ ksft_exit_fail_msg("Wrong argument\n");
}
}
@@ -198,11 +200,12 @@ int main(int argc, char **argv)
}
}
+ ksft_print_header();
+ ksft_set_plan(nthreads);
+
filed = open(file, O_RDWR|O_CREAT);
- if (filed < 0) {
- perror("open");
- exit(filed);
- }
+ if (filed < 0)
+ ksft_exit_fail_msg("Unable to open %s: %s\n", file, strerror(errno));
gup.nr_pages_per_call = nr_pages;
if (write)
@@ -213,27 +216,24 @@ int main(int argc, char **argv)
switch (errno) {
case EACCES:
if (getuid())
- printf("Please run this test as root\n");
+ ksft_print_msg("Please run this test as root\n");
break;
case ENOENT:
- if (opendir("/sys/kernel/debug") == NULL) {
- printf("mount debugfs at /sys/kernel/debug\n");
- break;
- }
- printf("check if CONFIG_GUP_TEST is enabled in kernel config\n");
+ if (opendir("/sys/kernel/debug") == NULL)
+ ksft_print_msg("mount debugfs at /sys/kernel/debug\n");
+ ksft_print_msg("check if CONFIG_GUP_TEST is enabled in kernel config\n");
break;
default:
- perror("failed to open " GUP_TEST_FILE);
+ ksft_print_msg("failed to open %s: %s\n", GUP_TEST_FILE, strerror(errno));
break;
}
- exit(KSFT_SKIP);
+ ksft_test_result_skip("Please run this test as root\n");
+ return ksft_exit_pass();
}
p = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, filed, 0);
- if (p == MAP_FAILED) {
- perror("mmap");
- exit(1);
- }
+ if (p == MAP_FAILED)
+ ksft_exit_fail_msg("mmap: %s\n", strerror(errno));
gup.addr = (unsigned long)p;
if (thp == 1)
@@ -264,7 +264,8 @@ int main(int argc, char **argv)
ret = pthread_join(tid[i], NULL);
assert(ret == 0);
}
+
free(tid);
- return 0;
+ return ksft_exit_pass();
}
diff --git a/tools/testing/selftests/mm/hugepage-mmap.c b/tools/testing/selftests/mm/hugepage-mmap.c
index 955ef87f382c..267eea2e0e0b 100644
--- a/tools/testing/selftests/mm/hugepage-mmap.c
+++ b/tools/testing/selftests/mm/hugepage-mmap.c
@@ -22,6 +22,7 @@
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
+#include "../kselftest.h"
#define LENGTH (256UL*1024*1024)
#define PROTECTION (PROT_READ | PROT_WRITE)
@@ -37,7 +38,7 @@
static void check_bytes(char *addr)
{
- printf("First hex is %x\n", *((unsigned int *)addr));
+ ksft_print_msg("First hex is %x\n", *((unsigned int *)addr));
}
static void write_bytes(char *addr)
@@ -55,7 +56,7 @@ static int read_bytes(char *addr)
check_bytes(addr);
for (i = 0; i < LENGTH; i++)
if (*(addr + i) != (char)i) {
- printf("Mismatch at %lu\n", i);
+ ksft_print_msg("Error: Mismatch at %lu\n", i);
return 1;
}
return 0;
@@ -66,20 +67,20 @@ int main(void)
void *addr;
int fd, ret;
+ ksft_print_header();
+ ksft_set_plan(1);
+
fd = memfd_create("hugepage-mmap", MFD_HUGETLB);
- if (fd < 0) {
- perror("memfd_create() failed");
- exit(1);
- }
+ if (fd < 0)
+ ksft_exit_fail_msg("memfd_create() failed: %s\n", strerror(errno));
addr = mmap(ADDR, LENGTH, PROTECTION, FLAGS, fd, 0);
if (addr == MAP_FAILED) {
- perror("mmap");
close(fd);
- exit(1);
+ ksft_exit_fail_msg("mmap(): %s\n", strerror(errno));
}
- printf("Returned address is %p\n", addr);
+ ksft_print_msg("Returned address is %p\n", addr);
check_bytes(addr);
write_bytes(addr);
ret = read_bytes(addr);
@@ -87,5 +88,7 @@ int main(void)
munmap(addr, LENGTH);
close(fd);
- return ret;
+ ksft_test_result(!ret, "Read same data\n");
+
+ ksft_exit(!ret);
}
diff --git a/tools/testing/selftests/mm/hugepage-mremap.c b/tools/testing/selftests/mm/hugepage-mremap.c
index cabd0084f57b..c463d1c09c9b 100644
--- a/tools/testing/selftests/mm/hugepage-mremap.c
+++ b/tools/testing/selftests/mm/hugepage-mremap.c
@@ -24,6 +24,7 @@
#include <sys/ioctl.h>
#include <string.h>
#include <stdbool.h>
+#include "../kselftest.h"
#include "vm_util.h"
#define DEFAULT_LENGTH_MB 10UL
@@ -34,7 +35,7 @@
static void check_bytes(char *addr)
{
- printf("First hex is %x\n", *((unsigned int *)addr));
+ ksft_print_msg("First hex is %x\n", *((unsigned int *)addr));
}
static void write_bytes(char *addr, size_t len)
@@ -52,7 +53,7 @@ static int read_bytes(char *addr, size_t len)
check_bytes(addr);
for (i = 0; i < len; i++)
if (*(addr + i) != (char)i) {
- printf("Mismatch at %lu\n", i);
+ ksft_print_msg("Mismatch at %lu\n", i);
return 1;
}
return 0;
@@ -66,17 +67,13 @@ static void register_region_with_uffd(char *addr, size_t len)
/* Create and enable userfaultfd object. */
uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
- if (uffd == -1) {
- perror("userfaultfd");
- exit(1);
- }
+ if (uffd == -1)
+ ksft_exit_fail_msg("userfaultfd: %s\n", strerror(errno));
uffdio_api.api = UFFD_API;
uffdio_api.features = 0;
- if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) {
- perror("ioctl-UFFDIO_API");
- exit(1);
- }
+ if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1)
+ ksft_exit_fail_msg("ioctl-UFFDIO_API: %s\n", strerror(errno));
/* Create a private anonymous mapping. The memory will be
* demand-zero paged--that is, not yet allocated. When we
@@ -86,21 +83,17 @@ static void register_region_with_uffd(char *addr, size_t len)
addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (addr == MAP_FAILED) {
- perror("mmap");
- exit(1);
- }
+ if (addr == MAP_FAILED)
+ ksft_exit_fail_msg("mmap: %s\n", strerror(errno));
- printf("Address returned by mmap() = %p\n", addr);
+ ksft_print_msg("Address returned by mmap() = %p\n", addr);
/* Register the memory range of the mapping we just created for
* handling by the userfaultfd object. In mode, we request to track
* missing pages (i.e., pages that have not yet been faulted in).
*/
- if (uffd_register(uffd, addr, len, true, false, false)) {
- perror("ioctl-UFFDIO_REGISTER");
- exit(1);
- }
+ if (uffd_register(uffd, addr, len, true, false, false))
+ ksft_exit_fail_msg("ioctl-UFFDIO_REGISTER: %s\n", strerror(errno));
}
int main(int argc, char *argv[])
@@ -108,10 +101,11 @@ int main(int argc, char *argv[])
size_t length = 0;
int ret = 0, fd;
- if (argc >= 2 && !strcmp(argv[1], "-h")) {
- printf("Usage: %s [length_in_MB]\n", argv[0]);
- exit(1);
- }
+ ksft_print_header();
+ ksft_set_plan(1);
+
+ if (argc >= 2 && !strcmp(argv[1], "-h"))
+ ksft_exit_fail_msg("Usage: %s [length_in_MB]\n", argv[0]);
/* Read memory length as the first arg if valid, otherwise fallback to
* the default length.
@@ -123,50 +117,40 @@ int main(int argc, char *argv[])
length = MB_TO_BYTES(length);
fd = memfd_create(argv[0], MFD_HUGETLB);
- if (fd < 0) {
- perror("Open failed");
- exit(1);
- }
+ if (fd < 0)
+ ksft_exit_fail_msg("Open failed: %s\n", strerror(errno));
/* mmap to a PUD aligned address to hopefully trigger pmd sharing. */
unsigned long suggested_addr = 0x7eaa40000000;
void *haddr = mmap((void *)suggested_addr, length, PROTECTION,
MAP_HUGETLB | MAP_SHARED | MAP_POPULATE, fd, 0);
- printf("Map haddr: Returned address is %p\n", haddr);
- if (haddr == MAP_FAILED) {
- perror("mmap1");
- exit(1);
- }
+ ksft_print_msg("Map haddr: Returned address is %p\n", haddr);
+ if (haddr == MAP_FAILED)
+ ksft_exit_fail_msg("mmap1: %s\n", strerror(errno));
/* mmap again to a dummy address to hopefully trigger pmd sharing. */
suggested_addr = 0x7daa40000000;
void *daddr = mmap((void *)suggested_addr, length, PROTECTION,
MAP_HUGETLB | MAP_SHARED | MAP_POPULATE, fd, 0);
- printf("Map daddr: Returned address is %p\n", daddr);
- if (daddr == MAP_FAILED) {
- perror("mmap3");
- exit(1);
- }
+ ksft_print_msg("Map daddr: Returned address is %p\n", daddr);
+ if (daddr == MAP_FAILED)
+ ksft_exit_fail_msg("mmap3: %s\n", strerror(errno));
suggested_addr = 0x7faa40000000;
void *vaddr =
mmap((void *)suggested_addr, length, PROTECTION, FLAGS, -1, 0);
- printf("Map vaddr: Returned address is %p\n", vaddr);
- if (vaddr == MAP_FAILED) {
- perror("mmap2");
- exit(1);
- }
+ ksft_print_msg("Map vaddr: Returned address is %p\n", vaddr);
+ if (vaddr == MAP_FAILED)
+ ksft_exit_fail_msg("mmap2: %s\n", strerror(errno));
register_region_with_uffd(haddr, length);
void *addr = mremap(haddr, length, length,
MREMAP_MAYMOVE | MREMAP_FIXED, vaddr);
- if (addr == MAP_FAILED) {
- perror("mremap");
- exit(1);
- }
+ if (addr == MAP_FAILED)
+ ksft_exit_fail_msg("mremap: %s\n", strerror(errno));
- printf("Mremap: Returned address is %p\n", addr);
+ ksft_print_msg("Mremap: Returned address is %p\n", addr);
check_bytes(addr);
write_bytes(addr, length);
ret = read_bytes(addr, length);
@@ -174,12 +158,11 @@ int main(int argc, char *argv[])
munmap(addr, length);
addr = mremap(addr, length, length, 0);
- if (addr != MAP_FAILED) {
- printf("mremap: Expected failure, but call succeeded\n");
- exit(1);
- }
+ if (addr != MAP_FAILED)
+ ksft_exit_fail_msg("mremap: Expected failure, but call succeeded\n");
close(fd);
- return ret;
+ ksft_test_result(!ret, "Read same data\n");
+ ksft_exit(!ret);
}
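
The test leans on mremap() with MREMAP_MAYMOVE | MREMAP_FIXED to move the hugetlb mapping to a chosen address. A hedged sketch of that pattern on an ordinary anonymous mapping (addresses and length here are purely illustrative):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 2 * 1024 * 1024;
    void *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    void *dst_hint = (void *)0x7faa40000000UL;
    void *dst;

    if (src == MAP_FAILED)
        return 1;
    /* MREMAP_FIXED requires MREMAP_MAYMOVE and takes the target address as
     * the extra argument; anything already mapped there is replaced. */
    dst = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, dst_hint);
    if (dst == MAP_FAILED) {
        perror("mremap");
        return 1;
    }
    printf("mapping moved to %p\n", dst);
    munmap(dst, len);
    return 0;
}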
diff --git a/tools/testing/selftests/mm/khugepaged.c b/tools/testing/selftests/mm/khugepaged.c
index 030667cb5533..829320a519e7 100644
--- a/tools/testing/selftests/mm/khugepaged.c
+++ b/tools/testing/selftests/mm/khugepaged.c
@@ -22,13 +22,14 @@
#include "linux/magic.h"
#include "vm_util.h"
+#include "thp_settings.h"
#define BASE_ADDR ((void *)(1UL << 30))
static unsigned long hpage_pmd_size;
static unsigned long page_size;
static int hpage_pmd_nr;
+static int anon_order;
-#define THP_SYSFS "/sys/kernel/mm/transparent_hugepage/"
#define PID_SMAPS "/proc/self/smaps"
#define TEST_FILE "collapse_test_file"
@@ -71,78 +72,7 @@ struct file_info {
};
static struct file_info finfo;
-
-enum thp_enabled {
- THP_ALWAYS,
- THP_MADVISE,
- THP_NEVER,
-};
-
-static const char *thp_enabled_strings[] = {
- "always",
- "madvise",
- "never",
- NULL
-};
-
-enum thp_defrag {
- THP_DEFRAG_ALWAYS,
- THP_DEFRAG_DEFER,
- THP_DEFRAG_DEFER_MADVISE,
- THP_DEFRAG_MADVISE,
- THP_DEFRAG_NEVER,
-};
-
-static const char *thp_defrag_strings[] = {
- "always",
- "defer",
- "defer+madvise",
- "madvise",
- "never",
- NULL
-};
-
-enum shmem_enabled {
- SHMEM_ALWAYS,
- SHMEM_WITHIN_SIZE,
- SHMEM_ADVISE,
- SHMEM_NEVER,
- SHMEM_DENY,
- SHMEM_FORCE,
-};
-
-static const char *shmem_enabled_strings[] = {
- "always",
- "within_size",
- "advise",
- "never",
- "deny",
- "force",
- NULL
-};
-
-struct khugepaged_settings {
- bool defrag;
- unsigned int alloc_sleep_millisecs;
- unsigned int scan_sleep_millisecs;
- unsigned int max_ptes_none;
- unsigned int max_ptes_swap;
- unsigned int max_ptes_shared;
- unsigned long pages_to_scan;
-};
-
-struct settings {
- enum thp_enabled thp_enabled;
- enum thp_defrag thp_defrag;
- enum shmem_enabled shmem_enabled;
- bool use_zero_page;
- struct khugepaged_settings khugepaged;
- unsigned long read_ahead_kb;
-};
-
-static struct settings saved_settings;
static bool skip_settings_restore;
-
static int exit_status;
static void success(const char *msg)
@@ -161,260 +91,34 @@ static void skip(const char *msg)
printf(" \e[33m%s\e[0m\n", msg);
}
-static int read_file(const char *path, char *buf, size_t buflen)
-{
- int fd;
- ssize_t numread;
-
- fd = open(path, O_RDONLY);
- if (fd == -1)
- return 0;
-
- numread = read(fd, buf, buflen - 1);
- if (numread < 1) {
- close(fd);
- return 0;
- }
-
- buf[numread] = '\0';
- close(fd);
-
- return (unsigned int) numread;
-}
-
-static int write_file(const char *path, const char *buf, size_t buflen)
-{
- int fd;
- ssize_t numwritten;
-
- fd = open(path, O_WRONLY);
- if (fd == -1) {
- printf("open(%s)\n", path);
- exit(EXIT_FAILURE);
- return 0;
- }
-
- numwritten = write(fd, buf, buflen - 1);
- close(fd);
- if (numwritten < 1) {
- printf("write(%s)\n", buf);
- exit(EXIT_FAILURE);
- return 0;
- }
-
- return (unsigned int) numwritten;
-}
-
-static int read_string(const char *name, const char *strings[])
+static void restore_settings_atexit(void)
{
- char path[PATH_MAX];
- char buf[256];
- char *c;
- int ret;
-
- ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
- if (ret >= PATH_MAX) {
- printf("%s: Pathname is too long\n", __func__);
- exit(EXIT_FAILURE);
- }
-
- if (!read_file(path, buf, sizeof(buf))) {
- perror(path);
- exit(EXIT_FAILURE);
- }
-
- c = strchr(buf, '[');
- if (!c) {
- printf("%s: Parse failure\n", __func__);
- exit(EXIT_FAILURE);
- }
-
- c++;
- memmove(buf, c, sizeof(buf) - (c - buf));
-
- c = strchr(buf, ']');
- if (!c) {
- printf("%s: Parse failure\n", __func__);
- exit(EXIT_FAILURE);
- }
- *c = '\0';
-
- ret = 0;
- while (strings[ret]) {
- if (!strcmp(strings[ret], buf))
- return ret;
- ret++;
- }
-
- printf("Failed to parse %s\n", name);
- exit(EXIT_FAILURE);
-}
-
-static void write_string(const char *name, const char *val)
-{
- char path[PATH_MAX];
- int ret;
-
- ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
- if (ret >= PATH_MAX) {
- printf("%s: Pathname is too long\n", __func__);
- exit(EXIT_FAILURE);
- }
-
- if (!write_file(path, val, strlen(val) + 1)) {
- perror(path);
- exit(EXIT_FAILURE);
- }
-}
-
-static const unsigned long _read_num(const char *path)
-{
- char buf[21];
-
- if (read_file(path, buf, sizeof(buf)) < 0) {
- perror("read_file(read_num)");
- exit(EXIT_FAILURE);
- }
-
- return strtoul(buf, NULL, 10);
-}
-
-static const unsigned long read_num(const char *name)
-{
- char path[PATH_MAX];
- int ret;
-
- ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
- if (ret >= PATH_MAX) {
- printf("%s: Pathname is too long\n", __func__);
- exit(EXIT_FAILURE);
- }
- return _read_num(path);
-}
-
-static void _write_num(const char *path, unsigned long num)
-{
- char buf[21];
-
- sprintf(buf, "%ld", num);
- if (!write_file(path, buf, strlen(buf) + 1)) {
- perror(path);
- exit(EXIT_FAILURE);
- }
-}
-
-static void write_num(const char *name, unsigned long num)
-{
- char path[PATH_MAX];
- int ret;
-
- ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
- if (ret >= PATH_MAX) {
- printf("%s: Pathname is too long\n", __func__);
- exit(EXIT_FAILURE);
- }
- _write_num(path, num);
-}
-
-static void write_settings(struct settings *settings)
-{
- struct khugepaged_settings *khugepaged = &settings->khugepaged;
-
- write_string("enabled", thp_enabled_strings[settings->thp_enabled]);
- write_string("defrag", thp_defrag_strings[settings->thp_defrag]);
- write_string("shmem_enabled",
- shmem_enabled_strings[settings->shmem_enabled]);
- write_num("use_zero_page", settings->use_zero_page);
-
- write_num("khugepaged/defrag", khugepaged->defrag);
- write_num("khugepaged/alloc_sleep_millisecs",
- khugepaged->alloc_sleep_millisecs);
- write_num("khugepaged/scan_sleep_millisecs",
- khugepaged->scan_sleep_millisecs);
- write_num("khugepaged/max_ptes_none", khugepaged->max_ptes_none);
- write_num("khugepaged/max_ptes_swap", khugepaged->max_ptes_swap);
- write_num("khugepaged/max_ptes_shared", khugepaged->max_ptes_shared);
- write_num("khugepaged/pages_to_scan", khugepaged->pages_to_scan);
-
- if (file_ops && finfo.type == VMA_FILE)
- _write_num(finfo.dev_queue_read_ahead_path,
- settings->read_ahead_kb);
-}
-
-#define MAX_SETTINGS_DEPTH 4
-static struct settings settings_stack[MAX_SETTINGS_DEPTH];
-static int settings_index;
-
-static struct settings *current_settings(void)
-{
- if (!settings_index) {
- printf("Fail: No settings set");
- exit(EXIT_FAILURE);
- }
- return settings_stack + settings_index - 1;
-}
+ if (skip_settings_restore)
+ return;
-static void push_settings(struct settings *settings)
-{
- if (settings_index >= MAX_SETTINGS_DEPTH) {
- printf("Fail: Settings stack exceeded");
- exit(EXIT_FAILURE);
- }
- settings_stack[settings_index++] = *settings;
- write_settings(current_settings());
-}
+ printf("Restore THP and khugepaged settings...");
+ thp_restore_settings();
+ success("OK");
-static void pop_settings(void)
-{
- if (settings_index <= 0) {
- printf("Fail: Settings stack empty");
- exit(EXIT_FAILURE);
- }
- --settings_index;
- write_settings(current_settings());
+ skip_settings_restore = true;
}
static void restore_settings(int sig)
{
- if (skip_settings_restore)
- goto out;
-
- printf("Restore THP and khugepaged settings...");
- write_settings(&saved_settings);
- success("OK");
- if (sig)
- exit(EXIT_FAILURE);
-out:
- exit(exit_status);
+ /* exit() will invoke the restore_settings_atexit handler. */
+ exit(sig ? EXIT_FAILURE : exit_status);
}
static void save_settings(void)
{
printf("Save THP and khugepaged settings...");
- saved_settings = (struct settings) {
- .thp_enabled = read_string("enabled", thp_enabled_strings),
- .thp_defrag = read_string("defrag", thp_defrag_strings),
- .shmem_enabled =
- read_string("shmem_enabled", shmem_enabled_strings),
- .use_zero_page = read_num("use_zero_page"),
- };
- saved_settings.khugepaged = (struct khugepaged_settings) {
- .defrag = read_num("khugepaged/defrag"),
- .alloc_sleep_millisecs =
- read_num("khugepaged/alloc_sleep_millisecs"),
- .scan_sleep_millisecs =
- read_num("khugepaged/scan_sleep_millisecs"),
- .max_ptes_none = read_num("khugepaged/max_ptes_none"),
- .max_ptes_swap = read_num("khugepaged/max_ptes_swap"),
- .max_ptes_shared = read_num("khugepaged/max_ptes_shared"),
- .pages_to_scan = read_num("khugepaged/pages_to_scan"),
- };
if (file_ops && finfo.type == VMA_FILE)
- saved_settings.read_ahead_kb =
- _read_num(finfo.dev_queue_read_ahead_path);
+ thp_set_read_ahead_path(finfo.dev_queue_read_ahead_path);
+ thp_save_settings();
success("OK");
+ atexit(restore_settings_atexit);
signal(SIGTERM, restore_settings);
signal(SIGINT, restore_settings);
signal(SIGHUP, restore_settings);
@@ -793,7 +497,7 @@ static void __madvise_collapse(const char *msg, char *p, int nr_hpages,
struct mem_ops *ops, bool expect)
{
int ret;
- struct settings settings = *current_settings();
+ struct thp_settings settings = *thp_current_settings();
printf("%s...", msg);
@@ -803,7 +507,7 @@ static void __madvise_collapse(const char *msg, char *p, int nr_hpages,
*/
settings.thp_enabled = THP_NEVER;
settings.shmem_enabled = SHMEM_NEVER;
- push_settings(&settings);
+ thp_push_settings(&settings);
/* Clear VM_NOHUGEPAGE */
madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE);
@@ -815,7 +519,7 @@ static void __madvise_collapse(const char *msg, char *p, int nr_hpages,
else
success("OK");
- pop_settings();
+ thp_pop_settings();
}
static void madvise_collapse(const char *msg, char *p, int nr_hpages,
@@ -845,13 +549,13 @@ static bool wait_for_scan(const char *msg, char *p, int nr_hpages,
madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE);
/* Wait until the second full_scan completed */
- full_scans = read_num("khugepaged/full_scans") + 2;
+ full_scans = thp_read_num("khugepaged/full_scans") + 2;
printf("%s...", msg);
while (timeout--) {
if (ops->check_huge(p, nr_hpages))
break;
- if (read_num("khugepaged/full_scans") >= full_scans)
+ if (thp_read_num("khugepaged/full_scans") >= full_scans)
break;
printf(".");
usleep(TICK);
@@ -904,13 +608,18 @@ static bool is_tmpfs(struct mem_ops *ops)
return ops == &__file_ops && finfo.type == VMA_SHMEM;
}
+static bool is_anon(struct mem_ops *ops)
+{
+ return ops == &__anon_ops;
+}
+
static void alloc_at_fault(void)
{
- struct settings settings = *current_settings();
+ struct thp_settings settings = *thp_current_settings();
char *p;
settings.thp_enabled = THP_ALWAYS;
- push_settings(&settings);
+ thp_push_settings(&settings);
p = alloc_mapping(1);
*p = 1;
@@ -920,7 +629,7 @@ static void alloc_at_fault(void)
else
fail("Fail");
- pop_settings();
+ thp_pop_settings();
madvise(p, page_size, MADV_DONTNEED);
printf("Split huge PMD on MADV_DONTNEED...");
@@ -968,11 +677,12 @@ static void collapse_single_pte_entry(struct collapse_context *c, struct mem_ops
static void collapse_max_ptes_none(struct collapse_context *c, struct mem_ops *ops)
{
int max_ptes_none = hpage_pmd_nr / 2;
- struct settings settings = *current_settings();
+ struct thp_settings settings = *thp_current_settings();
void *p;
+ int fault_nr_pages = is_anon(ops) ? 1 << anon_order : 1;
settings.khugepaged.max_ptes_none = max_ptes_none;
- push_settings(&settings);
+ thp_push_settings(&settings);
p = ops->setup_area(1);
@@ -983,10 +693,10 @@ static void collapse_max_ptes_none(struct collapse_context *c, struct mem_ops *o
goto skip;
}
- ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size);
+ ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none - fault_nr_pages) * page_size);
c->collapse("Maybe collapse with max_ptes_none exceeded", p, 1,
ops, !c->enforce_pte_scan_limits);
- validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size);
+ validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - fault_nr_pages) * page_size);
if (c->enforce_pte_scan_limits) {
ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size);
@@ -997,7 +707,7 @@ static void collapse_max_ptes_none(struct collapse_context *c, struct mem_ops *o
}
skip:
ops->cleanup_area(p, hpage_pmd_size);
- pop_settings();
+ thp_pop_settings();
}
static void collapse_swapin_single_pte(struct collapse_context *c, struct mem_ops *ops)
@@ -1028,7 +738,7 @@ out:
static void collapse_max_ptes_swap(struct collapse_context *c, struct mem_ops *ops)
{
- int max_ptes_swap = read_num("khugepaged/max_ptes_swap");
+ int max_ptes_swap = thp_read_num("khugepaged/max_ptes_swap");
void *p;
p = ops->setup_area(1);
@@ -1245,11 +955,11 @@ static void collapse_fork_compound(struct collapse_context *c, struct mem_ops *o
fail("Fail");
ops->fault(p, 0, page_size);
- write_num("khugepaged/max_ptes_shared", hpage_pmd_nr - 1);
+ thp_write_num("khugepaged/max_ptes_shared", hpage_pmd_nr - 1);
c->collapse("Collapse PTE table full of compound pages in child",
p, 1, ops, true);
- write_num("khugepaged/max_ptes_shared",
- current_settings()->khugepaged.max_ptes_shared);
+ thp_write_num("khugepaged/max_ptes_shared",
+ thp_current_settings()->khugepaged.max_ptes_shared);
validate_memory(p, 0, hpage_pmd_size);
ops->cleanup_area(p, hpage_pmd_size);
@@ -1270,7 +980,7 @@ static void collapse_fork_compound(struct collapse_context *c, struct mem_ops *o
static void collapse_max_ptes_shared(struct collapse_context *c, struct mem_ops *ops)
{
- int max_ptes_shared = read_num("khugepaged/max_ptes_shared");
+ int max_ptes_shared = thp_read_num("khugepaged/max_ptes_shared");
int wstatus;
void *p;
@@ -1373,7 +1083,7 @@ static void madvise_retracted_page_tables(struct collapse_context *c,
static void usage(void)
{
- fprintf(stderr, "\nUsage: ./khugepaged <test type> [dir]\n\n");
+ fprintf(stderr, "\nUsage: ./khugepaged [OPTIONS] <test type> [dir]\n\n");
fprintf(stderr, "\t<test type>\t: <context>:<mem_type>\n");
fprintf(stderr, "\t<context>\t: [all|khugepaged|madvise]\n");
fprintf(stderr, "\t<mem_type>\t: [all|anon|file|shmem]\n");
@@ -1382,15 +1092,34 @@ static void usage(void)
fprintf(stderr, "\tCONFIG_READ_ONLY_THP_FOR_FS=y\n");
fprintf(stderr, "\n\tif [dir] is a (sub)directory of a tmpfs mount, tmpfs must be\n");
fprintf(stderr, "\tmounted with huge=madvise option for khugepaged tests to work\n");
+ fprintf(stderr, "\n\tSupported Options:\n");
+ fprintf(stderr, "\t\t-h: This help message.\n");
+ fprintf(stderr, "\t\t-s: mTHP size, expressed as page order.\n");
+ fprintf(stderr, "\t\t Defaults to 0. Use this size for anon allocations.\n");
exit(1);
}
-static void parse_test_type(int argc, const char **argv)
+static void parse_test_type(int argc, char **argv)
{
+ int opt;
char *buf;
const char *token;
- if (argc == 1) {
+ while ((opt = getopt(argc, argv, "s:h")) != -1) {
+ switch (opt) {
+ case 's':
+ anon_order = atoi(optarg);
+ break;
+ case 'h':
+ default:
+ usage();
+ }
+ }
+
+ argv += optind;
+ argc -= optind;
+
+ if (argc == 0) {
/* Backwards compatibility */
khugepaged_context = &__khugepaged_context;
madvise_context = &__madvise_context;
@@ -1398,7 +1127,7 @@ static void parse_test_type(int argc, const char **argv)
return;
}
- buf = strdup(argv[1]);
+ buf = strdup(argv[0]);
token = strsep(&buf, ":");
if (!strcmp(token, "all")) {
@@ -1432,13 +1161,16 @@ static void parse_test_type(int argc, const char **argv)
if (!file_ops)
return;
- if (argc != 3)
+ if (argc != 2)
usage();
+
+ get_finfo(argv[1]);
}
-int main(int argc, const char **argv)
+int main(int argc, char **argv)
{
- struct settings default_settings = {
+ int hpage_pmd_order;
+ struct thp_settings default_settings = {
.thp_enabled = THP_MADVISE,
.thp_defrag = THP_DEFRAG_ALWAYS,
.shmem_enabled = SHMEM_ADVISE,
@@ -1460,9 +1192,6 @@ int main(int argc, const char **argv)
parse_test_type(argc, argv);
- if (file_ops)
- get_finfo(argv[2]);
-
setbuf(stdout, NULL);
page_size = getpagesize();
@@ -1472,14 +1201,17 @@ int main(int argc, const char **argv)
exit(EXIT_FAILURE);
}
hpage_pmd_nr = hpage_pmd_size / page_size;
+ hpage_pmd_order = __builtin_ctz(hpage_pmd_nr);
default_settings.khugepaged.max_ptes_none = hpage_pmd_nr - 1;
default_settings.khugepaged.max_ptes_swap = hpage_pmd_nr / 8;
default_settings.khugepaged.max_ptes_shared = hpage_pmd_nr / 2;
default_settings.khugepaged.pages_to_scan = hpage_pmd_nr * 8;
+ default_settings.hugepages[hpage_pmd_order].enabled = THP_INHERIT;
+ default_settings.hugepages[anon_order].enabled = THP_ALWAYS;
save_settings();
- push_settings(&default_settings);
+ thp_push_settings(&default_settings);
alloc_at_fault();
diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh
index 00757445278e..246d53a5d7f2 100755
--- a/tools/testing/selftests/mm/run_vmtests.sh
+++ b/tools/testing/selftests/mm/run_vmtests.sh
@@ -5,6 +5,7 @@
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
+count_total=0
count_pass=0
count_fail=0
count_skip=0
@@ -17,6 +18,7 @@ usage: ${BASH_SOURCE[0]:-$0} [ options ]
-a: run all tests, including extra ones
-t: specify specific categories to tests to run
-h: display this message
+ -n: disable TAP output
The default behavior is to run required tests only. If -a is specified,
will run all tests.
@@ -77,12 +79,14 @@ EOF
}
RUN_ALL=false
+TAP_PREFIX="# "
-while getopts "aht:" OPT; do
+while getopts "aht:n" OPT; do
case ${OPT} in
"a") RUN_ALL=true ;;
"h") usage ;;
"t") VM_SELFTEST_ITEMS=${OPTARG} ;;
+ "n") TAP_PREFIX= ;;
esac
done
shift $((OPTIND -1))
@@ -184,30 +188,52 @@ fi
VADDR64=0
echo "$ARCH64STR" | grep "$ARCH" &>/dev/null && VADDR64=1
+tap_prefix() {
+ sed -e "s/^/${TAP_PREFIX}/"
+}
+
+tap_output() {
+ if [[ ! -z "$TAP_PREFIX" ]]; then
+ read str
+ echo $str
+ fi
+}
+
+pretty_name() {
+ echo "$*" | sed -e 's/^\(bash \)\?\.\///'
+}
+
# Usage: run_test [test binary] [arbitrary test arguments...]
run_test() {
if test_selected ${CATEGORY}; then
+ local test=$(pretty_name "$*")
local title="running $*"
local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -)
- printf "%s\n%s\n%s\n" "$sep" "$title" "$sep"
+ printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" | tap_prefix
- "$@"
- local ret=$?
+ ("$@" 2>&1) | tap_prefix
+ local ret=${PIPESTATUS[0]}
+ count_total=$(( count_total + 1 ))
if [ $ret -eq 0 ]; then
count_pass=$(( count_pass + 1 ))
- echo "[PASS]"
+ echo "[PASS]" | tap_prefix
+ echo "ok ${count_total} ${test}" | tap_output
elif [ $ret -eq $ksft_skip ]; then
count_skip=$(( count_skip + 1 ))
- echo "[SKIP]"
+ echo "[SKIP]" | tap_prefix
+ echo "ok ${count_total} ${test} # SKIP" | tap_output
exitcode=$ksft_skip
else
count_fail=$(( count_fail + 1 ))
- echo "[FAIL]"
+ echo "[FAIL]" | tap_prefix
+ echo "not ok ${count_total} ${test} # exit=$ret" | tap_output
exitcode=1
fi
fi # test_selected
}
+echo "TAP version 13" | tap_output
+
CATEGORY="hugetlb" run_test ./hugepage-mmap
shmmax=$(cat /proc/sys/kernel/shmmax)
@@ -231,9 +257,9 @@ CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv
echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
if test_selected "hugetlb"; then
- echo "NOTE: These hugetlb tests provide minimal coverage. Use"
- echo " https://github.com/libhugetlbfs/libhugetlbfs.git for"
- echo " hugetlb regression testing."
+ echo "NOTE: These hugetlb tests provide minimal coverage. Use" | tap_prefix
+ echo " https://github.com/libhugetlbfs/libhugetlbfs.git for" | tap_prefix
+ echo " hugetlb regression testing." | tap_prefix
fi
CATEGORY="mmap" run_test ./map_fixed_noreplace
@@ -312,7 +338,7 @@ CATEGORY="hmm" run_test bash ./test_hmm.sh smoke
# MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
CATEGORY="madv_populate" run_test ./madv_populate
-echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope
+(echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix
CATEGORY="memfd_secret" run_test ./memfd_secret
# KSM KSM_MERGE_TIME_HUGE_PAGES test with size of 100
@@ -334,8 +360,6 @@ CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 0
CATEGORY="ksm" run_test ./ksm_functional_tests
-run_test ./ksm_functional_tests
-
# protection_keys tests
if [ -x ./protection_keys_32 ]
then
@@ -359,6 +383,8 @@ CATEGORY="cow" run_test ./cow
CATEGORY="thp" run_test ./khugepaged
+CATEGORY="thp" run_test ./khugepaged -s 2
+
CATEGORY="thp" run_test ./transhuge-stress -d 20
CATEGORY="thp" run_test ./split_huge_page_test
@@ -369,6 +395,7 @@ CATEGORY="mkdirty" run_test ./mkdirty
CATEGORY="mdwe" run_test ./mdwe_test
-echo "SUMMARY: PASS=${count_pass} SKIP=${count_skip} FAIL=${count_fail}"
+echo "SUMMARY: PASS=${count_pass} SKIP=${count_skip} FAIL=${count_fail}" | tap_prefix
+echo "1..${count_total}" | tap_output
exit $exitcode
diff --git a/tools/testing/selftests/mm/thp_settings.c b/tools/testing/selftests/mm/thp_settings.c
new file mode 100644
index 000000000000..a4163438108e
--- /dev/null
+++ b/tools/testing/selftests/mm/thp_settings.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <fcntl.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "thp_settings.h"
+
+#define THP_SYSFS "/sys/kernel/mm/transparent_hugepage/"
+#define MAX_SETTINGS_DEPTH 4
+static struct thp_settings settings_stack[MAX_SETTINGS_DEPTH];
+static int settings_index;
+static struct thp_settings saved_settings;
+static char dev_queue_read_ahead_path[PATH_MAX];
+
+static const char * const thp_enabled_strings[] = {
+ "never",
+ "always",
+ "inherit",
+ "madvise",
+ NULL
+};
+
+static const char * const thp_defrag_strings[] = {
+ "always",
+ "defer",
+ "defer+madvise",
+ "madvise",
+ "never",
+ NULL
+};
+
+static const char * const shmem_enabled_strings[] = {
+ "always",
+ "within_size",
+ "advise",
+ "never",
+ "deny",
+ "force",
+ NULL
+};
+
+int read_file(const char *path, char *buf, size_t buflen)
+{
+ int fd;
+ ssize_t numread;
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1)
+ return 0;
+
+ numread = read(fd, buf, buflen - 1);
+ if (numread < 1) {
+ close(fd);
+ return 0;
+ }
+
+ buf[numread] = '\0';
+ close(fd);
+
+ return (unsigned int) numread;
+}
+
+int write_file(const char *path, const char *buf, size_t buflen)
+{
+ int fd;
+ ssize_t numwritten;
+
+ fd = open(path, O_WRONLY);
+ if (fd == -1) {
+ printf("open(%s)\n", path);
+ exit(EXIT_FAILURE);
+ return 0;
+ }
+
+ numwritten = write(fd, buf, buflen - 1);
+ close(fd);
+ if (numwritten < 1) {
+ printf("write(%s)\n", buf);
+ exit(EXIT_FAILURE);
+ return 0;
+ }
+
+ return (unsigned int) numwritten;
+}
+
+const unsigned long read_num(const char *path)
+{
+ char buf[21];
+
+ if (read_file(path, buf, sizeof(buf)) < 0) {
+ perror("read_file()");
+ exit(EXIT_FAILURE);
+ }
+
+ return strtoul(buf, NULL, 10);
+}
+
+void write_num(const char *path, unsigned long num)
+{
+ char buf[21];
+
+ sprintf(buf, "%ld", num);
+ if (!write_file(path, buf, strlen(buf) + 1)) {
+ perror(path);
+ exit(EXIT_FAILURE);
+ }
+}
+
+int thp_read_string(const char *name, const char * const strings[])
+{
+ char path[PATH_MAX];
+ char buf[256];
+ char *c;
+ int ret;
+
+ ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
+ if (ret >= PATH_MAX) {
+ printf("%s: Pathname is too long\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+
+ if (!read_file(path, buf, sizeof(buf))) {
+ perror(path);
+ exit(EXIT_FAILURE);
+ }
+
+ c = strchr(buf, '[');
+ if (!c) {
+ printf("%s: Parse failure\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+
+ c++;
+ memmove(buf, c, sizeof(buf) - (c - buf));
+
+ c = strchr(buf, ']');
+ if (!c) {
+ printf("%s: Parse failure\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+ *c = '\0';
+
+ ret = 0;
+ while (strings[ret]) {
+ if (!strcmp(strings[ret], buf))
+ return ret;
+ ret++;
+ }
+
+ printf("Failed to parse %s\n", name);
+ exit(EXIT_FAILURE);
+}
+
+void thp_write_string(const char *name, const char *val)
+{
+ char path[PATH_MAX];
+ int ret;
+
+ ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
+ if (ret >= PATH_MAX) {
+ printf("%s: Pathname is too long\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+
+ if (!write_file(path, val, strlen(val) + 1)) {
+ perror(path);
+ exit(EXIT_FAILURE);
+ }
+}
+
+const unsigned long thp_read_num(const char *name)
+{
+ char path[PATH_MAX];
+ int ret;
+
+ ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
+ if (ret >= PATH_MAX) {
+ printf("%s: Pathname is too long\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+ return read_num(path);
+}
+
+void thp_write_num(const char *name, unsigned long num)
+{
+ char path[PATH_MAX];
+ int ret;
+
+ ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
+ if (ret >= PATH_MAX) {
+ printf("%s: Pathname is too long\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+ write_num(path, num);
+}
+
+void thp_read_settings(struct thp_settings *settings)
+{
+ unsigned long orders = thp_supported_orders();
+ char path[PATH_MAX];
+ int i;
+
+ *settings = (struct thp_settings) {
+ .thp_enabled = thp_read_string("enabled", thp_enabled_strings),
+ .thp_defrag = thp_read_string("defrag", thp_defrag_strings),
+ .shmem_enabled =
+ thp_read_string("shmem_enabled", shmem_enabled_strings),
+ .use_zero_page = thp_read_num("use_zero_page"),
+ };
+ settings->khugepaged = (struct khugepaged_settings) {
+ .defrag = thp_read_num("khugepaged/defrag"),
+ .alloc_sleep_millisecs =
+ thp_read_num("khugepaged/alloc_sleep_millisecs"),
+ .scan_sleep_millisecs =
+ thp_read_num("khugepaged/scan_sleep_millisecs"),
+ .max_ptes_none = thp_read_num("khugepaged/max_ptes_none"),
+ .max_ptes_swap = thp_read_num("khugepaged/max_ptes_swap"),
+ .max_ptes_shared = thp_read_num("khugepaged/max_ptes_shared"),
+ .pages_to_scan = thp_read_num("khugepaged/pages_to_scan"),
+ };
+ if (dev_queue_read_ahead_path[0])
+ settings->read_ahead_kb = read_num(dev_queue_read_ahead_path);
+
+ for (i = 0; i < NR_ORDERS; i++) {
+ if (!((1 << i) & orders)) {
+ settings->hugepages[i].enabled = THP_NEVER;
+ continue;
+ }
+ snprintf(path, PATH_MAX, "hugepages-%ukB/enabled",
+ (getpagesize() >> 10) << i);
+ settings->hugepages[i].enabled =
+ thp_read_string(path, thp_enabled_strings);
+ }
+}
+
+void thp_write_settings(struct thp_settings *settings)
+{
+ struct khugepaged_settings *khugepaged = &settings->khugepaged;
+ unsigned long orders = thp_supported_orders();
+ char path[PATH_MAX];
+ int enabled;
+ int i;
+
+ thp_write_string("enabled", thp_enabled_strings[settings->thp_enabled]);
+ thp_write_string("defrag", thp_defrag_strings[settings->thp_defrag]);
+ thp_write_string("shmem_enabled",
+ shmem_enabled_strings[settings->shmem_enabled]);
+ thp_write_num("use_zero_page", settings->use_zero_page);
+
+ thp_write_num("khugepaged/defrag", khugepaged->defrag);
+ thp_write_num("khugepaged/alloc_sleep_millisecs",
+ khugepaged->alloc_sleep_millisecs);
+ thp_write_num("khugepaged/scan_sleep_millisecs",
+ khugepaged->scan_sleep_millisecs);
+ thp_write_num("khugepaged/max_ptes_none", khugepaged->max_ptes_none);
+ thp_write_num("khugepaged/max_ptes_swap", khugepaged->max_ptes_swap);
+ thp_write_num("khugepaged/max_ptes_shared", khugepaged->max_ptes_shared);
+ thp_write_num("khugepaged/pages_to_scan", khugepaged->pages_to_scan);
+
+ if (dev_queue_read_ahead_path[0])
+ write_num(dev_queue_read_ahead_path, settings->read_ahead_kb);
+
+ for (i = 0; i < NR_ORDERS; i++) {
+ if (!((1 << i) & orders))
+ continue;
+ snprintf(path, PATH_MAX, "hugepages-%ukB/enabled",
+ (getpagesize() >> 10) << i);
+ enabled = settings->hugepages[i].enabled;
+ thp_write_string(path, thp_enabled_strings[enabled]);
+ }
+}
+
+struct thp_settings *thp_current_settings(void)
+{
+ if (!settings_index) {
+ printf("Fail: No settings set");
+ exit(EXIT_FAILURE);
+ }
+ return settings_stack + settings_index - 1;
+}
+
+void thp_push_settings(struct thp_settings *settings)
+{
+ if (settings_index >= MAX_SETTINGS_DEPTH) {
+ printf("Fail: Settings stack exceeded");
+ exit(EXIT_FAILURE);
+ }
+ settings_stack[settings_index++] = *settings;
+ thp_write_settings(thp_current_settings());
+}
+
+void thp_pop_settings(void)
+{
+ if (settings_index <= 0) {
+ printf("Fail: Settings stack empty");
+ exit(EXIT_FAILURE);
+ }
+ --settings_index;
+ thp_write_settings(thp_current_settings());
+}
+
+void thp_restore_settings(void)
+{
+ thp_write_settings(&saved_settings);
+}
+
+void thp_save_settings(void)
+{
+ thp_read_settings(&saved_settings);
+}
+
+void thp_set_read_ahead_path(char *path)
+{
+ if (!path) {
+ dev_queue_read_ahead_path[0] = '\0';
+ return;
+ }
+
+ strncpy(dev_queue_read_ahead_path, path,
+ sizeof(dev_queue_read_ahead_path));
+ dev_queue_read_ahead_path[sizeof(dev_queue_read_ahead_path) - 1] = '\0';
+}
+
+unsigned long thp_supported_orders(void)
+{
+ unsigned long orders = 0;
+ char path[PATH_MAX];
+ char buf[256];
+ int ret;
+ int i;
+
+ for (i = 0; i < NR_ORDERS; i++) {
+ ret = snprintf(path, PATH_MAX, THP_SYSFS "hugepages-%ukB/enabled",
+ (getpagesize() >> 10) << i);
+ if (ret >= PATH_MAX) {
+ printf("%s: Pathname is too long\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+
+ ret = read_file(path, buf, sizeof(buf));
+ if (ret)
+ orders |= 1UL << i;
+ }
+
+ return orders;
+}
diff --git a/tools/testing/selftests/mm/thp_settings.h b/tools/testing/selftests/mm/thp_settings.h
new file mode 100644
index 000000000000..71cbff05f4c7
--- /dev/null
+++ b/tools/testing/selftests/mm/thp_settings.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __THP_SETTINGS_H__
+#define __THP_SETTINGS_H__
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+enum thp_enabled {
+ THP_NEVER,
+ THP_ALWAYS,
+ THP_INHERIT,
+ THP_MADVISE,
+};
+
+enum thp_defrag {
+ THP_DEFRAG_ALWAYS,
+ THP_DEFRAG_DEFER,
+ THP_DEFRAG_DEFER_MADVISE,
+ THP_DEFRAG_MADVISE,
+ THP_DEFRAG_NEVER,
+};
+
+enum shmem_enabled {
+ SHMEM_ALWAYS,
+ SHMEM_WITHIN_SIZE,
+ SHMEM_ADVISE,
+ SHMEM_NEVER,
+ SHMEM_DENY,
+ SHMEM_FORCE,
+};
+
+#define NR_ORDERS 20
+
+struct hugepages_settings {
+ enum thp_enabled enabled;
+};
+
+struct khugepaged_settings {
+ bool defrag;
+ unsigned int alloc_sleep_millisecs;
+ unsigned int scan_sleep_millisecs;
+ unsigned int max_ptes_none;
+ unsigned int max_ptes_swap;
+ unsigned int max_ptes_shared;
+ unsigned long pages_to_scan;
+};
+
+struct thp_settings {
+ enum thp_enabled thp_enabled;
+ enum thp_defrag thp_defrag;
+ enum shmem_enabled shmem_enabled;
+ bool use_zero_page;
+ struct khugepaged_settings khugepaged;
+ unsigned long read_ahead_kb;
+ struct hugepages_settings hugepages[NR_ORDERS];
+};
+
+int read_file(const char *path, char *buf, size_t buflen);
+int write_file(const char *path, const char *buf, size_t buflen);
+const unsigned long read_num(const char *path);
+void write_num(const char *path, unsigned long num);
+
+int thp_read_string(const char *name, const char * const strings[]);
+void thp_write_string(const char *name, const char *val);
+const unsigned long thp_read_num(const char *name);
+void thp_write_num(const char *name, unsigned long num);
+
+void thp_write_settings(struct thp_settings *settings);
+void thp_read_settings(struct thp_settings *settings);
+struct thp_settings *thp_current_settings(void);
+void thp_push_settings(struct thp_settings *settings);
+void thp_pop_settings(void);
+void thp_restore_settings(void);
+void thp_save_settings(void);
+
+void thp_set_read_ahead_path(char *path);
+unsigned long thp_supported_orders(void);
+
+#endif /* __THP_SETTINGS_H__ */
diff --git a/tools/testing/selftests/mm/thuge-gen.c b/tools/testing/selftests/mm/thuge-gen.c
index 16ed4dfa7359..622987f12c89 100644
--- a/tools/testing/selftests/mm/thuge-gen.c
+++ b/tools/testing/selftests/mm/thuge-gen.c
@@ -3,7 +3,8 @@
Before running this huge pages for each huge page size must have been
reserved.
- For large pages beyond MAX_ORDER (like 1GB on x86) boot options must be used.
+ For large pages beyond MAX_PAGE_ORDER (like 1GB on x86) boot options must
+ be used.
Also shmmax must be increased.
And you need to run as root to work around some weird permissions in shm.
And nothing using huge pages should run in parallel.
diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
index 02b89860e193..b0ac0ec2356d 100644
--- a/tools/testing/selftests/mm/uffd-common.c
+++ b/tools/testing/selftests/mm/uffd-common.c
@@ -17,6 +17,7 @@ bool map_shared;
bool test_uffdio_wp = true;
unsigned long long *count_verify;
uffd_test_ops_t *uffd_test_ops;
+uffd_test_case_ops_t *uffd_test_case_ops;
static int uffd_mem_fd_create(off_t mem_size, bool hugetlb)
{
@@ -262,7 +263,7 @@ static inline void munmap_area(void **area)
*area = NULL;
}
-static void uffd_test_ctx_clear(void)
+void uffd_test_ctx_clear(void)
{
size_t i;
@@ -298,7 +299,11 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg)
unsigned long nr, cpu;
int ret;
- uffd_test_ctx_clear();
+ if (uffd_test_case_ops && uffd_test_case_ops->pre_alloc) {
+ ret = uffd_test_case_ops->pre_alloc(errmsg);
+ if (ret)
+ return ret;
+ }
ret = uffd_test_ops->allocate_area((void **)&area_src, true);
ret |= uffd_test_ops->allocate_area((void **)&area_dst, false);
@@ -308,6 +313,12 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg)
return ret;
}
+ if (uffd_test_case_ops && uffd_test_case_ops->post_alloc) {
+ ret = uffd_test_case_ops->post_alloc(errmsg);
+ if (ret)
+ return ret;
+ }
+
ret = userfaultfd_open(&features);
if (ret) {
if (errmsg)
@@ -620,6 +631,30 @@ int copy_page(int ufd, unsigned long offset, bool wp)
return __copy_page(ufd, offset, false, wp);
}
+int move_page(int ufd, unsigned long offset, unsigned long len)
+{
+ struct uffdio_move uffdio_move;
+
+ if (offset + len > nr_pages * page_size)
+ err("unexpected offset %lu and length %lu\n", offset, len);
+ uffdio_move.dst = (unsigned long) area_dst + offset;
+ uffdio_move.src = (unsigned long) area_src + offset;
+ uffdio_move.len = len;
+ uffdio_move.mode = UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES;
+ uffdio_move.move = 0;
+ if (ioctl(ufd, UFFDIO_MOVE, &uffdio_move)) {
+ /* real retval in uffdio_move.move */
+ if (uffdio_move.move != -EEXIST)
+ err("UFFDIO_MOVE error: %"PRId64,
+ (int64_t)uffdio_move.move);
+ wake_range(ufd, uffdio_move.dst, len);
+ } else if (uffdio_move.move != len) {
+ err("UFFDIO_MOVE error: %"PRId64, (int64_t)uffdio_move.move);
+ } else
+ return 1;
+ return 0;
+}
+
int uffd_open_dev(unsigned int flags)
{
int fd, uffd;
diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
index 7c4fa964c3b0..cb055282c89c 100644
--- a/tools/testing/selftests/mm/uffd-common.h
+++ b/tools/testing/selftests/mm/uffd-common.h
@@ -90,6 +90,12 @@ struct uffd_test_ops {
};
typedef struct uffd_test_ops uffd_test_ops_t;
+struct uffd_test_case_ops {
+ int (*pre_alloc)(const char **errmsg);
+ int (*post_alloc)(const char **errmsg);
+};
+typedef struct uffd_test_case_ops uffd_test_case_ops_t;
+
extern unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
extern char *area_src, *area_src_alias, *area_dst, *area_dst_alias, *area_remap;
extern int uffd, uffd_flags, finished, *pipefd, test_type;
@@ -102,15 +108,18 @@ extern uffd_test_ops_t anon_uffd_test_ops;
extern uffd_test_ops_t shmem_uffd_test_ops;
extern uffd_test_ops_t hugetlb_uffd_test_ops;
extern uffd_test_ops_t *uffd_test_ops;
+extern uffd_test_case_ops_t *uffd_test_case_ops;
void uffd_stats_report(struct uffd_args *args, int n_cpus);
int uffd_test_ctx_init(uint64_t features, const char **errmsg);
+void uffd_test_ctx_clear(void);
int userfaultfd_open(uint64_t *features);
int uffd_read_msg(int ufd, struct uffd_msg *msg);
void wp_range(int ufd, __u64 start, __u64 len, bool wp);
void uffd_handle_page_fault(struct uffd_msg *msg, struct uffd_args *args);
int __copy_page(int ufd, unsigned long offset, bool retry, bool wp);
int copy_page(int ufd, unsigned long offset, bool wp);
+int move_page(int ufd, unsigned long offset, unsigned long len);
void *uffd_poll_thread(void *arg);
int uffd_open_dev(unsigned int flags);
diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c
index 469e0476af26..7e83829bbb33 100644
--- a/tools/testing/selftests/mm/uffd-stress.c
+++ b/tools/testing/selftests/mm/uffd-stress.c
@@ -323,8 +323,10 @@ static int userfaultfd_stress(void)
uffd_stats_reset(args, nr_cpus);
/* bounce pass */
- if (stress(args))
+ if (stress(args)) {
+ uffd_test_ctx_clear();
return 1;
+ }
/* Clear all the write protections if there is any */
if (test_uffdio_wp)
@@ -354,6 +356,7 @@ static int userfaultfd_stress(void)
uffd_stats_report(args, nr_cpus);
}
+ uffd_test_ctx_clear();
return 0;
}
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index 2709a34a39c5..cce90a10515a 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -23,6 +23,9 @@
#define MEM_ALL (MEM_ANON | MEM_SHMEM | MEM_SHMEM_PRIVATE | \
MEM_HUGETLB | MEM_HUGETLB_PRIVATE)
+#define ALIGN_UP(x, align_to) \
+ ((__typeof__(x))((((unsigned long)(x)) + ((align_to)-1)) & ~((align_to)-1)))
+
struct mem_type {
const char *name;
unsigned int mem_flag;
@@ -78,6 +81,7 @@ typedef struct {
uffd_test_fn uffd_fn;
unsigned int mem_targets;
uint64_t uffd_feature_required;
+ uffd_test_case_ops_t *test_case_ops;
} uffd_test_case_t;
static void uffd_test_report(void)
@@ -185,6 +189,7 @@ uffd_setup_environment(uffd_test_args_t *args, uffd_test_case_t *test,
{
map_shared = mem_type->shared;
uffd_test_ops = mem_type->mem_ops;
+ uffd_test_case_ops = test->test_case_ops;
if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB))
page_size = default_huge_page_size();
@@ -1062,6 +1067,188 @@ static void uffd_poison_test(uffd_test_args_t *targs)
uffd_test_pass();
}
+static void
+uffd_move_handle_fault_common(struct uffd_msg *msg, struct uffd_args *args,
+ unsigned long len)
+{
+ unsigned long offset;
+
+ if (msg->event != UFFD_EVENT_PAGEFAULT)
+ err("unexpected msg event %u", msg->event);
+
+ if (msg->arg.pagefault.flags &
+ (UFFD_PAGEFAULT_FLAG_WP | UFFD_PAGEFAULT_FLAG_MINOR | UFFD_PAGEFAULT_FLAG_WRITE))
+ err("unexpected fault type %llu", msg->arg.pagefault.flags);
+
+ offset = (char *)(unsigned long)msg->arg.pagefault.address - area_dst;
+ offset &= ~(len-1);
+
+ if (move_page(uffd, offset, len))
+ args->missing_faults++;
+}
+
+static void uffd_move_handle_fault(struct uffd_msg *msg,
+ struct uffd_args *args)
+{
+ uffd_move_handle_fault_common(msg, args, page_size);
+}
+
+static void uffd_move_pmd_handle_fault(struct uffd_msg *msg,
+ struct uffd_args *args)
+{
+ uffd_move_handle_fault_common(msg, args, read_pmd_pagesize());
+}
+
+static void
+uffd_move_test_common(uffd_test_args_t *targs, unsigned long chunk_size,
+ void (*handle_fault)(struct uffd_msg *msg, struct uffd_args *args))
+{
+ unsigned long nr;
+ pthread_t uffd_mon;
+ char c;
+ unsigned long long count;
+ struct uffd_args args = { 0 };
+ char *orig_area_src, *orig_area_dst;
+ unsigned long step_size, step_count;
+ unsigned long src_offs = 0;
+ unsigned long dst_offs = 0;
+
+ /* Prevent source pages from being mapped more than once */
+ if (madvise(area_src, nr_pages * page_size, MADV_DONTFORK))
+ err("madvise(MADV_DONTFORK) failure");
+
+ if (uffd_register(uffd, area_dst, nr_pages * page_size,
+ true, false, false))
+ err("register failure");
+
+ args.handle_fault = handle_fault;
+ if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
+ err("uffd_poll_thread create");
+
+ step_size = chunk_size / page_size;
+ step_count = nr_pages / step_size;
+
+ if (chunk_size > page_size) {
+ char *aligned_src = ALIGN_UP(area_src, chunk_size);
+ char *aligned_dst = ALIGN_UP(area_dst, chunk_size);
+
+ if (aligned_src != area_src || aligned_dst != area_dst) {
+ src_offs = (aligned_src - area_src) / page_size;
+ dst_offs = (aligned_dst - area_dst) / page_size;
+ step_count--;
+ }
+ orig_area_src = area_src;
+ orig_area_dst = area_dst;
+ area_src = aligned_src;
+ area_dst = aligned_dst;
+ }
+
+ /*
+ * Read each of the pages back using the UFFD-registered mapping. We
+ * expect that the first time we touch a page, it will result in a missing
+ * fault. uffd_poll_thread will resolve the fault by moving source
+ * page to destination.
+ */
+ for (nr = 0; nr < step_count * step_size; nr += step_size) {
+ unsigned long i;
+
+ /* Check area_src content */
+ for (i = 0; i < step_size; i++) {
+ count = *area_count(area_src, nr + i);
+ if (count != count_verify[src_offs + nr + i])
+ err("nr %lu source memory invalid %llu %llu\n",
+ nr + i, count, count_verify[src_offs + nr + i]);
+ }
+
+ /* Faulting into area_dst should move the page or the huge page */
+ for (i = 0; i < step_size; i++) {
+ count = *area_count(area_dst, nr + i);
+ if (count != count_verify[dst_offs + nr + i])
+ err("nr %lu memory corruption %llu %llu\n",
+ nr, count, count_verify[dst_offs + nr + i]);
+ }
+
+ /* Re-check area_src content which should be empty */
+ for (i = 0; i < step_size; i++) {
+ count = *area_count(area_src, nr + i);
+ if (count != 0)
+ err("nr %lu move failed %llu %llu\n",
+ nr, count, count_verify[src_offs + nr + i]);
+ }
+ }
+ if (step_size > page_size) {
+ area_src = orig_area_src;
+ area_dst = orig_area_dst;
+ }
+
+ if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
+ err("pipe write");
+ if (pthread_join(uffd_mon, NULL))
+ err("join() failed");
+
+ if (args.missing_faults != step_count || args.minor_faults != 0)
+ uffd_test_fail("stats check error");
+ else
+ uffd_test_pass();
+}
+
+static void uffd_move_test(uffd_test_args_t *targs)
+{
+ uffd_move_test_common(targs, page_size, uffd_move_handle_fault);
+}
+
+static void uffd_move_pmd_test(uffd_test_args_t *targs)
+{
+ if (madvise(area_dst, nr_pages * page_size, MADV_HUGEPAGE))
+ err("madvise(MADV_HUGEPAGE) failure");
+ uffd_move_test_common(targs, read_pmd_pagesize(),
+ uffd_move_pmd_handle_fault);
+}
+
+static void uffd_move_pmd_split_test(uffd_test_args_t *targs)
+{
+ if (madvise(area_dst, nr_pages * page_size, MADV_NOHUGEPAGE))
+ err("madvise(MADV_NOHUGEPAGE) failure");
+ uffd_move_test_common(targs, read_pmd_pagesize(),
+ uffd_move_pmd_handle_fault);
+}
+
+static int prevent_hugepages(const char **errmsg)
+{
+ /* This should be done before source area is populated */
+ if (madvise(area_src, nr_pages * page_size, MADV_NOHUGEPAGE)) {
+ /* Ignore only if CONFIG_TRANSPARENT_HUGEPAGE=n */
+ if (errno != EINVAL) {
+ if (errmsg)
+ *errmsg = "madvise(MADV_NOHUGEPAGE) failed";
+ return -errno;
+ }
+ }
+ return 0;
+}
+
+static int request_hugepages(const char **errmsg)
+{
+ /* This should be done before source area is populated */
+ if (madvise(area_src, nr_pages * page_size, MADV_HUGEPAGE)) {
+ if (errmsg) {
+ *errmsg = (errno == EINVAL) ?
+ "CONFIG_TRANSPARENT_HUGEPAGE is not set" :
+ "madvise(MADV_HUGEPAGE) failed";
+ }
+ return -errno;
+ }
+ return 0;
+}
+
+struct uffd_test_case_ops uffd_move_test_case_ops = {
+ .post_alloc = prevent_hugepages,
+};
+
+struct uffd_test_case_ops uffd_move_test_pmd_case_ops = {
+ .post_alloc = request_hugepages,
+};
+
/*
* Test the returned uffdio_register.ioctls with different register modes.
* Note that _UFFDIO_ZEROPAGE is tested separately in the zeropage test.
@@ -1140,6 +1327,27 @@ uffd_test_case_t uffd_tests[] = {
.uffd_feature_required = 0,
},
{
+ .name = "move",
+ .uffd_fn = uffd_move_test,
+ .mem_targets = MEM_ANON,
+ .uffd_feature_required = UFFD_FEATURE_MOVE,
+ .test_case_ops = &uffd_move_test_case_ops,
+ },
+ {
+ .name = "move-pmd",
+ .uffd_fn = uffd_move_pmd_test,
+ .mem_targets = MEM_ANON,
+ .uffd_feature_required = UFFD_FEATURE_MOVE,
+ .test_case_ops = &uffd_move_test_pmd_case_ops,
+ },
+ {
+ .name = "move-pmd-split",
+ .uffd_fn = uffd_move_pmd_split_test,
+ .mem_targets = MEM_ANON,
+ .uffd_feature_required = UFFD_FEATURE_MOVE,
+ .test_case_ops = &uffd_move_test_pmd_case_ops,
+ },
+ {
.name = "wp-fork",
.uffd_fn = uffd_wp_fork_test,
.mem_targets = MEM_ALL,
@@ -1319,6 +1527,7 @@ int main(int argc, char *argv[])
continue;
}
test->uffd_fn(&args);
+ uffd_test_ctx_clear();
}
}
diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c
index 3082b40492dd..05736c615734 100644
--- a/tools/testing/selftests/mm/vm_util.c
+++ b/tools/testing/selftests/mm/vm_util.c
@@ -4,6 +4,7 @@
#include <dirent.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>
+#include <linux/fs.h>
#include <sys/syscall.h>
#include <unistd.h>
#include "../kselftest.h"
@@ -28,19 +29,92 @@ uint64_t pagemap_get_entry(int fd, char *start)
return entry;
}
+static uint64_t __pagemap_scan_get_categories(int fd, char *start, struct page_region *r)
+{
+ struct pm_scan_arg arg;
+
+ arg.start = (uintptr_t)start;
+ arg.end = (uintptr_t)(start + psize());
+ arg.vec = (uintptr_t)r;
+ arg.vec_len = 1;
+ arg.flags = 0;
+ arg.size = sizeof(struct pm_scan_arg);
+ arg.max_pages = 0;
+ arg.category_inverted = 0;
+ arg.category_mask = 0;
+ arg.category_anyof_mask = PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN | PAGE_IS_FILE |
+ PAGE_IS_PRESENT | PAGE_IS_SWAPPED | PAGE_IS_PFNZERO |
+ PAGE_IS_HUGE | PAGE_IS_SOFT_DIRTY;
+ arg.return_mask = arg.category_anyof_mask;
+
+ return ioctl(fd, PAGEMAP_SCAN, &arg);
+}
+
+static uint64_t pagemap_scan_get_categories(int fd, char *start)
+{
+ struct page_region r;
+ long ret;
+
+ ret = __pagemap_scan_get_categories(fd, start, &r);
+ if (ret < 0)
+ ksft_exit_fail_msg("PAGEMAP_SCAN failed: %s\n", strerror(errno));
+ if (ret == 0)
+ return 0;
+ return r.categories;
+}
+
+/* `start` is any valid address. */
+static bool pagemap_scan_supported(int fd, char *start)
+{
+ static int supported = -1;
+ int ret;
+
+ if (supported != -1)
+ return supported;
+
+ /* Provide an invalid address in order to trigger EFAULT. */
+ ret = __pagemap_scan_get_categories(fd, start, (struct page_region *) ~0UL);
+ if (ret == 0)
+ ksft_exit_fail_msg("PAGEMAP_SCAN succeeded unexpectedly\n");
+
+ supported = errno == EFAULT;
+
+ return supported;
+}
+
+static bool page_entry_is(int fd, char *start, char *desc,
+ uint64_t pagemap_flags, uint64_t pagescan_flags)
+{
+ bool m = pagemap_get_entry(fd, start) & pagemap_flags;
+
+ if (pagemap_scan_supported(fd, start)) {
+ bool s = pagemap_scan_get_categories(fd, start) & pagescan_flags;
+
+ if (m == s)
+ return m;
+
+ ksft_exit_fail_msg(
+ "read and ioctl return unmatched results for %s: %d %d", desc, m, s);
+ }
+ return m;
+}
+
bool pagemap_is_softdirty(int fd, char *start)
{
- return pagemap_get_entry(fd, start) & PM_SOFT_DIRTY;
+ return page_entry_is(fd, start, "soft-dirty",
+ PM_SOFT_DIRTY, PAGE_IS_SOFT_DIRTY);
}
bool pagemap_is_swapped(int fd, char *start)
{
- return pagemap_get_entry(fd, start) & PM_SWAP;
+ return page_entry_is(fd, start, "swap", PM_SWAP, PAGE_IS_SWAPPED);
}
bool pagemap_is_populated(int fd, char *start)
{
- return pagemap_get_entry(fd, start) & (PM_PRESENT | PM_SWAP);
+ return page_entry_is(fd, start, "populated",
+ PM_PRESENT | PM_SWAP,
+ PAGE_IS_PRESENT | PAGE_IS_SWAPPED);
}
unsigned long pagemap_get_pfn(int fd, char *start)
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 7ea42fa02eab..c376151982c4 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -32,6 +32,8 @@ SUB_DIRS = alignment \
vphn \
math \
papr_attributes \
+ papr_vpd \
+ papr_sysparm \
ptrace \
security \
mce
diff --git a/tools/testing/selftests/powerpc/math/fpu.h b/tools/testing/selftests/powerpc/math/fpu.h
new file mode 100644
index 000000000000..a8ad0d42604e
--- /dev/null
+++ b/tools/testing/selftests/powerpc/math/fpu.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2023, Michael Ellerman, IBM Corporation.
+ */
+
+#ifndef _SELFTESTS_POWERPC_FPU_H
+#define _SELFTESTS_POWERPC_FPU_H
+
+static inline void randomise_darray(double *darray, int num)
+{
+ long val;
+
+ for (int i = 0; i < num; i++) {
+ val = random();
+ if (val & 1)
+ val *= -1;
+
+ if (val & 2)
+ darray[i] = 1.0 / val;
+ else
+ darray[i] = val * val;
+ }
+}
+
+#endif /* _SELFTESTS_POWERPC_FPU_H */
diff --git a/tools/testing/selftests/powerpc/math/fpu_asm.S b/tools/testing/selftests/powerpc/math/fpu_asm.S
index 9dc0c158f871..efe1e1be4695 100644
--- a/tools/testing/selftests/powerpc/math/fpu_asm.S
+++ b/tools/testing/selftests/powerpc/math/fpu_asm.S
@@ -66,6 +66,40 @@ FUNC_START(check_fpu)
li r3,0 # Success!!!
1: blr
+
+// int check_all_fprs(double darray[32])
+FUNC_START(check_all_fprs)
+ PUSH_BASIC_STACK(8)
+ mr r4, r3 // r4 = darray
+ li r3, 1 // prepare for failure
+
+ stfd f31, STACK_FRAME_LOCAL(0, 0)(sp) // backup f31
+
+ // Check regs f0-f30, using f31 as scratch
+ .set i, 0
+ .rept 31
+ lfd f31, (8 * i)(r4) // load expected value
+ fcmpu cr0, i, f31 // compare
+ bne cr0, 1f // bail if mismatch
+ .set i, i + 1
+ .endr
+
+ lfd f31, STACK_FRAME_LOCAL(0, 0)(sp) // reload f31
+ stfd f30, STACK_FRAME_LOCAL(0, 0)(sp) // backup f30
+
+ lfd f30, (8 * 31)(r4) // load expected value of f31
+ fcmpu cr0, f30, f31 // compare
+ bne cr0, 1f // bail if mismatch
+
+ lfd f30, STACK_FRAME_LOCAL(0, 0)(sp) // reload f30
+
+ // Success
+ li r3, 0
+
+1: POP_BASIC_STACK(8)
+ blr
+FUNC_END(check_all_fprs)
+
FUNC_START(test_fpu)
# r3 holds pointer to where to put the result of fork
# r4 holds pointer to the pid
@@ -75,8 +109,9 @@ FUNC_START(test_fpu)
std r3,STACK_FRAME_PARAM(0)(sp) # Address of darray
std r4,STACK_FRAME_PARAM(1)(sp) # Address of pid
- bl load_fpu
- nop
+ // Load FPRs with expected values
+ OP_REGS lfd, 8, 0, 31, r3
+
li r0,__NR_fork
sc
@@ -85,7 +120,7 @@ FUNC_START(test_fpu)
std r3,0(r9)
ld r3,STACK_FRAME_PARAM(0)(sp)
- bl check_fpu
+ bl check_all_fprs
nop
POP_FPU(256)
@@ -104,8 +139,8 @@ FUNC_START(preempt_fpu)
std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting
std r5,STACK_FRAME_PARAM(2)(sp) # int *running
- bl load_fpu
- nop
+ // Load FPRs with expected values
+ OP_REGS lfd, 8, 0, 31, r3
sync
# Atomic DEC
@@ -116,8 +151,7 @@ FUNC_START(preempt_fpu)
bne- 1b
2: ld r3,STACK_FRAME_PARAM(0)(sp)
- bl check_fpu
- nop
+ bl check_all_fprs
cmpdi r3,0
bne 3f
ld r4,STACK_FRAME_PARAM(2)(sp)
diff --git a/tools/testing/selftests/powerpc/math/fpu_preempt.c b/tools/testing/selftests/powerpc/math/fpu_preempt.c
index 5235bdc8c0b1..9ddede0770ed 100644
--- a/tools/testing/selftests/powerpc/math/fpu_preempt.c
+++ b/tools/testing/selftests/powerpc/math/fpu_preempt.c
@@ -1,13 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
+ * Copyright 2023, Michael Ellerman, IBM Corp.
*
* This test attempts to see if the FPU registers change across preemption.
- * Two things should be noted here a) The check_fpu function in asm only checks
- * the non volatile registers as it is reused from the syscall test b) There is
- * no way to be sure preemption happened so this test just uses many threads
- * and a long wait. As such, a successful test doesn't mean much but a failure
- * is bad.
+ * There is no way to be sure preemption happened so this test just uses many
+ * threads and a long wait. As such, a successful test doesn't mean much but
+ * a failure is bad.
*/
#include <stdio.h>
@@ -20,9 +19,10 @@
#include <pthread.h>
#include "utils.h"
+#include "fpu.h"
/* Time to wait for workers to get preempted (seconds) */
-#define PREEMPT_TIME 20
+#define PREEMPT_TIME 60
/*
* Factor by which to multiply number of online CPUs for total number of
* worker threads
@@ -30,26 +30,22 @@
#define THREAD_FACTOR 8
-__thread double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
- 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0,
- 2.1};
+__thread double darray[32];
int threads_starting;
int running;
-extern void preempt_fpu(double *darray, int *threads_starting, int *running);
+extern int preempt_fpu(double *darray, int *threads_starting, int *running);
void *preempt_fpu_c(void *p)
{
- int i;
- srand(pthread_self());
- for (i = 0; i < 21; i++)
- darray[i] = rand();
+ long rc;
- /* Test failed if it ever returns */
- preempt_fpu(darray, &threads_starting, &running);
+ srand(pthread_self());
+ randomise_darray(darray, ARRAY_SIZE(darray));
+ rc = preempt_fpu(darray, &threads_starting, &running);
- return p;
+ return (void *)rc;
}
int test_preempt_fpu(void)
diff --git a/tools/testing/selftests/powerpc/math/fpu_syscall.c b/tools/testing/selftests/powerpc/math/fpu_syscall.c
index 694f225c7e45..751d46b133fc 100644
--- a/tools/testing/selftests/powerpc/math/fpu_syscall.c
+++ b/tools/testing/selftests/powerpc/math/fpu_syscall.c
@@ -14,12 +14,11 @@
#include <stdlib.h>
#include "utils.h"
+#include "fpu.h"
extern int test_fpu(double *darray, pid_t *pid);
-double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
- 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0,
- 2.1};
+double darray[32];
int syscall_fpu(void)
{
@@ -27,6 +26,9 @@ int syscall_fpu(void)
int i;
int ret;
int child_ret;
+
+ randomise_darray(darray, ARRAY_SIZE(darray));
+
for (i = 0; i < 1000; i++) {
/* test_fpu will fork() */
ret = test_fpu(darray, &fork_pid);
diff --git a/tools/testing/selftests/powerpc/math/vmx_preempt.c b/tools/testing/selftests/powerpc/math/vmx_preempt.c
index 6761d6ce30ec..6f7cf400c687 100644
--- a/tools/testing/selftests/powerpc/math/vmx_preempt.c
+++ b/tools/testing/selftests/powerpc/math/vmx_preempt.c
@@ -37,19 +37,21 @@ __thread vector int varray[] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10,11,12},
int threads_starting;
int running;
-extern void preempt_vmx(vector int *varray, int *threads_starting, int *running);
+extern int preempt_vmx(vector int *varray, int *threads_starting, int *running);
void *preempt_vmx_c(void *p)
{
int i, j;
+ long rc;
+
srand(pthread_self());
for (i = 0; i < 12; i++)
for (j = 0; j < 4; j++)
varray[i][j] = rand();
- /* Test fails if it ever returns */
- preempt_vmx(varray, &threads_starting, &running);
- return p;
+ rc = preempt_vmx(varray, &threads_starting, &running);
+
+ return (void *)rc;
}
int test_preempt_vmx(void)
diff --git a/tools/testing/selftests/powerpc/papr_sysparm/.gitignore b/tools/testing/selftests/powerpc/papr_sysparm/.gitignore
new file mode 100644
index 000000000000..f2a69bf59d40
--- /dev/null
+++ b/tools/testing/selftests/powerpc/papr_sysparm/.gitignore
@@ -0,0 +1 @@
+/papr_sysparm
diff --git a/tools/testing/selftests/powerpc/papr_sysparm/Makefile b/tools/testing/selftests/powerpc/papr_sysparm/Makefile
new file mode 100644
index 000000000000..7f79e437634a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/papr_sysparm/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+noarg:
+ $(MAKE) -C ../
+
+TEST_GEN_PROGS := papr_sysparm
+
+top_srcdir = ../../../../..
+include ../../lib.mk
+
+$(TEST_GEN_PROGS): ../harness.c ../utils.c
+
+$(OUTPUT)/papr_sysparm: CFLAGS += $(KHDR_INCLUDES)
diff --git a/tools/testing/selftests/powerpc/papr_sysparm/papr_sysparm.c b/tools/testing/selftests/powerpc/papr_sysparm/papr_sysparm.c
new file mode 100644
index 000000000000..f56c15a11e2f
--- /dev/null
+++ b/tools/testing/selftests/powerpc/papr_sysparm/papr_sysparm.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <asm/papr-sysparm.h>
+
+#include "utils.h"
+
+#define DEVPATH "/dev/papr-sysparm"
+
+static int open_close(void)
+{
+ const int devfd = open(DEVPATH, O_RDONLY);
+
+ SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
+ DEVPATH " not present");
+
+ FAIL_IF(devfd < 0);
+ FAIL_IF(close(devfd) != 0);
+
+ return 0;
+}
+
+static int get_splpar(void)
+{
+ struct papr_sysparm_io_block sp = {
+ .parameter = 20, // SPLPAR characteristics
+ };
+ const int devfd = open(DEVPATH, O_RDONLY);
+
+ SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
+ DEVPATH " not present");
+
+ FAIL_IF(devfd < 0);
+ FAIL_IF(ioctl(devfd, PAPR_SYSPARM_IOC_GET, &sp) != 0);
+ FAIL_IF(sp.length == 0);
+ FAIL_IF(sp.length > sizeof(sp.data));
+ FAIL_IF(close(devfd) != 0);
+
+ return 0;
+}
+
+static int get_bad_parameter(void)
+{
+ struct papr_sysparm_io_block sp = {
+ .parameter = UINT32_MAX, // there are only ~60 specified parameters
+ };
+ const int devfd = open(DEVPATH, O_RDONLY);
+
+ SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
+ DEVPATH " not present");
+
+ FAIL_IF(devfd < 0);
+
+ // Ensure expected error
+ FAIL_IF(ioctl(devfd, PAPR_SYSPARM_IOC_GET, &sp) != -1);
+ FAIL_IF(errno != EOPNOTSUPP);
+
+ // Ensure the buffer is unchanged
+ FAIL_IF(sp.length != 0);
+ for (size_t i = 0; i < ARRAY_SIZE(sp.data); ++i)
+ FAIL_IF(sp.data[i] != 0);
+
+ FAIL_IF(close(devfd) != 0);
+
+ return 0;
+}
+
+static int check_efault_common(unsigned long cmd)
+{
+ const int devfd = open(DEVPATH, O_RDWR);
+
+ SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
+ DEVPATH " not present");
+
+ FAIL_IF(devfd < 0);
+
+ // Ensure expected error
+ FAIL_IF(ioctl(devfd, cmd, NULL) != -1);
+ FAIL_IF(errno != EFAULT);
+
+ FAIL_IF(close(devfd) != 0);
+
+ return 0;
+}
+
+static int check_efault_get(void)
+{
+ return check_efault_common(PAPR_SYSPARM_IOC_GET);
+}
+
+static int check_efault_set(void)
+{
+ return check_efault_common(PAPR_SYSPARM_IOC_SET);
+}
+
+static int set_hmc0(void)
+{
+ struct papr_sysparm_io_block sp = {
+ .parameter = 0, // HMC0, not a settable parameter
+ };
+ const int devfd = open(DEVPATH, O_RDWR);
+
+ SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
+ DEVPATH " not present");
+
+ FAIL_IF(devfd < 0);
+
+ // Ensure expected error
+ FAIL_IF(ioctl(devfd, PAPR_SYSPARM_IOC_SET, &sp) != -1);
+ SKIP_IF_MSG(errno == EOPNOTSUPP, "operation not supported");
+ FAIL_IF(errno != EPERM);
+
+ FAIL_IF(close(devfd) != 0);
+
+ return 0;
+}
+
+static int set_with_ro_fd(void)
+{
+ struct papr_sysparm_io_block sp = {
+ .parameter = 0, // HMC0, not a settable parameter.
+ };
+ const int devfd = open(DEVPATH, O_RDONLY);
+
+ SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
+ DEVPATH " not present");
+
+ FAIL_IF(devfd < 0);
+
+ // Ensure expected error
+ FAIL_IF(ioctl(devfd, PAPR_SYSPARM_IOC_SET, &sp) != -1);
+ SKIP_IF_MSG(errno == EOPNOTSUPP, "operation not supported");
+
+ // HMC0 isn't a settable parameter and we would normally
+ // expect to get EPERM on attempts to modify it. However, when
+ // the file is open read-only, we expect the driver to prevent
+ // the attempt with a distinct error.
+ FAIL_IF(errno != EBADF);
+
+ FAIL_IF(close(devfd) != 0);
+
+ return 0;
+}
+
+struct sysparm_test {
+ int (*function)(void);
+ const char *description;
+};
+
+static const struct sysparm_test sysparm_tests[] = {
+ {
+ .function = open_close,
+ .description = "open and close " DEVPATH " without issuing commands",
+ },
+ {
+ .function = get_splpar,
+ .description = "retrieve SPLPAR characteristics",
+ },
+ {
+ .function = get_bad_parameter,
+ .description = "verify EOPNOTSUPP for known-bad parameter",
+ },
+ {
+ .function = check_efault_get,
+ .description = "PAPR_SYSPARM_IOC_GET returns EFAULT on bad address",
+ },
+ {
+ .function = check_efault_set,
+ .description = "PAPR_SYSPARM_IOC_SET returns EFAULT on bad address",
+ },
+ {
+ .function = set_hmc0,
+ .description = "ensure EPERM on attempt to update HMC0",
+ },
+ {
+ .function = set_with_ro_fd,
+ .description = "PAPR_IOC_SYSPARM_SET returns EACCES on read-only fd",
+ },
+};
+
+int main(void)
+{
+ size_t fails = 0;
+
+ for (size_t i = 0; i < ARRAY_SIZE(sysparm_tests); ++i) {
+ const struct sysparm_test *t = &sysparm_tests[i];
+
+ if (test_harness(t->function, t->description))
+ ++fails;
+ }
+
+ return fails == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
+}
diff --git a/tools/testing/selftests/powerpc/papr_vpd/.gitignore b/tools/testing/selftests/powerpc/papr_vpd/.gitignore
new file mode 100644
index 000000000000..49285031a656
--- /dev/null
+++ b/tools/testing/selftests/powerpc/papr_vpd/.gitignore
@@ -0,0 +1 @@
+/papr_vpd
diff --git a/tools/testing/selftests/powerpc/papr_vpd/Makefile b/tools/testing/selftests/powerpc/papr_vpd/Makefile
new file mode 100644
index 000000000000..06b719703bfd
--- /dev/null
+++ b/tools/testing/selftests/powerpc/papr_vpd/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+noarg:
+ $(MAKE) -C ../
+
+TEST_GEN_PROGS := papr_vpd
+
+top_srcdir = ../../../../..
+include ../../lib.mk
+
+$(TEST_GEN_PROGS): ../harness.c ../utils.c
+
+$(OUTPUT)/papr_vpd: CFLAGS += $(KHDR_INCLUDES)
diff --git a/tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c b/tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c
new file mode 100644
index 000000000000..98cbb9109ee6
--- /dev/null
+++ b/tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#include <asm/papr-vpd.h>
+
+#include "utils.h"
+
+#define DEVPATH "/dev/papr-vpd"
+
+static int dev_papr_vpd_open_close(void)
+{
+ const int devfd = open(DEVPATH, O_RDONLY);
+
+ SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
+ DEVPATH " not present");
+
+ FAIL_IF(devfd < 0);
+ FAIL_IF(close(devfd) != 0);
+
+ return 0;
+}
+
+static int dev_papr_vpd_get_handle_all(void)
+{
+ const int devfd = open(DEVPATH, O_RDONLY);
+ struct papr_location_code lc = { .str = "", };
+ off_t size;
+ int fd;
+
+ SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
+ DEVPATH " not present");
+
+ FAIL_IF(devfd < 0);
+
+ errno = 0;
+ fd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc);
+ FAIL_IF(errno != 0);
+ FAIL_IF(fd < 0);
+
+ FAIL_IF(close(devfd) != 0);
+
+ size = lseek(fd, 0, SEEK_END);
+ FAIL_IF(size <= 0);
+
+ void *buf = malloc((size_t)size);
+ FAIL_IF(!buf);
+
+ ssize_t consumed = pread(fd, buf, size, 0);
+ FAIL_IF(consumed != size);
+
+ /* Ensure EOF */
+ FAIL_IF(read(fd, buf, size) != 0);
+ FAIL_IF(close(fd));
+
+ /* Verify that the buffer looks like VPD */
+ static const char needle[] = "System VPD";
+ FAIL_IF(!memmem(buf, size, needle, strlen(needle)));
+
+ return 0;
+}
+
+static int dev_papr_vpd_get_handle_byte_at_a_time(void)
+{
+ const int devfd = open(DEVPATH, O_RDONLY);
+ struct papr_location_code lc = { .str = "", };
+ int fd;
+
+ SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
+ DEVPATH " not present");
+
+ FAIL_IF(devfd < 0);
+
+ errno = 0;
+ fd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc);
+ FAIL_IF(errno != 0);
+ FAIL_IF(fd < 0);
+
+ FAIL_IF(close(devfd) != 0);
+
+ size_t consumed = 0;
+ while (1) {
+ ssize_t res;
+ char c;
+
+ errno = 0;
+ res = read(fd, &c, sizeof(c));
+ FAIL_IF(res > sizeof(c));
+ FAIL_IF(res < 0);
+ FAIL_IF(errno != 0);
+ consumed += res;
+ if (res == 0)
+ break;
+ }
+
+ FAIL_IF(consumed != lseek(fd, 0, SEEK_END));
+
+ FAIL_IF(close(fd));
+
+ return 0;
+}
+
+
+static int dev_papr_vpd_unterm_loc_code(void)
+{
+ const int devfd = open(DEVPATH, O_RDONLY);
+ struct papr_location_code lc = {};
+ int fd;
+
+ SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
+ DEVPATH " not present");
+
+ FAIL_IF(devfd < 0);
+
+ /*
+ * Place a non-null byte in every element of loc_code; the
+ * driver should reject this input.
+ */
+ memset(lc.str, 'x', ARRAY_SIZE(lc.str));
+
+ errno = 0;
+ fd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc);
+ FAIL_IF(fd != -1);
+ FAIL_IF(errno != EINVAL);
+
+ FAIL_IF(close(devfd) != 0);
+ return 0;
+}
+
+static int dev_papr_vpd_null_handle(void)
+{
+ const int devfd = open(DEVPATH, O_RDONLY);
+ int rc;
+
+ SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
+ DEVPATH " not present");
+
+ FAIL_IF(devfd < 0);
+
+ errno = 0;
+ rc = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, NULL);
+ FAIL_IF(rc != -1);
+ FAIL_IF(errno != EFAULT);
+
+ FAIL_IF(close(devfd) != 0);
+ return 0;
+}
+
+static int papr_vpd_close_handle_without_reading(void)
+{
+ const int devfd = open(DEVPATH, O_RDONLY);
+ struct papr_location_code lc;
+ int fd;
+
+ SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
+ DEVPATH " not present");
+
+ FAIL_IF(devfd < 0);
+
+ errno = 0;
+ fd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc);
+ FAIL_IF(errno != 0);
+ FAIL_IF(fd < 0);
+
+ /* close the handle without reading it */
+ FAIL_IF(close(fd) != 0);
+
+ FAIL_IF(close(devfd) != 0);
+ return 0;
+}
+
+static int papr_vpd_reread(void)
+{
+ const int devfd = open(DEVPATH, O_RDONLY);
+ struct papr_location_code lc = { .str = "", };
+ int fd;
+
+ SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
+ DEVPATH " not present");
+
+ FAIL_IF(devfd < 0);
+
+ errno = 0;
+ fd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc);
+ FAIL_IF(errno != 0);
+ FAIL_IF(fd < 0);
+
+ FAIL_IF(close(devfd) != 0);
+
+ const off_t size = lseek(fd, 0, SEEK_END);
+ FAIL_IF(size <= 0);
+
+ char *bufs[2];
+
+ for (size_t i = 0; i < ARRAY_SIZE(bufs); ++i) {
+ bufs[i] = malloc(size);
+ FAIL_IF(!bufs[i]);
+ ssize_t consumed = pread(fd, bufs[i], size, 0);
+ FAIL_IF(consumed != size);
+ }
+
+ FAIL_IF(memcmp(bufs[0], bufs[1], size));
+
+ FAIL_IF(close(fd) != 0);
+
+ return 0;
+}
+
+static int get_system_loc_code(struct papr_location_code *lc)
+{
+ static const char system_id_path[] = "/sys/firmware/devicetree/base/system-id";
+ static const char model_path[] = "/sys/firmware/devicetree/base/model";
+ char *system_id;
+ char *model;
+ int err = -1;
+
+ if (read_file_alloc(model_path, &model, NULL))
+ return err;
+
+ if (read_file_alloc(system_id_path, &system_id, NULL))
+ goto free_model;
+
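+ /* Strip the "IBM," prefix from the model property to get the machine type-model string. */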
+ char *mtm;
+ int sscanf_ret = sscanf(model, "IBM,%ms", &mtm);
+ if (sscanf_ret != 1)
+ goto free_system_id;
+
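+ /* Skip "IBM," and the next two characters of system-id; keep the remainder as the plant and sequence number. */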
+ char *plant_and_seq;
+ if (sscanf(system_id, "IBM,%*c%*c%ms", &plant_and_seq) != 1)
+ goto free_mtm;
+ /*
+ * Replace - with . to build location code.
+ */
+ char *sep = strchr(mtm, '-');
+ if (!sep)
+ goto free_mtm;
+
+ *sep = '.';
+
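+ /* Compose the location code as "U" + machine type.model + "." + plant/sequence. */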
+ snprintf(lc->str, sizeof(lc->str),
+ "U%s.%s", mtm, plant_and_seq);
+ err = 0;
+
+ free(plant_and_seq);
+free_mtm:
+ free(mtm);
+free_system_id:
+ free(system_id);
+free_model:
+ free(model);
+ return err;
+}
+
+static int papr_vpd_system_loc_code(void)
+{
+ struct papr_location_code lc;
+ const int devfd = open(DEVPATH, O_RDONLY);
+ off_t size;
+ int fd;
+
+ SKIP_IF_MSG(get_system_loc_code(&lc),
+ "Cannot determine system location code");
+ SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
+ DEVPATH " not present");
+
+ FAIL_IF(devfd < 0);
+
+ errno = 0;
+ fd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc);
+ FAIL_IF(errno != 0);
+ FAIL_IF(fd < 0);
+
+ FAIL_IF(close(devfd) != 0);
+
+ size = lseek(fd, 0, SEEK_END);
+ FAIL_IF(size <= 0);
+
+ void *buf = malloc((size_t)size);
+ FAIL_IF(!buf);
+
+ ssize_t consumed = pread(fd, buf, size, 0);
+ FAIL_IF(consumed != size);
+
+ /* Ensure EOF */
+ FAIL_IF(read(fd, buf, size) != 0);
+ FAIL_IF(close(fd));
+
+ /* Verify that the buffer looks like VPD */
+ static const char needle[] = "System VPD";
+ FAIL_IF(!memmem(buf, size, needle, strlen(needle)));
+
+ return 0;
+}
+
+struct vpd_test {
+ int (*function)(void);
+ const char *description;
+};
+
+static const struct vpd_test vpd_tests[] = {
+ {
+ .function = dev_papr_vpd_open_close,
+ .description = "open/close " DEVPATH,
+ },
+ {
+ .function = dev_papr_vpd_unterm_loc_code,
+ .description = "ensure EINVAL on unterminated location code",
+ },
+ {
+ .function = dev_papr_vpd_null_handle,
+ .description = "ensure EFAULT on bad handle addr",
+ },
+ {
+ .function = dev_papr_vpd_get_handle_all,
+ .description = "get handle for all VPD"
+ },
+ {
+ .function = papr_vpd_close_handle_without_reading,
+ .description = "close handle without consuming VPD"
+ },
+ {
+ .function = dev_papr_vpd_get_handle_byte_at_a_time,
+ .description = "read all VPD one byte at a time"
+ },
+ {
+ .function = papr_vpd_reread,
+ .description = "ensure re-read yields same results"
+ },
+ {
+ .function = papr_vpd_system_loc_code,
+ .description = "get handle for system VPD"
+ },
+};
+
+int main(void)
+{
+ size_t fails = 0;
+
+ for (size_t i = 0; i < ARRAY_SIZE(vpd_tests); ++i) {
+ const struct vpd_test *t = &vpd_tests[i];
+
+ if (test_harness(t->function, t->description))
+ ++fails;
+ }
+
+ return fails == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
+}
diff --git a/tools/thermal/tmon/tui.c b/tools/thermal/tmon/tui.c
index 031b258667d8..7f5dd2b87f15 100644
--- a/tools/thermal/tmon/tui.c
+++ b/tools/thermal/tmon/tui.c
@@ -213,7 +213,7 @@ void show_cooling_device(void)
* cooling device instances. skip unused idr.
*/
mvwprintw(cooling_device_window, j + 2, 1,
- "%02d %12.12s%6d %6d",
+ "%02d %12.12s%6lu %6lu",
ptdata.cdi[j].instance,
ptdata.cdi[j].type,
ptdata.cdi[j].cur_state,
diff --git a/usr/Kconfig b/usr/Kconfig
index 8bbcf699fe3b..9279a2893ab0 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -185,9 +185,9 @@ config INITRAMFS_COMPRESSION_LZO
bool "LZO"
depends on RD_LZO
help
- It's compression ratio is the second poorest amongst the choices. The
- kernel size is about 10% bigger than gzip. Despite that, it's
- decompression speed is the second fastest and it's compression speed
+ Its compression ratio is the second poorest amongst the choices. The
+ kernel size is about 10% bigger than gzip. Despite that, its
+ decompression speed is the second fastest and its compression speed
is quite fast too.
If you choose this, keep in mind that you may need to install the lzop
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 89912a17f5d5..c0e230f4c3e9 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -61,7 +61,7 @@ static void irqfd_resampler_notify(struct kvm_kernel_irqfd_resampler *resampler)
list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
srcu_read_lock_held(&resampler->kvm->irq_srcu))
- eventfd_signal(irqfd->resamplefd, 1);
+ eventfd_signal(irqfd->resamplefd);
}
/*
@@ -786,7 +786,7 @@ ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
if (!ioeventfd_in_range(p, addr, len, val))
return -EOPNOTSUPP;
- eventfd_signal(p->eventfd, 1);
+ eventfd_signal(p->eventfd);
return 0;
}