-rw-r--r--.editorconfig3
-rw-r--r--.mailmap13
-rw-r--r--Documentation/ABI/stable/sysfs-bus-nvmem30
-rw-r--r--Documentation/ABI/stable/sysfs-driver-misc-cp50025
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-inv_icm4260018
-rw-r--r--Documentation/admin-guide/LSM/tomoyo.rst35
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt47
-rw-r--r--Documentation/admin-guide/mm/transhuge.rst4
-rw-r--r--Documentation/arch/riscv/uabi.rst4
-rw-r--r--Documentation/cdrom/cdrom-standard.rst4
-rw-r--r--Documentation/core-api/swiotlb.rst2
-rw-r--r--Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml3
-rw-r--r--Documentation/devicetree/bindings/arm/sunxi.yaml6
-rw-r--r--Documentation/devicetree/bindings/counter/ti-eqep.yaml27
-rw-r--r--Documentation/devicetree/bindings/dma/fsl,edma.yaml4
-rw-r--r--Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml2
-rw-r--r--Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adc.yaml30
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7173.yaml194
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml95
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml148
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml5
-rw-r--r--Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/adc/mediatek,mt6359-auxadc.yaml33
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml4
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti,ads1119.yaml155
-rw-r--r--Documentation/devicetree/bindings/iio/chemical/sciosense,ens160.yaml70
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml45
-rw-r--r--Documentation/devicetree/bindings/iio/frequency/adi,adf4350.yaml6
-rw-r--r--Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml31
-rw-r--r--Documentation/devicetree/bindings/iio/imu/adi,adis16480.yaml6
-rw-r--r--Documentation/devicetree/bindings/iio/imu/bosch,bmi160.yaml6
-rw-r--r--Documentation/devicetree/bindings/iio/light/vishay,veml6075.yaml9
-rw-r--r--Documentation/devicetree/bindings/iio/st,st-sensors.yaml1
-rw-r--r--Documentation/devicetree/bindings/input/elan,ekth6915.yaml19
-rw-r--r--Documentation/devicetree/bindings/input/ilitek,ili2901.yaml66
-rw-r--r--Documentation/devicetree/bindings/misc/qemu,vcpu-stall-detector.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml11
-rw-r--r--Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml18
-rw-r--r--Documentation/devicetree/bindings/nvmem/amlogic,meson-gxbb-efuse.yaml3
-rw-r--r--Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml3
-rw-r--r--Documentation/devicetree/bindings/usb/realtek,rts5411.yaml1
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml2
-rw-r--r--Documentation/driver-api/dmaengine/client.rst9
-rw-r--r--Documentation/driver-api/dmaengine/provider.rst10
-rw-r--r--Documentation/driver-api/driver-model/devres.rst3
-rw-r--r--Documentation/filesystems/proc.rst1
-rw-r--r--Documentation/i2c/i2c_bus.svg15
-rw-r--r--Documentation/i2c/summary.rst79
-rw-r--r--Documentation/iio/adis16475.rst23
-rw-r--r--Documentation/iio/adis16480.rst443
-rw-r--r--Documentation/iio/iio_dmabuf_api.rst54
-rw-r--r--Documentation/iio/iio_tools.rst27
-rw-r--r--Documentation/iio/index.rst3
-rw-r--r--Documentation/kbuild/kconfig-language.rst12
-rw-r--r--Documentation/kbuild/modules.rst8
-rw-r--r--Documentation/netlink/specs/ethtool.yaml7
-rw-r--r--Documentation/netlink/specs/netdev.yaml4
-rw-r--r--Documentation/netlink/specs/nfsd.yaml2
-rw-r--r--Documentation/networking/af_xdp.rst33
-rw-r--r--Documentation/process/maintainer-netdev.rst2
-rw-r--r--Documentation/userspace-api/index.rst1
-rw-r--r--Documentation/userspace-api/media/v4l/dev-subdev.rst2
-rw-r--r--Documentation/userspace-api/mfd_noexec.rst86
-rw-r--r--Documentation/virt/hyperv/clocks.rst21
-rw-r--r--Documentation/virt/hyperv/overview.rst22
-rw-r--r--Documentation/virt/hyperv/vmbus.rst143
-rw-r--r--MAINTAINERS96
-rw-r--r--Makefile2
-rw-r--r--arch/arc/net/bpf_jit.h2
-rw-r--r--arch/arc/net/bpf_jit_arcv2.c10
-rw-r--r--arch/arc/net/bpf_jit_core.c22
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso6
-rw-r--r--arch/arm/include/asm/efi.h13
-rw-r--r--arch/arm/kernel/ftrace.c17
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi3
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-mek.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts1
-rw-r--r--arch/arm64/include/asm/el2_setup.h6
-rw-r--r--arch/arm64/include/asm/io.h36
-rw-r--r--arch/arm64/include/asm/kvm_arm.h6
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h71
-rw-r--r--arch/arm64/include/asm/kvm_host.h25
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h4
-rw-r--r--arch/arm64/include/asm/kvm_pkvm.h9
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h1
-rw-r--r--arch/arm64/include/asm/unistd32.h2
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c3
-rw-r--r--arch/arm64/kernel/efi.c2
-rw-r--r--arch/arm64/kernel/pi/map_kernel.c2
-rw-r--r--arch/arm64/kernel/syscall.c16
-rw-r--r--arch/arm64/kvm/arm.c76
-rw-r--r--arch/arm64/kvm/emulate-nested.c21
-rw-r--r--arch/arm64/kvm/fpsimd.c11
-rw-r--r--arch/arm64/kvm/guest.c3
-rw-r--r--arch/arm64/kvm/hyp/aarch32.c18
-rw-r--r--arch/arm64/kvm/hyp/fpsimd.S6
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h36
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/pkvm.h1
-rw-r--r--arch/arm64/kvm/hyp/nvhe/ffa.c12
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c84
-rw-r--r--arch/arm64/kvm/hyp/nvhe/pkvm.c17
-rw-r--r--arch/arm64/kvm/hyp/nvhe/setup.c25
-rw-r--r--arch/arm64/kvm/hyp/nvhe/switch.c24
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c12
-rw-r--r--arch/arm64/kvm/nested.c6
-rw-r--r--arch/arm64/kvm/reset.c3
-rw-r--r--arch/arm64/kvm/vgic/vgic-init.c2
-rw-r--r--arch/arm64/kvm/vgic/vgic-mmio-v3.c15
-rw-r--r--arch/arm64/kvm/vgic/vgic.h2
-rw-r--r--arch/arm64/mm/contpte.c4
-rw-r--r--arch/arm64/mm/mmu.c3
-rw-r--r--arch/csky/include/uapi/asm/unistd.h1
-rw-r--r--arch/csky/kernel/syscall.c2
-rw-r--r--arch/hexagon/include/asm/syscalls.h6
-rw-r--r--arch/hexagon/include/uapi/asm/unistd.h1
-rw-r--r--arch/hexagon/kernel/syscalltab.c7
-rw-r--r--arch/loongarch/Kconfig5
-rw-r--r--arch/loongarch/Kconfig.debug1
-rw-r--r--arch/loongarch/boot/dts/loongson-2k0500-ref.dts4
-rw-r--r--arch/loongarch/boot/dts/loongson-2k1000-ref.dts4
-rw-r--r--arch/loongarch/boot/dts/loongson-2k2000-ref.dts2
-rw-r--r--arch/loongarch/include/asm/hw_breakpoint.h4
-rw-r--r--arch/loongarch/include/asm/numa.h1
-rw-r--r--arch/loongarch/include/asm/stackframe.h2
-rw-r--r--arch/loongarch/kernel/head.S2
-rw-r--r--arch/loongarch/kernel/hw_breakpoint.c96
-rw-r--r--arch/loongarch/kernel/ptrace.c47
-rw-r--r--arch/loongarch/kernel/setup.c6
-rw-r--r--arch/loongarch/kernel/smp.c5
-rw-r--r--arch/loongarch/kernel/syscall.c2
-rw-r--r--arch/loongarch/kernel/vmlinux.lds.S10
-rw-r--r--arch/loongarch/kvm/exit.c2
-rw-r--r--arch/microblaze/kernel/sys_microblaze.c2
-rw-r--r--arch/mips/bmips/setup.c3
-rw-r--r--arch/mips/include/asm/mipsmtregs.h2
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl2
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl4
-rw-r--r--arch/mips/pci/ops-rc32434.c4
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/include/asm/cacheflush.h15
-rw-r--r--arch/parisc/include/asm/pgtable.h27
-rw-r--r--arch/parisc/kernel/cache.c413
-rw-r--r--arch/parisc/kernel/sys_parisc32.c9
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/crypto/.gitignore2
-rw-r--r--arch/powerpc/include/asm/uaccess.h27
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c18
-rw-r--r--arch/powerpc/net/bpf_jit_comp32.c12
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c12
-rw-r--r--arch/powerpc/platforms/pseries/lparcfg.c4
-rw-r--r--arch/riscv/Kconfig2
-rw-r--r--arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts1
-rw-r--r--arch/riscv/include/asm/cmpxchg.h22
-rw-r--r--arch/riscv/include/asm/insn.h2
-rw-r--r--arch/riscv/kernel/cpu_ops_sbi.c2
-rw-r--r--arch/riscv/kernel/cpu_ops_spinwait.c3
-rw-r--r--arch/riscv/kernel/ftrace.c7
-rw-r--r--arch/riscv/kernel/patch.c26
-rw-r--r--arch/riscv/kernel/stacktrace.c2
-rw-r--r--arch/riscv/kernel/sys_riscv.c4
-rw-r--r--arch/riscv/kvm/aia_device.c7
-rw-r--r--arch/riscv/kvm/vcpu_onereg.c4
-rw-r--r--arch/riscv/mm/fault.c4
-rw-r--r--arch/riscv/mm/init.c21
-rw-r--r--arch/s390/boot/startup.c38
-rw-r--r--arch/s390/boot/vmem.c12
-rw-r--r--arch/s390/boot/vmlinux.lds.S1
-rw-r--r--arch/s390/configs/debug_defconfig43
-rw-r--r--arch/s390/configs/defconfig40
-rw-r--r--arch/s390/configs/zfcpdump_defconfig5
-rw-r--r--arch/s390/include/asm/entry-common.h2
-rw-r--r--arch/s390/kernel/crash_dump.c54
-rw-r--r--arch/s390/kernel/syscall.c27
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/s390/pci/pci_irq.c2
-rw-r--r--arch/sh/kernel/sys_sh32.c11
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl3
-rw-r--r--arch/sparc/kernel/sys32.S221
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl8
-rw-r--r--arch/x86/boot/compressed/Makefile4
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl2
-rw-r--r--arch/x86/events/intel/cstate.c1
-rw-r--r--arch/x86/events/intel/uncore.c1
-rw-r--r--arch/x86/events/rapl.c1
-rw-r--r--arch/x86/include/asm/cmpxchg_32.h12
-rw-r--r--arch/x86/include/asm/efi.h1
-rw-r--r--arch/x86/include/asm/entry-common.h15
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/include/asm/uaccess.h4
-rw-r--r--arch/x86/include/asm/vmxfeatures.h2
-rw-r--r--arch/x86/kernel/amd_nb.c9
-rw-r--r--arch/x86/kernel/cpu/aperfmperf.c1
-rw-r--r--arch/x86/kernel/cpu/common.c7
-rw-r--r--arch/x86/kernel/cpu/cpu.h2
-rw-r--r--arch/x86/kernel/cpu/intel.c25
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c3
-rw-r--r--arch/x86/kernel/cpu/topology_amd.c4
-rw-r--r--arch/x86/kernel/machine_kexec_64.c11
-rw-r--r--arch/x86/kernel/time.c20
-rw-r--r--arch/x86/kvm/Kconfig11
-rw-r--r--arch/x86/kvm/lapic.c39
-rw-r--r--arch/x86/kvm/lapic.h2
-rw-r--r--arch/x86/kvm/mmu/mmu.c46
-rw-r--r--arch/x86/kvm/mmu/spte.h9
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.h2
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c2
-rw-r--r--arch/x86/kvm/svm/sev.c19
-rw-r--r--arch/x86/kvm/svm/svm.c69
-rw-r--r--arch/x86/kvm/svm/svm.h4
-rw-r--r--arch/x86/kvm/vmx/nested.c5
-rw-r--r--arch/x86/kvm/vmx/vmx.c11
-rw-r--r--arch/x86/kvm/x86.c20
-rw-r--r--arch/x86/lib/getuser.S6
-rw-r--r--arch/x86/mm/numa.c6
-rw-r--r--arch/x86/platform/efi/memmap.c12
-rw-r--r--block/bio-integrity.c26
-rw-r--r--block/blk-flush.c3
-rw-r--r--block/blk-settings.c10
-rw-r--r--block/blk-stat.h1
-rw-r--r--block/blk-throttle.c24
-rw-r--r--block/blk-throttle.h8
-rw-r--r--block/blk-zoned.c50
-rw-r--r--block/sed-opal.c2
-rw-r--r--drivers/acpi/ac.c4
-rw-r--r--drivers/acpi/acpica/acevents.h4
-rw-r--r--drivers/acpi/acpica/evregion.c6
-rw-r--r--drivers/acpi/acpica/evxfregn.c54
-rw-r--r--drivers/acpi/acpica/exregion.c23
-rw-r--r--drivers/acpi/apei/einj-core.c2
-rw-r--r--drivers/acpi/ec.c12
-rw-r--r--drivers/acpi/internal.h4
-rw-r--r--drivers/acpi/mipi-disco-img.c28
-rw-r--r--drivers/acpi/sbs.c4
-rw-r--r--drivers/acpi/thermal.c8
-rw-r--r--drivers/acpi/x86/utils.c24
-rw-r--r--drivers/android/binder.c112
-rw-r--r--drivers/android/binder_alloc.c10
-rw-r--r--drivers/android/binder_internal.h5
-rw-r--r--drivers/android/dbitmap.h176
-rw-r--r--drivers/ata/ahci.c26
-rw-r--r--drivers/ata/libata-core.c39
-rw-r--r--drivers/ata/libata-scsi.c8
-rw-r--r--drivers/ata/pata_macio.c9
-rw-r--r--drivers/ata/pata_parport/pata_parport.c1
-rw-r--r--drivers/auxdisplay/ks0108.c1
-rw-r--r--drivers/auxdisplay/panel.c1
-rw-r--r--drivers/base/core.c48
-rw-r--r--drivers/base/regmap/regmap-i2c.c3
-rw-r--r--drivers/block/loop.c23
-rw-r--r--drivers/block/nbd.c51
-rw-r--r--drivers/block/null_blk/main.c5
-rw-r--r--drivers/block/null_blk/zoned.c13
-rw-r--r--drivers/bus/mhi/ep/main.c14
-rw-r--r--drivers/bus/mhi/host/pci_generic.c122
-rw-r--r--drivers/char/bsr.c1
-rw-r--r--drivers/char/dsp56k.c1
-rw-r--r--drivers/char/dtlk.c1
-rw-r--r--drivers/char/hw_random/core.c47
-rw-r--r--drivers/char/lp.c2
-rw-r--r--drivers/char/nvram.c1
-rw-r--r--drivers/char/ppdev.c2
-rw-r--r--drivers/char/tlclk.c1
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/tpm-buf.c26
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm2-cmd.c10
-rw-r--r--drivers/char/tpm/tpm2-sessions.c21
-rw-r--r--drivers/char/tpm/tpm_tis_core.c3
-rw-r--r--drivers/char/tpm/tpm_tis_core.h2
-rw-r--r--drivers/char/tpm/tpm_tis_spi_main.c3
-rw-r--r--drivers/char/ttyprintk.c1
-rw-r--r--drivers/clk/clkdev.c11
-rw-r--r--drivers/clk/sifive/sifive-prci.c8
-rw-r--r--drivers/counter/Kconfig2
-rw-r--r--drivers/counter/ftm-quaddec.c1
-rw-r--r--drivers/counter/ti-eqep.c137
-rw-r--r--drivers/cpufreq/amd-pstate-ut.c3
-rw-r--r--drivers/cpufreq/amd-pstate.c36
-rw-r--r--drivers/cpufreq/amd-pstate.h (renamed from include/linux/amd-pstate.h)33
-rw-r--r--drivers/cpufreq/intel_pstate.c35
-rw-r--r--drivers/crypto/intel/qat/qat_common/Makefile5
-rw-r--r--drivers/cxl/core/region.c18
-rw-r--r--drivers/dca/dca-core.c1
-rw-r--r--drivers/dma-buf/st-dma-fence.c6
-rw-r--r--drivers/dma-buf/sync_debug.c4
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/dma-axi-dmac.c40
-rw-r--r--drivers/dma/idxd/irq.c4
-rw-r--r--drivers/dma/ioat/init.c55
-rw-r--r--drivers/dma/ti/k3-udma-glue.c5
-rw-r--r--drivers/dma/xilinx/xdma.c4
-rw-r--r--drivers/edac/amd64_edac.c8
-rw-r--r--drivers/edac/igen6_edac.c4
-rw-r--r--drivers/firewire/Kconfig2
-rw-r--r--drivers/firewire/core-card.c6
-rw-r--r--drivers/firewire/core-cdev.c6
-rw-r--r--drivers/firewire/core-topology.c2
-rw-r--r--drivers/firewire/core-transaction.c30
-rw-r--r--drivers/firewire/packet-serdes-test.c1
-rw-r--r--drivers/firewire/uapi-test.c1
-rw-r--r--drivers/firmware/efi/efi-pstore.c8
-rw-r--r--drivers/firmware/efi/libstub/loongarch.c2
-rw-r--r--drivers/firmware/efi/libstub/zboot.lds1
-rw-r--r--drivers/firmware/efi/memmap.c9
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c13
-rw-r--r--drivers/firmware/psci/psci.c4
-rw-r--r--drivers/fpga/altera-fpga2sdram.c6
-rw-r--r--drivers/fpga/tests/Kconfig4
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpio/gpio-davinci.c5
-rw-r--r--drivers/gpio/gpio-graniterapids.c2
-rw-r--r--drivers/gpio/gpio-gw-pld.c1
-rw-r--r--drivers/gpio/gpio-mc33880.c1
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-pcf857x.c1
-rw-r--r--drivers/gpio/gpio-pl061.c1
-rw-r--r--drivers/gpio/gpio-tqmx86.c110
-rw-r--r--drivers/gpio/gpiolib-cdev.c28
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v14_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h1
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c13
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c72
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c10
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h5
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h2
-rw-r--r--drivers/gpu/drm/amd/include/pptable.h91
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c13
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c20
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c73
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c5
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c8
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c2
-rw-r--r--drivers/gpu/drm/bridge/panel.c7
-rw-r--r--drivers/gpu/drm/drm_buddy.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c6
-rw-r--r--drivers/gpu/drm/drm_fbdev_dma.c5
-rw-r--r--drivers/gpu/drm/drm_file.c8
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c5
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h4
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h6
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c8
-rw-r--r--drivers/gpu/drm/msm/registers/gen_header.py5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c24
-rw-r--r--drivers/gpu/drm/panel/Kconfig2
-rw-r--r--drivers/gpu/drm/panel/panel-lg-sw43408.c2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c1
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c20
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c2
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c8
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c42
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c19
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c60
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c9
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c14
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c1
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c12
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c2
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c18
-rw-r--r--drivers/greybus/core.c1
-rw-r--r--drivers/greybus/es2.c1
-rw-r--r--drivers/hid/hid-asus.c4
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hid/hid-input.c13
-rw-r--r--drivers/hid/hid-logitech-dj.c4
-rw-r--r--drivers/hid/hid-logitech-hidpp.c1
-rw-r--r--drivers/hid/hid-nintendo.c6
-rw-r--r--drivers/hid/hid-nvidia-shield.c4
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-elan.c59
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/loader.c79
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/loader.h33
-rw-r--r--drivers/hv/hv.c37
-rw-r--r--drivers/hv/hv_balloon.c190
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c8
-rw-r--r--drivers/hwmon/iio_hwmon.c45
-rw-r--r--drivers/hwmon/intel-m10-bmc-hwmon.c2
-rw-r--r--drivers/hwmon/ltc2992.c4
-rw-r--r--drivers/hwmon/peci/cputemp.c8
-rw-r--r--drivers/hwmon/shtc1.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-sysfs.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c1
-rw-r--r--drivers/hwtracing/intel_th/msu-sink.c1
-rw-r--r--drivers/i2c/busses/Makefile6
-rw-r--r--drivers/i2c/busses/i2c-at91-slave.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c2
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-parport.c1
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c11
-rw-r--r--drivers/i2c/busses/i2c-viai2c-common.c71
-rw-r--r--drivers/i2c/busses/i2c-viai2c-common.h2
-rw-r--r--drivers/i2c/busses/i2c-viai2c-wmt.c36
-rw-r--r--drivers/i2c/busses/i2c-viai2c-zhaoxin.c113
-rw-r--r--drivers/i2c/i2c-slave-testunit.c5
-rw-r--r--drivers/iio/Kconfig1
-rw-r--r--drivers/iio/accel/Kconfig2
-rw-r--r--drivers/iio/accel/adxl313_spi.c8
-rw-r--r--drivers/iio/accel/adxl355_spi.c10
-rw-r--r--drivers/iio/accel/adxl367_i2c.c4
-rw-r--r--drivers/iio/accel/adxl372_i2c.c2
-rw-r--r--drivers/iio/accel/bma400_i2c.c2
-rw-r--r--drivers/iio/accel/bmi088-accel-core.c5
-rw-r--r--drivers/iio/accel/da311.c2
-rw-r--r--drivers/iio/accel/dmard06.c6
-rw-r--r--drivers/iio/accel/dmard09.c4
-rw-r--r--drivers/iio/accel/dmard10.c2
-rw-r--r--drivers/iio/accel/fxls8962af-core.c18
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c4
-rw-r--r--drivers/iio/accel/kxsd9.c5
-rw-r--r--drivers/iio/accel/mc3230.c2
-rw-r--r--drivers/iio/accel/mma7455_i2c.c4
-rw-r--r--drivers/iio/accel/mma7660.c52
-rw-r--r--drivers/iio/accel/mma9551.c2
-rw-r--r--drivers/iio/accel/mma9553.c4
-rw-r--r--drivers/iio/accel/msa311.c8
-rw-r--r--drivers/iio/accel/mxc4005.c6
-rw-r--r--drivers/iio/accel/mxc6255.c4
-rw-r--r--drivers/iio/accel/st_accel.h1
-rw-r--r--drivers/iio/accel/st_accel_core.c81
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/accel/st_accel_spi.c5
-rw-r--r--drivers/iio/accel/stk8312.c4
-rw-r--r--drivers/iio/accel/stk8ba50.c2
-rw-r--r--drivers/iio/adc/Kconfig52
-rw-r--r--drivers/iio/adc/Makefile27
-rw-r--r--drivers/iio/adc/ad4130.c4
-rw-r--r--drivers/iio/adc/ad7124.c14
-rw-r--r--drivers/iio/adc/ad7173.c693
-rw-r--r--drivers/iio/adc/ad7192.c359
-rw-r--r--drivers/iio/adc/ad7266.c35
-rw-r--r--drivers/iio/adc/ad7291.c2
-rw-r--r--drivers/iio/adc/ad7292.c36
-rw-r--r--drivers/iio/adc/ad7380.c833
-rw-r--r--drivers/iio/adc/ad7606.c19
-rw-r--r--drivers/iio/adc/ad7793.c24
-rw-r--r--drivers/iio/adc/ad7944.c88
-rw-r--r--drivers/iio/adc/ad9467.c107
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c1
-rw-r--r--drivers/iio/adc/adi-axi-adc.c27
-rw-r--r--drivers/iio/adc/aspeed_adc.c30
-rw-r--r--drivers/iio/adc/axp20x_adc.c284
-rw-r--r--drivers/iio/adc/axp288_adc.c4
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c8
-rw-r--r--drivers/iio/adc/berlin2-adc.c24
-rw-r--r--drivers/iio/adc/cpcap-adc.c46
-rw-r--r--drivers/iio/adc/fsl-imx25-gcq.c16
-rw-r--r--drivers/iio/adc/hx711.c78
-rw-r--r--drivers/iio/adc/ina2xx-adc.c3
-rw-r--r--drivers/iio/adc/ingenic-adc.c1
-rw-r--r--drivers/iio/adc/intel_mrfld_adc.c4
-rw-r--r--drivers/iio/adc/ltc2309.c45
-rw-r--r--drivers/iio/adc/ltc2485.c2
-rw-r--r--drivers/iio/adc/max11205.c5
-rw-r--r--drivers/iio/adc/max1363.c28
-rw-r--r--drivers/iio/adc/mcp3564.c6
-rw-r--r--drivers/iio/adc/meson_saradc.c101
-rw-r--r--drivers/iio/adc/mp2629_adc.c19
-rw-r--r--drivers/iio/adc/mt6359-auxadc.c606
-rw-r--r--drivers/iio/adc/nau7802.c2
-rw-r--r--drivers/iio/adc/pac1934.c5
-rw-r--r--drivers/iio/adc/qcom-spmi-rradc.c50
-rw-r--r--drivers/iio/adc/rn5t618-adc.c5
-rw-r--r--drivers/iio/adc/sc27xx_adc.c41
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c29
-rw-r--r--drivers/iio/adc/ti-adc108s102.c28
-rw-r--r--drivers/iio/adc/ti-adc161s626.c18
-rw-r--r--drivers/iio/adc/ti-ads1119.c825
-rw-r--r--drivers/iio/adc/ti-ads131e08.c4
-rw-r--r--drivers/iio/adc/ti-ads7924.c2
-rw-r--r--drivers/iio/adc/ti-ads8688.c59
-rw-r--r--drivers/iio/adc/ti-tsc2046.c7
-rw-r--r--drivers/iio/adc/xilinx-ams.c116
-rw-r--r--drivers/iio/addac/ad74413r.c13
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c178
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c62
-rw-r--r--drivers/iio/buffer/kfifo_buf.c1
-rw-r--r--drivers/iio/chemical/Kconfig20
-rw-r--r--drivers/iio/chemical/Makefile3
-rw-r--r--drivers/iio/chemical/ams-iaq-core.c4
-rw-r--r--drivers/iio/chemical/bme680.h2
-rw-r--r--drivers/iio/chemical/bme680_core.c62
-rw-r--r--drivers/iio/chemical/bme680_i2c.c4
-rw-r--r--drivers/iio/chemical/ccs811.c2
-rw-r--r--drivers/iio/chemical/ens160.h10
-rw-r--r--drivers/iio/chemical/ens160_core.c367
-rw-r--r--drivers/iio/chemical/ens160_i2c.c62
-rw-r--r--drivers/iio/chemical/ens160_spi.c61
-rw-r--r--drivers/iio/common/inv_sensors/inv_sensors_timestamp.c6
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c45
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c3
-rw-r--r--drivers/iio/dac/Kconfig3
-rw-r--r--drivers/iio/dac/ad3552r.c168
-rw-r--r--drivers/iio/dac/ad5592r-base.c2
-rw-r--r--drivers/iio/dac/adi-axi-dac.c9
-rw-r--r--drivers/iio/dac/ltc2688.c5
-rw-r--r--drivers/iio/dac/max5522.c11
-rw-r--r--drivers/iio/dac/mcp4728.c2
-rw-r--r--drivers/iio/dac/stm32-dac-core.c5
-rw-r--r--drivers/iio/frequency/adf4350.c124
-rw-r--r--drivers/iio/frequency/adrf6780.c1
-rw-r--r--drivers/iio/gyro/adis16136.c26
-rw-r--r--drivers/iio/gyro/adis16260.c19
-rw-r--r--drivers/iio/gyro/bmg160_core.c4
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c6
-rw-r--r--drivers/iio/gyro/fxas21002c_i2c.c2
-rw-r--r--drivers/iio/gyro/itg3200_core.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c35
-rw-r--r--drivers/iio/health/afe4403.c9
-rw-r--r--drivers/iio/health/afe4404.c11
-rw-r--r--drivers/iio/health/max30100.c7
-rw-r--r--drivers/iio/health/max30102.c5
-rw-r--r--drivers/iio/humidity/am2315.c2
-rw-r--r--drivers/iio/humidity/hdc100x.c12
-rw-r--r--drivers/iio/humidity/hdc3020.c325
-rw-r--r--drivers/iio/humidity/si7005.c4
-rw-r--r--drivers/iio/humidity/si7020.c141
-rw-r--r--drivers/iio/iio_core.h4
-rw-r--r--drivers/iio/imu/Kconfig4
-rw-r--r--drivers/iio/imu/adis.c11
-rw-r--r--drivers/iio/imu/adis16400.c72
-rw-r--r--drivers/iio/imu/adis16475.c821
-rw-r--r--drivers/iio/imu/adis16480.c456
-rw-r--r--drivers/iio/imu/adis_buffer.c73
-rw-r--r--drivers/iio/imu/adis_trigger.c37
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c26
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c5
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c3
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c13
-rw-r--r--drivers/iio/imu/bno055/bno055_i2c.c2
-rw-r--r--drivers/iio/imu/fxos8700_i2c.c2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600.h4
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c128
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c33
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_core.c81
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c4
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c4
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c1
-rw-r--r--drivers/iio/imu/kmx61.c2
-rw-r--r--drivers/iio/industrialio-backend.c8
-rw-r--r--drivers/iio/industrialio-buffer.c588
-rw-r--r--drivers/iio/industrialio-core.c30
-rw-r--r--drivers/iio/industrialio-event.c13
-rw-r--r--drivers/iio/industrialio-gts-helper.c7
-rw-r--r--drivers/iio/inkern.c40
-rw-r--r--drivers/iio/light/Kconfig11
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/adjd_s311.c2
-rw-r--r--drivers/iio/light/adux1020.c15
-rw-r--r--drivers/iio/light/al3320a.c2
-rw-r--r--drivers/iio/light/apds9300.c2
-rw-r--r--drivers/iio/light/apds9960.c2
-rw-r--r--drivers/iio/light/bh1780.c4
-rw-r--r--drivers/iio/light/cm3232.c2
-rw-r--r--drivers/iio/light/cm3323.c2
-rw-r--r--drivers/iio/light/cm36651.c2
-rw-r--r--drivers/iio/light/gp2ap002.c4
-rw-r--r--drivers/iio/light/gp2ap020a00f.c3
-rw-r--r--drivers/iio/light/iqs621-als.c4
-rw-r--r--drivers/iio/light/isl29018.c6
-rw-r--r--drivers/iio/light/isl29028.c4
-rw-r--r--drivers/iio/light/isl29125.c2
-rw-r--r--drivers/iio/light/jsa1212.c2
-rw-r--r--drivers/iio/light/lv0104cs.c2
-rw-r--r--drivers/iio/light/max44000.c2
-rw-r--r--drivers/iio/light/max44009.c2
-rw-r--r--drivers/iio/light/noa1305.c2
-rw-r--r--drivers/iio/light/opt3001.c2
-rw-r--r--drivers/iio/light/pa12203001.c2
-rw-r--r--drivers/iio/light/rohm-bu27034.c6
-rw-r--r--drivers/iio/light/rpr0521.c2
-rw-r--r--drivers/iio/light/si1133.c2
-rw-r--r--drivers/iio/light/st_uvis25_core.c4
-rw-r--r--drivers/iio/light/stk3310.c37
-rw-r--r--drivers/iio/light/tcs3414.c2
-rw-r--r--drivers/iio/light/tcs3472.c2
-rw-r--r--drivers/iio/light/tsl4531.c2
-rw-r--r--drivers/iio/light/us5182d.c2
-rw-r--r--drivers/iio/light/vcnl4035.c2
-rw-r--r--drivers/iio/light/veml6030.c6
-rw-r--r--drivers/iio/light/veml6040.c281
-rw-r--r--drivers/iio/light/veml6070.c2
-rw-r--r--drivers/iio/light/vl6180.c2
-rw-r--r--drivers/iio/light/zopt2201.c2
-rw-r--r--drivers/iio/magnetometer/af8133j.c2
-rw-r--r--drivers/iio/magnetometer/ak8974.c18
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c6
-rw-r--r--drivers/iio/magnetometer/mag3110.c2
-rw-r--r--drivers/iio/magnetometer/mmc35240.c10
-rw-r--r--drivers/iio/magnetometer/tmag5273.c2
-rw-r--r--drivers/iio/multiplexer/iio-mux.c1
-rw-r--r--drivers/iio/potentiostat/lmp91000.c4
-rw-r--r--drivers/iio/pressure/bmp280-core.c797
-rw-r--r--drivers/iio/pressure/bmp280-regmap.c8
-rw-r--r--drivers/iio/pressure/bmp280-spi.c4
-rw-r--r--drivers/iio/pressure/bmp280.h65
-rw-r--r--drivers/iio/pressure/dps310.c2
-rw-r--r--drivers/iio/pressure/hp03.c4
-rw-r--r--drivers/iio/pressure/icp10100.c2
-rw-r--r--drivers/iio/pressure/mpl115_i2c.c2
-rw-r--r--drivers/iio/pressure/mpl3115.c2
-rw-r--r--drivers/iio/pressure/t5403.c2
-rw-r--r--drivers/iio/pressure/zpa2326_i2c.c4
-rw-r--r--drivers/iio/proximity/isl29501.c2
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c6
-rw-r--r--drivers/iio/proximity/rfd77402.c2
-rw-r--r--drivers/iio/proximity/sx9324.c5
-rw-r--r--drivers/iio/proximity/sx9360.c5
-rw-r--r--drivers/iio/proximity/sx9500.c16
-rw-r--r--drivers/iio/proximity/sx_common.c9
-rw-r--r--drivers/iio/proximity/vl53l0x-i2c.c2
-rw-r--r--drivers/iio/temperature/ltc2983.c262
-rw-r--r--drivers/iio/temperature/max30208.c1
-rw-r--r--drivers/iio/temperature/mcp9600.c363
-rw-r--r--drivers/iio/temperature/mlx90632.c6
-rw-r--r--drivers/iio/temperature/mlx90635.c6
-rw-r--r--drivers/iio/temperature/tmp006.c2
-rw-r--r--drivers/iio/temperature/tmp007.c2
-rw-r--r--drivers/iio/temperature/tsys01.c2
-rw-r--r--drivers/iio/temperature/tsys02d.c2
-rw-r--r--drivers/iio/test/iio-test-gts.c8
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c34
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h4
-rw-r--r--drivers/infiniband/hw/mana/mr.c1
-rw-r--r--drivers/infiniband/hw/mlx5/main.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c8
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c2
-rw-r--r--drivers/input/joystick/db9.c1
-rw-r--r--drivers/input/joystick/gamecon.c1
-rw-r--r--drivers/input/joystick/turbografx.c1
-rw-r--r--drivers/input/joystick/walkera0701.c1
-rw-r--r--drivers/input/joystick/xpad.c1
-rw-r--r--drivers/input/mouse/elantech.c31
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h18
-rw-r--r--drivers/input/serio/parkbd.c1
-rw-r--r--drivers/input/touchscreen/ads7846.c12
-rw-r--r--drivers/input/touchscreen/ili210x.c4
-rw-r--r--drivers/input/touchscreen/silead.c19
-rw-r--r--drivers/iommu/amd/amd_iommu.h3
-rw-r--r--drivers/iommu/amd/init.c12
-rw-r--r--drivers/iommu/amd/iommu.c60
-rw-r--r--drivers/iommu/amd/ppr.c25
-rw-r--r--drivers/iommu/dma-iommu.c8
-rw-r--r--drivers/iommu/intel/iommu.c20
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c44
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c5
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c4
-rw-r--r--drivers/irqchip/irq-riscv-intc.c9
-rw-r--r--drivers/irqchip/irq-sifive-plic.c34
-rw-r--r--drivers/leds/led-class.c6
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c1
-rw-r--r--drivers/mcb/mcb-parse.c5
-rw-r--r--drivers/mcb/mcb-pci.c16
-rw-r--r--drivers/md/bcache/alloc.c21
-rw-r--r--drivers/md/bcache/bcache.h1
-rw-r--r--drivers/md/bcache/btree.c7
-rw-r--r--drivers/md/bcache/request.c16
-rw-r--r--drivers/md/dm-table.c15
-rw-r--r--drivers/md/dm-zone.c72
-rw-r--r--drivers/md/dm.h3
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.c6
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.c71
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6.c5
-rw-r--r--drivers/media/pci/intel/ivsc/mei_csi.c5
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.c7
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c2
-rw-r--r--drivers/mfd/axp20x.c1
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/apds9802als.c2
-rw-r--r--drivers/misc/apds990x.c12
-rw-r--r--drivers/misc/bh1770glc.c14
-rw-r--r--drivers/misc/ds1682.c2
-rw-r--r--drivers/misc/eeprom/Kconfig2
-rw-r--r--drivers/misc/eeprom/digsy_mtc_eeprom.c46
-rw-r--r--drivers/misc/eeprom/ee1004.c131
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c178
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c104
-rw-r--r--drivers/misc/eeprom/max6875.c2
-rw-r--r--drivers/misc/fastrpc.c46
-rw-r--r--drivers/misc/hmc6352.c2
-rw-r--r--drivers/misc/ics932s401.c2
-rw-r--r--drivers/misc/isl29003.c2
-rw-r--r--drivers/misc/isl29020.c2
-rw-r--r--drivers/misc/keba/Kconfig12
-rw-r--r--drivers/misc/keba/Makefile3
-rw-r--r--drivers/misc/keba/cp500.c458
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c9
-rw-r--r--drivers/misc/mei/bus-fixup.c8
-rw-r--r--drivers/misc/mei/main.c2
-rw-r--r--drivers/misc/mei/pci-me.c4
-rw-r--r--drivers/misc/mei/platform-vsc.c39
-rw-r--r--drivers/misc/mei/vsc-fw-loader.c2
-rw-r--r--drivers/misc/open-dice.c1
-rw-r--r--drivers/misc/ti-st/st_kim.c8
-rw-r--r--drivers/misc/tifm_7xx1.c6
-rw-r--r--drivers/misc/tsl2550.c2
-rw-r--r--drivers/misc/vcpu_stall_detector.c31
-rw-r--r--drivers/mmc/host/moxart-mmc.c78
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c4
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c11
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c41
-rw-r--r--drivers/mmc/host/sdhci.c25
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c14
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c55
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h5
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c2
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c61
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h2
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h11
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c15
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h1
-rw-r--r--drivers/net/dsa/qca/qca8k-leds.c12
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h52
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h311
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c11
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c12
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c10
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c8
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c21
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c18
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c22
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c258
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c31
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c138
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c116
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c13
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c12
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c9
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c9
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c33
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h55
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c7
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c104
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c20
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c44
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c48
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h28
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c54
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c16
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c55
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_classifier.c2
-rw-r--r--drivers/net/geneve.c10
-rw-r--r--drivers/net/hamradio/baycom_epp.c1
-rw-r--r--drivers/net/hamradio/baycom_par.c1
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c4
-rw-r--r--drivers/net/netdevsim/netdev.c3
-rw-r--r--drivers/net/netkit.c30
-rw-r--r--drivers/net/phy/dp83tg720.c38
-rw-r--r--drivers/net/phy/micrel.c116
-rw-r--r--drivers/net/phy/mxl-gpy.c58
-rw-r--r--drivers/net/phy/sfp.c3
-rw-r--r--drivers/net/plip/plip.c1
-rw-r--r--drivers/net/pse-pd/Kconfig1
-rw-r--r--drivers/net/usb/ax88179_178a.c24
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/rtl8150.c3
-rw-r--r--drivers/net/usb/smsc95xx.c11
-rw-r--r--drivers/net/virtio_net.c74
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vxlan/vxlan_core.c17
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c38
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c41
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c17
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c43
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h12
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c15
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c2
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c18
-rw-r--r--drivers/nfc/virtual_ncidev.c4
-rw-r--r--drivers/nvme/host/apple.c1
-rw-r--r--drivers/nvme/host/core.c120
-rw-r--r--drivers/nvme/host/fabrics.c6
-rw-r--r--drivers/nvme/host/ioctl.c30
-rw-r--r--drivers/nvme/host/multipath.c26
-rw-r--r--drivers/nvme/host/nvme.h9
-rw-r--r--drivers/nvme/host/pci.c3
-rw-r--r--drivers/nvme/host/pr.c2
-rw-r--r--drivers/nvme/target/configfs.c49
-rw-r--r--drivers/nvme/target/core.c10
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c3
-rw-r--r--drivers/nvme/target/fabrics-cmd.c6
-rw-r--r--drivers/nvme/target/fc.c2
-rw-r--r--drivers/nvme/target/passthru.c6
-rw-r--r--drivers/nvmem/apple-efuses.c1
-rw-r--r--drivers/nvmem/brcm_nvram.c1
-rw-r--r--drivers/nvmem/core.c90
-rw-r--r--drivers/nvmem/meson-efuse.c5
-rw-r--r--drivers/nvmem/rockchip-efuse.c1
-rw-r--r--drivers/nvmem/rockchip-otp.c2
-rw-r--r--drivers/nvmem/u-boot-env.c1
-rw-r--r--drivers/of/irq.c125
-rw-r--r--drivers/of/of_private.h3
-rw-r--r--drivers/of/of_test.c1
-rw-r--r--drivers/of/property.c30
-rw-r--r--drivers/parport/daisy.c1
-rw-r--r--drivers/parport/parport_amiga.c8
-rw-r--r--drivers/parport/share.c10
-rw-r--r--drivers/pci/access.c4
-rw-r--r--drivers/pci/msi/msi.c10
-rw-r--r--drivers/pci/pci.c1
-rw-r--r--drivers/pci/probe.c3
-rw-r--r--drivers/peci/controller/peci-aspeed.c1
-rw-r--r--drivers/peci/core.c5
-rw-r--r--drivers/peci/cpu.c21
-rw-r--r--drivers/peci/device.c3
-rw-r--r--drivers/peci/internal.h6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c189
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h32
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h13
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c2
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c68
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h1
-rw-r--r--drivers/pinctrl/pinctrl-tps6594.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c4
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c1
-rw-r--r--drivers/platform/mellanox/nvsw-sn2201.c5
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/amd/hsmp.c50
-rw-r--r--drivers/platform/x86/amilo-rfkill.c1
-rw-r--r--drivers/platform/x86/dell/dell-smbios-base.c103
-rw-r--r--drivers/platform/x86/firmware_attributes_class.c1
-rw-r--r--drivers/platform/x86/ibm_rtl.c1
-rw-r--r--drivers/platform/x86/intel/hid.c1
-rw-r--r--drivers/platform/x86/intel/pmc/pltdrv.c1
-rw-r--r--drivers/platform/x86/intel/rst.c1
-rw-r--r--drivers/platform/x86/intel/smartconnect.c1
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c2
-rw-r--r--drivers/platform/x86/intel/vbtn.c1
-rw-r--r--drivers/platform/x86/lg-laptop.c89
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c1
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c1
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c1
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt.c1
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc.c1
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c170
-rw-r--r--drivers/platform/x86/uv_sysfs.c1
-rw-r--r--drivers/platform/x86/wireless-hotkey.c3
-rw-r--r--drivers/platform/x86/x86-android-tablets/Kconfig2
-rw-r--r--drivers/platform/x86/xo1-rfkill.c1
-rw-r--r--drivers/pmdomain/imx/gpcv2.c11
-rw-r--r--drivers/pnp/base.h1
-rw-r--r--drivers/pnp/driver.c6
-rw-r--r--drivers/pps/clients/pps_parport.c1
-rw-r--r--drivers/pps/generators/pps_gen_parport.c1
-rw-r--r--drivers/ptp/ptp_chardev.c3
-rw-r--r--drivers/ptp/ptp_sysfs.c3
-rw-r--r--drivers/pwm/pwm-stm32.c23
-rw-r--r--drivers/ras/amd/atl/internal.h2
-rw-r--r--drivers/ras/amd/atl/system.c2
-rw-r--r--drivers/ras/amd/atl/umc.c160
-rw-r--r--drivers/regulator/axp20x-regulator.c33
-rw-r--r--drivers/regulator/bd71815-regulator.c2
-rw-r--r--drivers/regulator/core.c1
-rw-r--r--drivers/regulator/rtq2208-regulator.c22
-rw-r--r--drivers/regulator/tps6594-regulator.c12
-rw-r--r--drivers/s390/char/sclp.c1
-rw-r--r--drivers/s390/virtio/virtio_ccw.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c31
-rw-r--r--drivers/scsi/imm.c1
-rw-r--r--drivers/scsi/libsas/sas_ata.c6
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c62
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c21
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c27
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c14
-rw-r--r--drivers/scsi/ppa.c1
-rw-r--r--drivers/scsi/qedf/qedf.h1
-rw-r--r--drivers/scsi/qedf/qedf_main.c47
-rw-r--r--drivers/scsi/scsi.c14
-rw-r--r--drivers/scsi/scsi_transport_sas.c23
-rw-r--r--drivers/scsi/sd.c25
-rw-r--r--drivers/scsi/sr.h2
-rw-r--r--drivers/scsi/sr_ioctl.c5
-rw-r--r--drivers/siox/siox-bus-gpio.c1
-rw-r--r--drivers/slimbus/stream.c8
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c4
-rw-r--r--drivers/soundwire/amd_manager.c3
-rw-r--r--drivers/soundwire/intel_auxdevice.c6
-rw-r--r--drivers/soundwire/mipi_disco.c30
-rw-r--r--drivers/spi/spi-butterfly.c1
-rw-r--r--drivers/spi/spi-cadence-xspi.c20
-rw-r--r--drivers/spi/spi-cs42l43.c6
-rw-r--r--drivers/spi/spi-imx.c14
-rw-r--r--drivers/spi/spi-lm70llp.c1
-rw-r--r--drivers/spi/spi-stm32-qspi.c12
-rw-r--r--drivers/spi/spi-stm32.c16
-rw-r--r--drivers/spi/spi.c75
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c12
-rw-r--r--drivers/staging/iio/addac/adt7316.c9
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h5
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c10
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c7
-rw-r--r--drivers/thermal/gov_step_wise.c6
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c3
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c12
-rw-r--r--drivers/thermal/thermal_core.c75
-rw-r--r--drivers/thermal/thermal_core.h6
-rw-r--r--drivers/thermal/thermal_debugfs.c18
-rw-r--r--drivers/thermal/thermal_trip.c20
-rw-r--r--drivers/thunderbolt/debugfs.c5
-rw-r--r--drivers/tty/mxser.c2
-rw-r--r--drivers/tty/n_tty.c22
-rw-r--r--drivers/tty/serial/8250/8250_core.c5
-rw-r--r--drivers/tty/serial/8250/8250_dw.c36
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c3
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.h33
-rw-r--r--drivers/tty/serial/8250/8250_omap.c25
-rw-r--r--drivers/tty/serial/8250/8250_pci.c13
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c1
-rw-r--r--drivers/tty/serial/Kconfig3
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c7
-rw-r--r--drivers/tty/serial/imx.c4
-rw-r--r--drivers/tty/serial/mcf.c2
-rw-r--r--drivers/tty/serial/serial_base.h30
-rw-r--r--drivers/tty/serial/serial_base_bus.c129
-rw-r--r--drivers/tty/serial/serial_core.c6
-rw-r--r--drivers/tty/serial/serial_port.c7
-rw-r--r--drivers/ufs/core/ufs-mcq.c17
-rw-r--r--drivers/ufs/core/ufshcd.c7
-rw-r--r--drivers/uio/uio.c1
-rw-r--r--drivers/uio/uio_aec.c1
-rw-r--r--drivers/uio/uio_cif.c1
-rw-r--r--drivers/uio/uio_mf624.c3
-rw-r--r--drivers/uio/uio_netx.c1
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/atm/cxacru.c14
-rw-r--r--drivers/usb/chipidea/core.c8
-rw-r--r--drivers/usb/chipidea/ulpi.c5
-rw-r--r--drivers/usb/class/cdc-wdm.c4
-rw-r--r--drivers/usb/core/hcd.c12
-rw-r--r--drivers/usb/dwc3/core.c26
-rw-r--r--drivers/usb/gadget/function/f_printer.c40
-rw-r--r--drivers/usb/gadget/function/u_ether.c4
-rw-r--r--drivers/usb/gadget/udc/aspeed_udc.c4
-rw-r--r--drivers/usb/host/xhci-pci.c7
-rw-r--r--drivers/usb/host/xhci-ring.c59
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/musb/da8xx.c8
-rw-r--r--drivers/usb/storage/alauda.c9
-rw-r--r--drivers/usb/storage/scsiglue.c6
-rw-r--r--drivers/usb/storage/uas.c7
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c5
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c61
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c19
-rw-r--r--drivers/vfio/device_cdev.c7
-rw-r--r--drivers/vfio/group.c7
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c271
-rw-r--r--drivers/vfio/vfio_main.c44
-rw-r--r--drivers/w1/w1.c2
-rw-r--r--drivers/w1/w1_int.c6
-rw-r--r--drivers/watchdog/Kconfig1
-rw-r--r--drivers/watchdog/menz69_wdt.c1
-rw-r--r--drivers/watchdog/omap_wdt.c1
-rw-r--r--drivers/watchdog/simatic-ipc-wdt.c1
-rw-r--r--drivers/watchdog/ts4800_wdt.c1
-rw-r--r--drivers/watchdog/twl4030_wdt.c1
-rw-r--r--fs/9p/vfs_dentry.c9
-rw-r--r--fs/9p/vfs_inode.c1
-rw-r--r--fs/afs/inode.c1
-rw-r--r--fs/afs/mntpt.c5
-rw-r--r--fs/bcachefs/alloc_background.c311
-rw-r--r--fs/bcachefs/alloc_background.h14
-rw-r--r--fs/bcachefs/alloc_foreground.c4
-rw-r--r--fs/bcachefs/backpointers.c2
-rw-r--r--fs/bcachefs/bcachefs.h68
-rw-r--r--fs/bcachefs/bcachefs_format.h208
-rw-r--r--fs/bcachefs/bkey.c2
-rw-r--r--fs/bcachefs/bkey_methods.c6
-rw-r--r--fs/bcachefs/bkey_methods.h3
-rw-r--r--fs/bcachefs/btree_cache.c9
-rw-r--r--fs/bcachefs/btree_gc.c35
-rw-r--r--fs/bcachefs/btree_gc.h44
-rw-r--r--fs/bcachefs/btree_gc_types.h29
-rw-r--r--fs/bcachefs/btree_io.c93
-rw-r--r--fs/bcachefs/btree_iter.c35
-rw-r--r--fs/bcachefs/btree_key_cache.c43
-rw-r--r--fs/bcachefs/btree_locking.c1
-rw-r--r--fs/bcachefs/btree_node_scan.c9
-rw-r--r--fs/bcachefs/btree_types.h16
-rw-r--r--fs/bcachefs/buckets.c295
-rw-r--r--fs/bcachefs/buckets.h17
-rw-r--r--fs/bcachefs/buckets_types.h2
-rw-r--r--fs/bcachefs/chardev.c9
-rw-r--r--fs/bcachefs/data_update.c3
-rw-r--r--fs/bcachefs/debug.c109
-rw-r--r--fs/bcachefs/disk_groups_format.h21
-rw-r--r--fs/bcachefs/ec.c28
-rw-r--r--fs/bcachefs/errcode.h3
-rw-r--r--fs/bcachefs/error.c19
-rw-r--r--fs/bcachefs/error.h7
-rw-r--r--fs/bcachefs/extents.c9
-rw-r--r--fs/bcachefs/fs-io-buffered.c6
-rw-r--r--fs/bcachefs/fs-io-direct.c4
-rw-r--r--fs/bcachefs/fs-ioctl.c19
-rw-r--r--fs/bcachefs/fs.c36
-rw-r--r--fs/bcachefs/fsck.c54
-rw-r--r--fs/bcachefs/io_read.c37
-rw-r--r--fs/bcachefs/io_write.c19
-rw-r--r--fs/bcachefs/journal.c8
-rw-r--r--fs/bcachefs/journal_io.c20
-rw-r--r--fs/bcachefs/journal_seq_blacklist.c2
-rw-r--r--fs/bcachefs/journal_seq_blacklist_format.h15
-rw-r--r--fs/bcachefs/lru.h3
-rw-r--r--fs/bcachefs/mean_and_variance_test.c1
-rw-r--r--fs/bcachefs/move.c16
-rw-r--r--fs/bcachefs/movinggc.c7
-rw-r--r--fs/bcachefs/opts.h2
-rw-r--r--fs/bcachefs/recovery.c12
-rw-r--r--fs/bcachefs/replicas_format.h31
-rw-r--r--fs/bcachefs/sb-downgrade.c15
-rw-r--r--fs/bcachefs/sb-downgrade_format.h17
-rw-r--r--fs/bcachefs/sb-errors.c14
-rw-r--r--fs/bcachefs/sb-errors_format.h309
-rw-r--r--fs/bcachefs/sb-errors_types.h281
-rw-r--r--fs/bcachefs/sb-members_format.h110
-rw-r--r--fs/bcachefs/seqmutex.h11
-rw-r--r--fs/bcachefs/snapshot.c100
-rw-r--r--fs/bcachefs/snapshot.h1
-rw-r--r--fs/bcachefs/str_hash.h2
-rw-r--r--fs/bcachefs/super-io.c25
-rw-r--r--fs/bcachefs/super.c31
-rw-r--r--fs/btrfs/bio.c4
-rw-r--r--fs/btrfs/block-group.c11
-rw-r--r--fs/btrfs/btrfs_inode.h10
-rw-r--r--fs/btrfs/disk-io.c10
-rw-r--r--fs/btrfs/extent_io.c60
-rw-r--r--fs/btrfs/file.c16
-rw-r--r--fs/btrfs/free-space-cache.c2
-rw-r--r--fs/btrfs/ordered-data.c31
-rw-r--r--fs/btrfs/qgroup.c4
-rw-r--r--fs/btrfs/scrub.c24
-rw-r--r--fs/btrfs/tree-log.c60
-rw-r--r--fs/cachefiles/daemon.c3
-rw-r--r--fs/cachefiles/internal.h5
-rw-r--r--fs/cachefiles/ondemand.c218
-rw-r--r--fs/dcache.c15
-rw-r--r--fs/debugfs/inode.c10
-rw-r--r--fs/file.c4
-rw-r--r--fs/iomap/buffered-io.c58
-rw-r--r--fs/jfs/xattr.c4
-rw-r--r--fs/netfs/buffered_write.c2
-rw-r--r--fs/netfs/direct_write.c2
-rw-r--r--fs/netfs/objects.c5
-rw-r--r--fs/netfs/write_collect.c7
-rw-r--r--fs/netfs/write_issue.c9
-rw-r--r--fs/nfs/dir.c77
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/nfs4proc.c24
-rw-r--r--fs/nfs/pagelist.c5
-rw-r--r--fs/nfs/symlink.c2
-rw-r--r--fs/nfsd/netlink.c2
-rw-r--r--fs/nfsd/netlink.h3
-rw-r--r--fs/nfsd/nfsctl.c50
-rw-r--r--fs/nfsd/nfssvc.c1
-rw-r--r--fs/nilfs2/dir.c2
-rw-r--r--fs/nilfs2/segment.c3
-rw-r--r--fs/ocfs2/aops.c5
-rw-r--r--fs/ocfs2/journal.c209
-rw-r--r--fs/ocfs2/journal.h2
-rw-r--r--fs/ocfs2/ocfs2.h27
-rw-r--r--fs/ocfs2/ocfs2_trace.h2
-rw-r--r--fs/ocfs2/super.c4
-rw-r--r--fs/open.c4
-rw-r--r--fs/overlayfs/dir.c8
-rw-r--r--fs/overlayfs/export.c6
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/proc/task_mmu.c3
-rw-r--r--fs/signalfd.c6
-rw-r--r--fs/smb/client/cifsfs.c3
-rw-r--r--fs/smb/client/cifsglob.h3
-rw-r--r--fs/smb/client/cifspdu.h2
-rw-r--r--fs/smb/client/cifssmb.c8
-rw-r--r--fs/smb/client/file.c27
-rw-r--r--fs/smb/client/inode.c4
-rw-r--r--fs/smb/client/smb2ops.c3
-rw-r--r--fs/smb/client/smb2pdu.c22
-rw-r--r--fs/smb/client/smb2transport.c2
-rw-r--r--fs/smb/common/cifs_arc4.c1
-rw-r--r--fs/smb/common/cifs_md4.c1
-rw-r--r--fs/smb/server/smb2pdu.c22
-rw-r--r--fs/smb/server/vfs.c17
-rw-r--r--fs/smb/server/vfs.h3
-rw-r--r--fs/smb/server/vfs_cache.c3
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c6
-rw-r--r--fs/xfs/libxfs/xfs_attr.c40
-rw-r--r--fs/xfs/libxfs/xfs_attr.h3
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c32
-rw-r--r--fs/xfs/libxfs/xfs_fs.h2
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c47
-rw-r--r--fs/xfs/libxfs/xfs_sb.c7
-rw-r--r--fs/xfs/scrub/scrub.c2
-rw-r--r--fs/xfs/scrub/xfarray.c9
-rw-r--r--fs/xfs/xfs_attr_item.c17
-rw-r--r--fs/xfs/xfs_bmap_util.c30
-rw-r--r--fs/xfs/xfs_bmap_util.h2
-rw-r--r--fs/xfs/xfs_handle.c7
-rw-r--r--fs/xfs/xfs_icache.c2
-rw-r--r--fs/xfs/xfs_inode.c47
-rw-r--r--fs/xfs/xfs_iomap.c34
-rw-r--r--fs/xfs/xfs_iwalk.c5
-rw-r--r--fs/xfs/xfs_reflink.c1
-rw-r--r--include/acpi/acpixf.h4
-rw-r--r--include/asm-generic/syscalls.h2
-rw-r--r--include/drm/drm_buddy.h6
-rw-r--r--include/dt-bindings/iio/adc/mediatek,mt6357-auxadc.h21
-rw-r--r--include/dt-bindings/iio/adc/mediatek,mt6358-auxadc.h22
-rw-r--r--include/dt-bindings/iio/adc/mediatek,mt6359-auxadc.h22
-rw-r--r--include/dt-bindings/net/ti-dp83867.h4
-rw-r--r--include/dt-bindings/net/ti-dp83869.h4
-rw-r--r--include/linux/atomic/atomic-arch-fallback.h6
-rw-r--r--include/linux/atomic/atomic-instrumented.h8
-rw-r--r--include/linux/atomic/atomic-long.h4
-rw-r--r--include/linux/bio.h4
-rw-r--r--include/linux/blk-integrity.h10
-rw-r--r--include/linux/blkdev.h1
-rw-r--r--include/linux/bpf_verifier.h2
-rw-r--r--include/linux/btf.h2
-rw-r--r--include/linux/cdrom.h2
-rw-r--r--include/linux/closure.h23
-rw-r--r--include/linux/compat.h2
-rw-r--r--include/linux/compiler_types.h23
-rw-r--r--include/linux/dev_printk.h8
-rw-r--r--include/linux/device.h2
-rw-r--r--include/linux/dmaengine.h33
-rw-r--r--include/linux/eeprom_93xx46.h32
-rw-r--r--include/linux/etherdevice.h8
-rw-r--r--include/linux/filter.h14
-rw-r--r--include/linux/ftrace.h6
-rw-r--r--include/linux/huge_mm.h10
-rw-r--r--include/linux/i2c.h25
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h14
-rw-r--r--include/linux/iio/buffer-dma.h31
-rw-r--r--include/linux/iio/buffer_impl.h33
-rw-r--r--include/linux/iio/consumer.h10
-rw-r--r--include/linux/iio/iio.h94
-rw-r--r--include/linux/iio/imu/adis.h87
-rw-r--r--include/linux/io_uring_types.h4
-rw-r--r--include/linux/iommu.h2
-rw-r--r--include/linux/kcov.h49
-rw-r--r--include/linux/ksm.h17
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/lockdep.h5
-rw-r--r--include/linux/lsm_hook_defs.h2
-rw-r--r--include/linux/math.h2
-rw-r--r--include/linux/mhi.h2
-rw-r--r--include/linux/misc/keba.h25
-rw-r--r--include/linux/mlx5/mlx5_ifc.h4
-rw-r--r--include/linux/mm.h14
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mmzone.h9
-rw-r--r--include/linux/module.h14
-rw-r--r--include/linux/netfs.h18
-rw-r--r--include/linux/numa.h5
-rw-r--r--include/linux/nvme.h6
-rw-r--r--include/linux/page-flags.h21
-rw-r--r--include/linux/pagemap.h38
-rw-r--r--include/linux/parport.h6
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/peci-cpu.h24
-rw-r--r--include/linux/peci.h6
-rw-r--r--include/linux/pgalloc_tag.h11
-rw-r--r--include/linux/pnp.h6
-rw-r--r--include/linux/printk.h3
-rw-r--r--include/linux/pse-pd/pse.h4
-rw-r--r--include/linux/security.h5
-rw-r--r--include/linux/serial_core.h21
-rw-r--r--include/linux/spi/spi.h7
-rw-r--r--include/linux/string.h2
-rw-r--r--include/linux/syscalls.h20
-rw-r--r--include/linux/sysfs.h9
-rw-r--r--include/linux/tpm.h17
-rw-r--r--include/linux/vfio.h1
-rw-r--r--include/linux/vfio_pci_core.h2
-rw-r--r--include/linux/w1.h7
-rw-r--r--include/linux/workqueue.h2
-rw-r--r--include/net/bluetooth/hci_core.h36
-rw-r--r--include/net/dst_ops.h2
-rw-r--r--include/net/inet_connection_sock.h2
-rw-r--r--include/net/ip_tunnels.h5
-rw-r--r--include/net/netfilter/nf_tables.h5
-rw-r--r--include/net/netns/netfilter.h3
-rw-r--r--include/net/page_pool/types.h5
-rw-r--r--include/net/request_sock.h12
-rw-r--r--include/net/rtnetlink.h1
-rw-r--r--include/net/sock.h13
-rw-r--r--include/net/tcp_ao.h7
-rw-r--r--include/scsi/scsi_devinfo.h4
-rw-r--r--include/scsi/scsi_transport_sas.h2
-rw-r--r--include/sound/dmaengine_pcm.h1
-rw-r--r--include/sound/pcm.h2
-rw-r--r--include/trace/events/cachefiles.h8
-rw-r--r--include/trace/events/firewire.h113
-rw-r--r--include/trace/events/qdisc.h2
-rw-r--r--include/uapi/asm-generic/unistd.h2
-rw-r--r--include/uapi/linux/cn_proc.h3
-rw-r--r--include/uapi/linux/iio/buffer.h22
-rw-r--r--include/uapi/linux/input-event-codes.h2
-rw-r--r--include/uapi/linux/kd.h96
-rw-r--r--include/uapi/linux/netdev.h1
-rw-r--r--include/uapi/linux/stat.h2
-rw-r--r--init/Kconfig2
-rw-r--r--io_uring/cancel.h4
-rw-r--r--io_uring/io-wq.c10
-rw-r--r--io_uring/io_uring.c5
-rw-r--r--io_uring/io_uring.h2
-rw-r--r--io_uring/memmap.c5
-rw-r--r--io_uring/napi.c24
-rw-r--r--io_uring/net.c6
-rw-r--r--io_uring/opdef.c5
-rw-r--r--io_uring/register.c4
-rw-r--r--io_uring/rsrc.c2
-rw-r--r--kernel/auditfilter.c5
-rw-r--r--kernel/bpf/arena.c16
-rw-r--r--kernel/bpf/core.c7
-rw-r--r--kernel/bpf/devmap.c3
-rw-r--r--kernel/bpf/ringbuf.c31
-rw-r--r--kernel/bpf/syscall.c11
-rw-r--r--kernel/bpf/verifier.c100
-rw-r--r--kernel/cpu.c11
-rw-r--r--kernel/dma/map_benchmark.c25
-rw-r--r--kernel/events/core.c13
-rw-r--r--kernel/gcov/gcc_4_7.c4
-rwxr-xr-xkernel/gen_kheaders.sh9
-rw-r--r--kernel/kallsyms.c23
-rw-r--r--kernel/kcov.c1
-rw-r--r--kernel/module/kallsyms.c25
-rw-r--r--kernel/pid_namespace.c1
-rw-r--r--kernel/power/swap.c2
-rw-r--r--kernel/printk/Makefile2
-rw-r--r--kernel/printk/conopt.c146
-rw-r--r--kernel/printk/console_cmdline.h6
-rw-r--r--kernel/printk/printk.c23
-rw-r--r--kernel/sys_ni.c2
-rw-r--r--kernel/time/hrtimer.c2
-rw-r--r--kernel/time/tick-common.c42
-rw-r--r--kernel/trace/Kconfig4
-rw-r--r--kernel/trace/bpf_trace.c12
-rw-r--r--kernel/trace/ftrace.c13
-rw-r--r--kernel/trace/trace_probe.c4
-rw-r--r--kernel/trace/trace_uprobe.c14
-rw-r--r--kernel/workqueue.c51
-rw-r--r--lib/Kconfig8
-rw-r--r--lib/alloc_tag.c16
-rw-r--r--lib/closure.c54
-rw-r--r--lib/fortify_kunit.c5
-rw-r--r--lib/math/prime_numbers.c1
-rw-r--r--lib/math/rational-test.c1
-rw-r--r--lib/overflow_kunit.c20
-rw-r--r--lib/string_helpers_kunit.c1
-rw-r--r--lib/string_kunit.c1
-rw-r--r--lib/test_dynamic_debug.c1
-rw-r--r--lib/test_rhashtable.c1
-rw-r--r--mm/compaction.c11
-rw-r--r--mm/debug_vm_pgtable.c31
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/huge_memory.c36
-rw-r--r--mm/hugetlb.c16
-rw-r--r--mm/internal.h6
-rw-r--r--mm/kasan/common.c2
-rw-r--r--mm/kmsan/core.c15
-rw-r--r--mm/ksm.c17
-rw-r--r--mm/memblock.c24
-rw-r--r--mm/memcontrol.c5
-rw-r--r--mm/memory.c23
-rw-r--r--mm/mempool.c2
-rw-r--r--mm/migrate.c13
-rw-r--r--mm/mm_init.c43
-rw-r--r--mm/page_alloc.c61
-rw-r--r--mm/page_io.c2
-rw-r--r--mm/page_table_check.c11
-rw-r--r--mm/shmem.c2
-rw-r--r--mm/slub.c12
-rw-r--r--mm/util.c14
-rw-r--r--mm/vmalloc.c23
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/9p/client.c2
-rw-r--r--net/ax25/af_ax25.c6
-rw-r--r--net/ax25/ax25_dev.c2
-rw-r--r--net/batman-adv/originator.c27
-rw-r--r--net/batman-adv/translation-table.c47
-rw-r--r--net/bluetooth/hci_sync.c2
-rw-r--r--net/bluetooth/l2cap_core.c12
-rw-r--r--net/bpf/test_run.c6
-rw-r--r--net/bridge/br_mst.c13
-rw-r--r--net/can/j1939/main.c6
-rw-r--r--net/can/j1939/transport.c21
-rw-r--r--net/core/dev.c15
-rw-r--r--net/core/dst_cache.c2
-rw-r--r--net/core/filter.c5
-rw-r--r--net/core/net_namespace.c9
-rw-r--r--net/core/netdev-genl.c16
-rw-r--r--net/core/rtnetlink.c44
-rw-r--r--net/core/sock.c3
-rw-r--r--net/core/sock_map.c22
-rw-r--r--net/core/xdp.c4
-rw-r--r--net/dccp/ipv4.c7
-rw-r--r--net/dccp/ipv6.c7
-rw-r--r--net/ethernet/eth.c4
-rw-r--r--net/ethtool/ioctl.c2
-rw-r--r--net/ethtool/tsinfo.c6
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/cipso_ipv4.c75
-rw-r--r--net/ipv4/devinet.c9
-rw-r--r--net/ipv4/fib_frontend.c7
-rw-r--r--net/ipv4/inet_connection_sock.c17
-rw-r--r--net/ipv4/netfilter/nf_tproxy_ipv4.c2
-rw-r--r--net/ipv4/route.c22
-rw-r--r--net/ipv4/tcp.c9
-rw-r--r--net/ipv4/tcp_ao.c19
-rw-r--r--net/ipv4/tcp_input.c44
-rw-r--r--net/ipv4/tcp_ipv4.c7
-rw-r--r--net/ipv4/tcp_minisocks.c7
-rw-r--r--net/ipv4/tcp_timer.c6
-rw-r--r--net/ipv6/ila/ila_lwt.c7
-rw-r--r--net/ipv6/ioam6_iptunnel.c8
-rw-r--r--net/ipv6/ip6_fib.c9
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/netfilter.c1
-rw-r--r--net/ipv6/route.c38
-rw-r--r--net/ipv6/rpl_iptunnel.c14
-rw-r--r--net/ipv6/seg6_iptunnel.c14
-rw-r--r--net/ipv6/seg6_local.c8
-rw-r--r--net/ipv6/tcp_ipv6.c10
-rw-r--r--net/ipv6/xfrm6_policy.c8
-rw-r--r--net/mac80211/cfg.c9
-rw-r--r--net/mac80211/driver-ops.c17
-rw-r--r--net/mac80211/he.c10
-rw-r--r--net/mac80211/ieee80211_i.h2
-rw-r--r--net/mac80211/iface.c22
-rw-r--r--net/mac80211/main.c10
-rw-r--r--net/mac80211/mesh.c1
-rw-r--r--net/mac80211/mesh_pathtbl.c13
-rw-r--r--net/mac80211/parse.c2
-rw-r--r--net/mac80211/scan.c31
-rw-r--r--net/mac80211/sta_info.c4
-rw-r--r--net/mac80211/util.c4
-rw-r--r--net/mptcp/pm_netlink.c21
-rw-r--r--net/mptcp/protocol.c10
-rw-r--r--net/ncsi/internal.h2
-rw-r--r--net/ncsi/ncsi-manage.c73
-rw-r--r--net/ncsi/ncsi-rsp.c4
-rw-r--r--net/netfilter/core.c13
-rw-r--r--net/netfilter/ipset/ip_set_core.c92
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c33
-rw-r--r--net/netfilter/nf_conntrack_standalone.c15
-rw-r--r--net/netfilter/nf_hooks_lwtunnel.c70
-rw-r--r--net/netfilter/nf_internals.h6
-rw-r--r--net/netfilter/nf_tables_api.c8
-rw-r--r--net/netfilter/nfnetlink_queue.c2
-rw-r--r--net/netfilter/nft_fib.c8
-rw-r--r--net/netfilter/nft_lookup.c3
-rw-r--r--net/netfilter/nft_meta.c3
-rw-r--r--net/netfilter/nft_payload.c99
-rw-r--r--net/netrom/nr_timer.c3
-rw-r--r--net/openvswitch/conntrack.c7
-rw-r--r--net/sched/act_api.c3
-rw-r--r--net/sched/act_ct.c16
-rw-r--r--net/sched/sch_generic.c1
-rw-r--r--net/sched/sch_multiq.c2
-rw-r--r--net/sched/sch_taprio.c29
-rw-r--r--net/smc/af_smc.c22
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c4
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/sunrpc/svc.c5
-rw-r--r--net/tipc/node.c1
-rw-r--r--net/unix/af_unix.c164
-rw-r--r--net/unix/diag.c12
-rw-r--r--net/wireless/core.c2
-rw-r--r--net/wireless/pmsr.c8
-rw-r--r--net/wireless/rdev-ops.h6
-rw-r--r--net/wireless/scan.c62
-rw-r--r--net/wireless/sysfs.c4
-rw-r--r--net/wireless/util.c7
-rw-r--r--net/xdp/xsk.c5
-rw-r--r--net/xfrm/xfrm_policy.c11
-rw-r--r--rust/kernel/alloc/vec_ext.rs7
-rw-r--r--scripts/Makefile.dtbinst2
-rw-r--r--scripts/Makefile.host2
-rw-r--r--scripts/Makefile.package2
-rw-r--r--scripts/atomic/kerneldoc/sub_and_test2
-rw-r--r--scripts/dtc/Makefile2
-rw-r--r--scripts/gdb/linux/Makefile2
-rw-r--r--scripts/kconfig/confdata.c13
-rw-r--r--scripts/kconfig/expr.c31
-rw-r--r--scripts/kconfig/expr.h6
-rw-r--r--scripts/kconfig/gconf.c3
-rw-r--r--scripts/kconfig/menu.c2
-rw-r--r--scripts/kconfig/symbol.c6
-rwxr-xr-xscripts/link-vmlinux.sh9
-rwxr-xr-xscripts/make_fit.py3
-rwxr-xr-xscripts/mksysmap28
-rw-r--r--scripts/mod/modpost.c5
-rw-r--r--scripts/package/kernel.spec8
-rw-r--r--security/Kconfig.hardening15
-rw-r--r--security/apparmor/audit.c6
-rw-r--r--security/apparmor/include/audit.h2
-rw-r--r--security/integrity/ima/ima.h2
-rw-r--r--security/integrity/ima/ima_policy.c15
-rw-r--r--security/landlock/fs.c13
-rw-r--r--security/security.c6
-rw-r--r--security/selinux/include/audit.h4
-rw-r--r--security/selinux/ss/services.c5
-rw-r--r--security/smack/smack_lsm.c4
-rw-r--r--security/tomoyo/Kconfig2
-rw-r--r--security/tomoyo/common.c2
-rw-r--r--security/yama/yama_lsm.c1
-rw-r--r--sound/core/init.c9
-rw-r--r--sound/core/jack.c21
-rw-r--r--sound/core/pcm_dmaengine.c22
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/core/seq/seq_ump_convert.c60
-rw-r--r--sound/core/ump.c15
-rw-r--r--sound/core/ump_convert.c1
-rw-r--r--sound/drivers/mts64.c1
-rw-r--r--sound/drivers/portman2x4.c1
-rw-r--r--sound/hda/intel-dsp-config.c10
-rw-r--r--sound/oss/dmasound/dmasound_core.c1
-rw-r--r--sound/pci/hda/Kconfig2
-rw-r--r--sound/pci/hda/cs35l41_hda.c6
-rw-r--r--sound/pci/hda/cs35l41_hda_property.c8
-rw-r--r--sound/pci/hda/cs35l56_hda.c9
-rw-r--r--sound/pci/hda/patch_realtek.c64
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c4
-rw-r--r--sound/soc/amd/acp/acp-i2s.c8
-rw-r--r--sound/soc/amd/acp/acp-pci.c12
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c7
-rw-r--r--sound/soc/atmel/atmel-classd.c7
-rw-r--r--sound/soc/codecs/cs35l56-shared.c4
-rw-r--r--sound/soc/codecs/cs42l43-jack.c4
-rw-r--r--sound/soc/codecs/cs42l43.c5
-rw-r--r--sound/soc/codecs/es8326.c8
-rw-r--r--sound/soc/codecs/rt5645.c24
-rw-r--r--sound/soc/codecs/rt5645.h6
-rw-r--r--sound/soc/codecs/rt722-sdca-sdw.c4
-rw-r--r--sound/soc/codecs/wm_adsp.c1
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c3
-rw-r--r--sound/soc/fsl/imx-pcm-dma.c1
-rw-r--r--sound/soc/intel/avs/topology.c19
-rw-r--r--sound/soc/intel/boards/Kconfig2
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c11
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-mtl-match.c2
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c10
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-mt6359.c1
-rw-r--r--sound/soc/mxs/mxs-pcm.c1
-rw-r--r--sound/soc/qcom/qdsp6/q6apm-lpass-dais.c32
-rw-r--r--sound/soc/qcom/sdw.c1
-rw-r--r--sound/soc/rockchip/rockchip_i2s_tdm.c13
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c8
-rw-r--r--sound/soc/soc-topology.c35
-rw-r--r--sound/soc/sof/amd/acp-common.c4
-rw-r--r--sound/soc/sof/amd/acp.c2
-rw-r--r--sound/soc/sof/amd/acp63.c4
-rw-r--r--sound/soc/sof/amd/pci-acp63.c1
-rw-r--r--sound/soc/sof/amd/pci-rmb.c1
-rw-r--r--sound/soc/sof/amd/pci-rn.c1
-rw-r--r--sound/soc/sof/amd/pci-vangogh.c1
-rw-r--r--sound/soc/sof/amd/rembrandt.c4
-rw-r--r--sound/soc/sof/amd/renoir.c4
-rw-r--r--sound/soc/sof/amd/vangogh.c4
-rw-r--r--sound/soc/sof/core.c2
-rw-r--r--sound/soc/sof/imx/imx-common.c1
-rw-r--r--sound/soc/sof/imx/imx8.c3
-rw-r--r--sound/soc/sof/imx/imx8m.c3
-rw-r--r--sound/soc/sof/imx/imx8ulp.c3
-rw-r--r--sound/soc/sof/intel/atom.c1
-rw-r--r--sound/soc/sof/intel/bdw.c1
-rw-r--r--sound/soc/sof/intel/byt.c1
-rw-r--r--sound/soc/sof/intel/hda-codec.c1
-rw-r--r--sound/soc/sof/intel/hda-ctrl.c1
-rw-r--r--sound/soc/sof/intel/hda-dai.c6
-rw-r--r--sound/soc/sof/intel/hda-mlink.c1
-rw-r--r--sound/soc/sof/intel/hda.c1
-rw-r--r--sound/soc/sof/intel/pci-apl.c1
-rw-r--r--sound/soc/sof/intel/pci-cnl.c1
-rw-r--r--sound/soc/sof/intel/pci-icl.c1
-rw-r--r--sound/soc/sof/intel/pci-lnl.c1
-rw-r--r--sound/soc/sof/intel/pci-mtl.c1
-rw-r--r--sound/soc/sof/intel/pci-skl.c1
-rw-r--r--sound/soc/sof/intel/pci-tgl.c1
-rw-r--r--sound/soc/sof/intel/pci-tng.c1
-rw-r--r--sound/soc/sof/ipc4-pcm.c12
-rw-r--r--sound/soc/sof/ipc4-topology.c163
-rw-r--r--sound/soc/sof/ipc4-topology.h6
-rw-r--r--sound/soc/sof/mediatek/mt8186/mt8186.c3
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195.c3
-rw-r--r--sound/soc/sof/mediatek/mtk-adsp-common.c1
-rw-r--r--sound/soc/sof/nocodec.c2
-rw-r--r--sound/soc/sof/sof-acpi-dev.c1
-rw-r--r--sound/soc/sof/sof-audio.c2
-rw-r--r--sound/soc/sof/sof-client-ipc-flood-test.c2
-rw-r--r--sound/soc/sof/sof-client-ipc-kernel-injector.c2
-rw-r--r--sound/soc/sof/sof-client-ipc-msg-injector.c2
-rw-r--r--sound/soc/sof/sof-client-probes.c2
-rw-r--r--sound/soc/sof/sof-of-dev.c1
-rw-r--r--sound/soc/sof/sof-pci-dev.c1
-rw-r--r--sound/soc/sof/sof-utils.c1
-rw-r--r--sound/soc/sof/stream-ipc.c2
-rw-r--r--sound/soc/sof/xtensa/core.c2
-rw-r--r--sound/soc/ti/davinci-mcasp.c9
-rw-r--r--sound/soc/ti/omap-hdmi.c6
-rw-r--r--tools/arch/arm64/include/asm/cputype.h6
-rw-r--r--tools/arch/x86/include/asm/msr-index.h9
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h22
-rw-r--r--tools/bpf/resolve_btfids/main.c2
-rw-r--r--tools/hv/Makefile1
-rw-r--r--tools/include/uapi/asm-generic/unistd.h5
-rw-r--r--tools/include/uapi/drm/i915_drm.h31
-rw-r--r--tools/include/uapi/linux/kvm.h4
-rw-r--r--tools/include/uapi/linux/netdev.h1
-rw-r--r--tools/include/uapi/linux/stat.h4
-rw-r--r--tools/lib/bpf/features.c32
-rw-r--r--tools/perf/Makefile.perf1
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl1
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl3
-rw-r--r--tools/perf/builtin-record.c6
-rw-r--r--tools/perf/builtin-trace.c2
-rw-r--r--tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h8
-rw-r--r--tools/perf/trace/beauty/include/linux/socket.h3
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/fcntl.h14
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/prctl.h22
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/stat.h4
-rw-r--r--tools/power/cpupower/utils/helpers/amd.c26
-rw-r--r--tools/power/x86/turbostat/Makefile6
-rw-r--r--tools/power/x86/turbostat/turbostat.c20
-rw-r--r--tools/testing/cxl/test/mem.c1
-rw-r--r--tools/testing/selftests/alsa/Makefile2
-rw-r--r--tools/testing/selftests/bpf/Makefile2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ringbuf.c56
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_netkit.c94
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c134
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_write.c46
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_link.c35
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi.c50
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c146
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_movsx.c63
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_or_jmp32_k.c41
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c187
-rw-r--r--tools/testing/selftests/cachestat/test_cachestat.c1
-rw-r--r--tools/testing/selftests/drivers/net/virtio_net/config8
-rw-r--r--tools/testing/selftests/fchmodat2/Makefile11
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c1
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount_test.c12
-rw-r--r--tools/testing/selftests/ftrace/config26
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc20
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc3
-rw-r--r--tools/testing/selftests/futex/Makefile2
-rw-r--r--tools/testing/selftests/futex/functional/Makefile2
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi.c2
-rw-r--r--tools/testing/selftests/kvm/Makefile1
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h1
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/ucall.c1
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c15
-rw-r--r--tools/testing/selftests/kvm/riscv/ebreak_test.c1
-rw-r--r--tools/testing/selftests/kvm/riscv/sbi_pmu_test.c1
-rw-r--r--tools/testing/selftests/kvm/s390x/shared_zeropage_test.c111
-rw-r--r--tools/testing/selftests/kvm/x86_64/sev_init2_tests.c4
-rw-r--r--tools/testing/selftests/landlock/fs_test.c45
-rw-r--r--tools/testing/selftests/mm/ksm_functional_tests.c38
-rw-r--r--tools/testing/selftests/mm/map_fixed_noreplace.c24
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile2
-rw-r--r--tools/testing/selftests/net/af_unix/Makefile2
-rw-r--r--tools/testing/selftests/net/af_unix/config3
-rw-r--r--tools/testing/selftests/net/af_unix/msg_oob.c734
-rw-r--r--tools/testing/selftests/net/af_unix/test_unix_oob.c436
-rw-r--r--tools/testing/selftests/net/config2
-rw-r--r--tools/testing/selftests/net/hsr/config1
-rwxr-xr-xtools/testing/selftests/net/hsr/hsr_ping.sh2
-rw-r--r--tools/testing/selftests/net/lib.sh18
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh15
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_lib.sh30
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh6
-rwxr-xr-xtools/testing/selftests/net/mptcp/userspace_pm.sh46
-rwxr-xr-xtools/testing/selftests/net/openvswitch/openvswitch.sh2
-rw-r--r--tools/testing/selftests/net/openvswitch/ovs-dpctl.py2
-rwxr-xr-xtools/testing/selftests/net/srv6_end_dx4_netfilter_test.sh335
-rwxr-xr-xtools/testing/selftests/net/srv6_end_dx6_netfilter_test.sh340
-rw-r--r--tools/testing/selftests/openat2/Makefile14
-rw-r--r--tools/testing/selftests/openat2/openat2_test.c1
-rw-r--r--tools/testing/selftests/seccomp/seccomp_benchmark.c6
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json44
-rw-r--r--virt/kvm/dirty_ring.c3
-rw-r--r--virt/kvm/guest_memfd.c5
-rw-r--r--virt/kvm/kvm_main.c19
1746 files changed, 28709 insertions, 11996 deletions
diff --git a/.editorconfig b/.editorconfig
index 854773350cc5..29a30ccfc07b 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -5,7 +5,6 @@ root = true
[{*.{awk,c,dts,dtsi,dtso,h,mk,s,S},Kconfig,Makefile,Makefile.*}]
charset = utf-8
end_of_line = lf
-trim_trailing_whitespace = true
insert_final_newline = true
indent_style = tab
indent_size = 8
@@ -13,7 +12,6 @@ indent_size = 8
[*.{json,py,rs}]
charset = utf-8
end_of_line = lf
-trim_trailing_whitespace = true
insert_final_newline = true
indent_style = space
indent_size = 4
@@ -26,7 +24,6 @@ indent_size = 8
[*.yaml]
charset = utf-8
end_of_line = lf
-trim_trailing_whitespace = unset
insert_final_newline = true
indent_style = space
indent_size = 2
diff --git a/.mailmap b/.mailmap
index 43cd2995dbc2..a6c619e22efc 100644
--- a/.mailmap
+++ b/.mailmap
@@ -72,6 +72,8 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
Andrzej Hajda <andrzej.hajda@intel.com> <a.hajda@samsung.com>
André Almeida <andrealmeid@igalia.com> <andrealmeid@collabora.com>
Andy Adamson <andros@citi.umich.edu>
+Andy Shevchenko <andy@kernel.org> <andy@smile.org.ua>
+Andy Shevchenko <andy@kernel.org> <ext-andriy.shevchenko@nokia.com>
Anilkumar Kolli <quic_akolli@quicinc.com> <akolli@codeaurora.org>
Anirudh Ghayal <quic_aghayal@quicinc.com> <aghayal@codeaurora.org>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
@@ -217,6 +219,7 @@ Geliang Tang <geliang@kernel.org> <geliang.tang@suse.com>
Geliang Tang <geliang@kernel.org> <geliangtang@xiaomi.com>
Geliang Tang <geliang@kernel.org> <geliangtang@gmail.com>
Geliang Tang <geliang@kernel.org> <geliangtang@163.com>
+Geliang Tang <geliang@kernel.org> <tanggeliang@kylinos.cn>
Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
@@ -337,10 +340,11 @@ Kalyan Thota <quic_kalyant@quicinc.com> <kalyan_t@codeaurora.org>
Karthikeyan Periyasamy <quic_periyasa@quicinc.com> <periyasa@codeaurora.org>
Kathiravan T <quic_kathirav@quicinc.com> <kathirav@codeaurora.org>
Kay Sievers <kay.sievers@vrfy.org>
-Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
-Kees Cook <keescook@chromium.org> <keescook@google.com>
-Kees Cook <keescook@chromium.org> <kees@outflux.net>
-Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
+Kees Cook <kees@kernel.org> <kees.cook@canonical.com>
+Kees Cook <kees@kernel.org> <keescook@chromium.org>
+Kees Cook <kees@kernel.org> <keescook@google.com>
+Kees Cook <kees@kernel.org> <kees@outflux.net>
+Kees Cook <kees@kernel.org> <kees@ubuntu.com>
Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
Kenneth W Chen <kenneth.w.chen@intel.com>
@@ -604,6 +608,7 @@ Simon Kelley <simon@thekelleys.org.uk>
Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
+Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com>
Stefan Wahren <wahrenst@gmx.net> <stefan.wahren@i2se.com>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <stephen@networkplumber.org> <shemminger@linux-foundation.org>
diff --git a/Documentation/ABI/stable/sysfs-bus-nvmem b/Documentation/ABI/stable/sysfs-bus-nvmem
index c399323f37de..aa89adf18bc5 100644
--- a/Documentation/ABI/stable/sysfs-bus-nvmem
+++ b/Documentation/ABI/stable/sysfs-bus-nvmem
@@ -1,6 +1,23 @@
+What: /sys/bus/nvmem/devices/.../force_ro
+Date: June 2024
+KernelVersion: 6.11
+Contact: Marek Vasut <marex@denx.de>
+Description:
+ This read/write attribute allows users to set read-write
+ devices as read-only and back to read-write from userspace.
+ This can be used to unlock and relock write-protection of
+ devices which are generally locked, except during sporadic
+ programming operation.
+ Read returns '0' or '1' for read-write or read-only modes
+ respectively.
+ Write parses one of 'YyTt1NnFf0', or [oO][NnFf] for "on"
+ and "off", i.e. what kstrtobool() supports.
+ Note: This file is only present if CONFIG_NVMEM_SYSFS
+ is enabled.
+
What: /sys/bus/nvmem/devices/.../nvmem
Date: July 2015
-KernelVersion: 4.2
+KernelVersion: 4.2
Contact: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Description:
This file allows user to read/write the raw NVMEM contents.
@@ -20,3 +37,14 @@ Description:
...
*
0001000
+
+What: /sys/bus/nvmem/devices/.../type
+Date: November 2018
+KernelVersion: 5.0
+Contact: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Description:
+ This read-only attribute allows the user to read the NVMEM
+ device type. Supported types are "Unknown", "EEPROM",
+ "OTP", "Battery backed", "FRAM".
+ Note: This file is only present if CONFIG_NVMEM_SYSFS
+ is enabled.
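
For illustration, a minimal shell sketch of how the force_ro, type and nvmem
attributes described above might be exercised (the device name 0-00500 and the
file new-content.bin are placeholders; force_ro is only present when
CONFIG_NVMEM_SYSFS is enabled)::

	# List the NVMEM devices exposed on the bus (names are hardware dependent)
	ls /sys/bus/nvmem/devices/

	# Read the device type, e.g. "EEPROM" or "FRAM"
	cat /sys/bus/nvmem/devices/0-00500/type

	# Temporarily drop the read-only protection, program the device through
	# the raw nvmem file, then lock it again (force_ro takes kstrtobool()
	# style values: 0 = read-write, 1 = read-only)
	echo 0 > /sys/bus/nvmem/devices/0-00500/force_ro
	dd if=new-content.bin of=/sys/bus/nvmem/devices/0-00500/nvmem
	echo 1 > /sys/bus/nvmem/devices/0-00500/force_ro
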
diff --git a/Documentation/ABI/stable/sysfs-driver-misc-cp500 b/Documentation/ABI/stable/sysfs-driver-misc-cp500
new file mode 100644
index 000000000000..525bd18a2db4
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-driver-misc-cp500
@@ -0,0 +1,25 @@
+What: /sys/devices/pciXXXX:XX/0000:XX:XX.X/0000:XX:XX.X/version
+Date: June 2024
+KernelVersion: 6.11
+Contact: Gerhard Engleder <eg@keba.com>
+Description: Version of the FPGA configuration bitstream as printable string.
+ This file is read only.
+Users: KEBA
+
+What: /sys/devices/pciXXXX:XX/0000:XX:XX.X/0000:XX:XX.X/keep_cfg
+Date: June 2024
+KernelVersion: 6.11
+Contact: Gerhard Engleder <eg@keba.com>
+Description: Flag which signals whether the FPGA shall keep or reload the
+ configuration bitstream on reset. The normal behavior, and the default,
+ is to keep the configuration bitstream and only reset the configured logic.
+
+ Reloading configuration on reset enables an update of the
+ configuration bitstream with a simple reboot. Otherwise it is
+ necessary to power cycle the device to reload the new
+ configuration bitstream.
+
+ This file is read/write. The values are as follows:
+ 1 = keep configuration bitstream on reset, default
+ 0 = reload configuration bitstream on reset
+Users: KEBA
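
As a rough usage sketch for the two cp500 attributes above (the PCI device
path is a placeholder; the real path depends on where the card enumerates)::

	# Print the version of the FPGA configuration bitstream
	cat /sys/devices/pci0000:00/0000:00:10.0/0000:01:00.0/version

	# Request that the FPGA reloads its configuration bitstream on the
	# next reset, e.g. to activate a newly written bitstream via reboot
	echo 0 > /sys/devices/pci0000:00/0000:00:10.0/0000:01:00.0/keep_cfg

	# Restore the default behaviour (keep bitstream, only reset the logic)
	echo 1 > /sys/devices/pci0000:00/0000:00:10.0/0000:01:00.0/keep_cfg
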
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-inv_icm42600 b/Documentation/ABI/testing/sysfs-bus-iio-inv_icm42600
new file mode 100644
index 000000000000..7eeacfb7650d
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-inv_icm42600
@@ -0,0 +1,18 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_power_mode
+KernelVersion: 6.11
+Contact: linux-iio@vger.kernel.org
+Description:
+ Accelerometer power mode. Setting this attribute sets the
+ requested power mode, which is used if the current ODR
+ supports it; if the ODR supports only 1 mode, that power
+ mode is enforced. Reading this attribute returns the
+ current accelerometer power mode if the sensor is on, or
+ the requested value if the sensor is off. The real and
+ requested values can differ for ODRs supporting only 1 mode.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_power_mode_available
+KernelVersion: 6.11
+Contact: linux-iio@vger.kernel.org
+Description:
+ List of available accelerometer power modes that can be set in
+ the in_accel_power_mode attribute.
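
A short shell sketch of how these two attributes could be used together (the
iio:device0 index and the mode name are assumptions; always read the
_available attribute first to see what the driver actually offers)::

	# Discover the accelerometer power modes offered by the driver
	cat /sys/bus/iio/devices/iio:device0/in_accel_power_mode_available

	# Request one of the listed modes
	echo low-noise > /sys/bus/iio/devices/iio:device0/in_accel_power_mode

	# Read back the current mode (or the requested one if the sensor is off)
	cat /sys/bus/iio/devices/iio:device0/in_accel_power_mode
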
diff --git a/Documentation/admin-guide/LSM/tomoyo.rst b/Documentation/admin-guide/LSM/tomoyo.rst
index 4bc9c2b4da6f..bdb2c2e2a1b2 100644
--- a/Documentation/admin-guide/LSM/tomoyo.rst
+++ b/Documentation/admin-guide/LSM/tomoyo.rst
@@ -9,8 +9,8 @@ TOMOYO is a name-based MAC extension (LSM module) for the Linux kernel.
LiveCD-based tutorials are available at
-http://tomoyo.sourceforge.jp/1.8/ubuntu12.04-live.html
-http://tomoyo.sourceforge.jp/1.8/centos6-live.html
+https://tomoyo.sourceforge.net/1.8/ubuntu12.04-live.html
+https://tomoyo.sourceforge.net/1.8/centos6-live.html
Though these tutorials use the non-LSM version of TOMOYO, they are useful
for understanding what TOMOYO is.
@@ -21,45 +21,32 @@ How to enable TOMOYO?
Build the kernel with ``CONFIG_SECURITY_TOMOYO=y`` and pass ``security=tomoyo`` on
kernel's command line.
-Please see http://tomoyo.osdn.jp/2.5/ for details.
+Please see https://tomoyo.sourceforge.net/2.6/ for details.
Where is documentation?
=======================
User <-> Kernel interface documentation is available at
-https://tomoyo.osdn.jp/2.5/policy-specification/index.html .
+https://tomoyo.sourceforge.net/2.6/policy-specification/index.html .
Materials we prepared for seminars and symposiums are available at
-https://osdn.jp/projects/tomoyo/docs/?category_id=532&language_id=1 .
+https://sourceforge.net/projects/tomoyo/files/docs/ .
Below lists are chosen from three aspects.
What is TOMOYO?
TOMOYO Linux Overview
- https://osdn.jp/projects/tomoyo/docs/lca2009-takeda.pdf
+ https://sourceforge.net/projects/tomoyo/files/docs/lca2009-takeda.pdf
TOMOYO Linux: pragmatic and manageable security for Linux
- https://osdn.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf
+ https://sourceforge.net/projects/tomoyo/files/docs/freedomhectaipei-tomoyo.pdf
TOMOYO Linux: A Practical Method to Understand and Protect Your Own Linux Box
- https://osdn.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf
+ https://sourceforge.net/projects/tomoyo/files/docs/PacSec2007-en-no-demo.pdf
What can TOMOYO do?
Deep inside TOMOYO Linux
- https://osdn.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf
+ https://sourceforge.net/projects/tomoyo/files/docs/lca2009-kumaneko.pdf
The role of "pathname based access control" in security.
- https://osdn.jp/projects/tomoyo/docs/lfj2008-bof.pdf
+ https://sourceforge.net/projects/tomoyo/files/docs/lfj2008-bof.pdf
History of TOMOYO?
Realities of Mainlining
- https://osdn.jp/projects/tomoyo/docs/lfj2008.pdf
-
-What is future plan?
-====================
-
-We believe that inode based security and name based security are complementary
-and both should be used together. But unfortunately, so far, we cannot enable
-multiple LSM modules at the same time. We feel sorry that you have to give up
-SELinux/SMACK/AppArmor etc. when you want to use TOMOYO.
-
-We hope that LSM becomes stackable in future. Meanwhile, you can use non-LSM
-version of TOMOYO, available at http://tomoyo.osdn.jp/1.8/ .
-LSM version of TOMOYO is a subset of non-LSM version of TOMOYO. We are planning
-to port non-LSM version's functionalities to LSM versions.
+ https://sourceforge.net/projects/tomoyo/files/docs/lfj2008.pdf
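
Following the "How to enable TOMOYO?" instructions above, a minimal sketch of
verifying that TOMOYO registered after booting with security=tomoyo (assuming
securityfs is mounted at its usual location)::

	# TOMOYO should be listed among the active LSMs
	cat /sys/kernel/security/lsm

	# When active, TOMOYO exposes its policy interface under securityfs
	ls /sys/kernel/security/tomoyo/
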
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 500cfa776225..27ec49af1bf2 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -788,25 +788,6 @@
Documentation/networking/netconsole.rst for an
alternative.
- <DEVNAME>:<n>.<n>[,options]
- Use the specified serial port on the serial core bus.
- The addressing uses DEVNAME of the physical serial port
- device, followed by the serial core controller instance,
- and the serial port instance. The options are the same
- as documented for the ttyS addressing above.
-
- The mapping of the serial ports to the tty instances
- can be viewed with:
-
- $ ls -d /sys/bus/serial-base/devices/*:*.*/tty/*
- /sys/bus/serial-base/devices/00:04:0.0/tty/ttyS0
-
- In the above example, the console can be addressed with
- console=00:04:0.0. Note that a console addressed this
- way will only get added when the related device driver
- is ready. The use of an earlycon parameter in addition to
- the console may be desired for console output early on.
-
uart[8250],io,<addr>[,options]
uart[8250],mmio,<addr>[,options]
uart[8250],mmio16,<addr>[,options]
@@ -1921,6 +1902,28 @@
Format:
<bus_id>,<clkrate>
+ i2c_touchscreen_props= [HW,ACPI,X86]
+ Set device-properties for ACPI-enumerated I2C-attached
+ touchscreen, to e.g. fix coordinates of upside-down
+ mounted touchscreens. If you need this option please
+ submit a drivers/platform/x86/touchscreen_dmi.c patch
+ adding a DMI quirk for this.
+
+ Format:
+ <ACPI_HW_ID>:<prop_name>=<val>[:prop_name=val][:...]
+ Where <val> is one of:
+ Omit "=<val>" entirely Set a boolean device-property
+ Unsigned number Set a u32 device-property
+ Anything else Set a string device-property
+
+ Examples (split over multiple lines):
+ i2c_touchscreen_props=GDIX1001:touchscreen-inverted-x:
+ touchscreen-inverted-y
+
+ i2c_touchscreen_props=MSSL1680:touchscreen-size-x=1920:
+ touchscreen-size-y=1080:touchscreen-inverted-y:
+ firmware-name=gsl1680-vendor-model.fw:silead,home-button
+
i8042.debug [HW] Toggle i8042 debug mode
i8042.unmask_kbd_data
[HW] Enable printing of interrupt data from the KBD port
@@ -2170,12 +2173,6 @@
Format: 0 | 1
Default set by CONFIG_INIT_ON_FREE_DEFAULT_ON.
- init_mlocked_on_free= [MM] Fill freed userspace memory with zeroes if
- it was mlock'ed and not explicitly munlock'ed
- afterwards.
- Format: 0 | 1
- Default set by CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON
-
init_pkru= [X86] Specify the default memory protection keys rights
register contents for all processes. 0x55555554 by
default (disallow access to all but pkey 0). Can
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 076443cc10a6..d414d3f5592a 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -467,11 +467,11 @@ anon_fault_fallback_charge
instead falls back to using huge pages with lower orders or
small pages even though the allocation was successful.
-anon_swpout
+swpout
is incremented every time a huge page is swapped out in one
piece without splitting.
-anon_swpout_fallback
+swpout_fallback
is incremented if a huge page has to be split before swapout.
 This usually happens because the kernel failed to allocate
 contiguous swap space for the huge page.
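
The renamed counters are part of the per-size THP statistics; a hedged shell
sketch for reading them (the hugepages-<size>kB/stats layout is assumed from
the surrounding transhuge documentation)::

	# Compare huge pages swapped out in one piece with those split first
	grep . /sys/kernel/mm/transparent_hugepage/hugepages-*kB/stats/swpout
	grep . /sys/kernel/mm/transparent_hugepage/hugepages-*kB/stats/swpout_fallback
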
diff --git a/Documentation/arch/riscv/uabi.rst b/Documentation/arch/riscv/uabi.rst
index 54d199dce78b..2b420bab0527 100644
--- a/Documentation/arch/riscv/uabi.rst
+++ b/Documentation/arch/riscv/uabi.rst
@@ -65,4 +65,6 @@ the extension, or may have deliberately removed it from the listing.
Misaligned accesses
-------------------
-Misaligned accesses are supported in userspace, but they may perform poorly.
+Misaligned scalar accesses are supported in userspace, but they may perform
+poorly. Misaligned vector accesses are only supported if the Zicclsm extension
+is supported.
diff --git a/Documentation/cdrom/cdrom-standard.rst b/Documentation/cdrom/cdrom-standard.rst
index 7964fe134277..6c1303cff159 100644
--- a/Documentation/cdrom/cdrom-standard.rst
+++ b/Documentation/cdrom/cdrom-standard.rst
@@ -217,7 +217,7 @@ current *struct* is::
int (*media_changed)(struct cdrom_device_info *, int);
int (*tray_move)(struct cdrom_device_info *, int);
int (*lock_door)(struct cdrom_device_info *, int);
- int (*select_speed)(struct cdrom_device_info *, int);
+ int (*select_speed)(struct cdrom_device_info *, unsigned long);
int (*get_last_session) (struct cdrom_device_info *,
struct cdrom_multisession *);
int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *);
@@ -396,7 +396,7 @@ action need be taken, and the return value should be 0.
::
- int select_speed(struct cdrom_device_info *cdi, int speed)
+ int select_speed(struct cdrom_device_info *cdi, unsigned long speed)
Some CD-ROM drives are capable of changing their head-speed. There
are several reasons for changing the speed of a CD-ROM drive. Badly
diff --git a/Documentation/core-api/swiotlb.rst b/Documentation/core-api/swiotlb.rst
index 5ad2c9ca85bc..cf06bae44ff8 100644
--- a/Documentation/core-api/swiotlb.rst
+++ b/Documentation/core-api/swiotlb.rst
@@ -192,7 +192,7 @@ alignment larger than PAGE_SIZE.
Dynamic swiotlb
---------------
-When CONFIG_DYNAMIC_SWIOTLB is enabled, swiotlb can do on-demand expansion of
+When CONFIG_SWIOTLB_DYNAMIC is enabled, swiotlb can do on-demand expansion of
the amount of memory available for allocation as bounce buffers. If a bounce
buffer request fails due to lack of available space, an asynchronous background
task is kicked off to allocate memory from general system memory and turn it
diff --git a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
index d2dce238ff5d..3e996346b264 100644
--- a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
@@ -54,11 +54,10 @@ unevaluatedProperties: false
examples:
- |
- mlahb: ahb@38000000 {
+ ahb {
compatible = "st,mlahb", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- reg = <0x10000000 0x40000>;
ranges;
dma-ranges = <0x00000000 0x38000000 0x10000>,
<0x10000000 0x10000000 0x60000>,
diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml
index c6d0d8d81ed4..c2a158b75e49 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.yaml
+++ b/Documentation/devicetree/bindings/arm/sunxi.yaml
@@ -57,17 +57,17 @@ properties:
- const: allwinner,sun8i-v3s
- description: Anbernic RG35XX (2024)
- - items:
+ items:
- const: anbernic,rg35xx-2024
- const: allwinner,sun50i-h700
- description: Anbernic RG35XX Plus
- - items:
+ items:
- const: anbernic,rg35xx-plus
- const: allwinner,sun50i-h700
- description: Anbernic RG35XX H
- - items:
+ items:
- const: anbernic,rg35xx-h
- const: allwinner,sun50i-h700
diff --git a/Documentation/devicetree/bindings/counter/ti-eqep.yaml b/Documentation/devicetree/bindings/counter/ti-eqep.yaml
index 85f1ff83afe7..c882ab5fcf1f 100644
--- a/Documentation/devicetree/bindings/counter/ti-eqep.yaml
+++ b/Documentation/devicetree/bindings/counter/ti-eqep.yaml
@@ -11,7 +11,9 @@ maintainers:
properties:
compatible:
- const: ti,am3352-eqep
+ enum:
+ - ti,am3352-eqep
+ - ti,am62-eqep
reg:
maxItems: 1
@@ -21,19 +23,35 @@ properties:
maxItems: 1
clocks:
- description: The clock that determines the SYSCLKOUT rate for the eQEP
- peripheral.
+ description: The functional and interface clock that determines the clock
+ rate for the eQEP peripheral.
maxItems: 1
clock-names:
const: sysclkout
+ power-domains:
+ maxItems: 1
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ti,am62-eqep
+ then:
+ properties:
+ clock-names: false
+
+ required:
+ - power-domains
+
required:
- compatible
- reg
- interrupts
- clocks
- - clock-names
additionalProperties: false
@@ -43,7 +61,6 @@ examples:
compatible = "ti,am3352-eqep";
reg = <0x180 0x80>;
clocks = <&l4ls_gclk>;
- clock-names = "sysclkout";
interrupts = <79>;
};
diff --git a/Documentation/devicetree/bindings/dma/fsl,edma.yaml b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
index acfb4b2ee7a9..d54140f18d34 100644
--- a/Documentation/devicetree/bindings/dma/fsl,edma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
@@ -59,8 +59,8 @@ properties:
- 3
dma-channels:
- minItems: 1
- maxItems: 64
+ minimum: 1
+ maximum: 64
clocks:
minItems: 1
diff --git a/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml b/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
index b1c13bab2472..b2d19cfb87ad 100644
--- a/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
@@ -77,7 +77,7 @@ required:
- clocks
allOf:
- - $ref: i2c-controller.yaml
+ - $ref: /schemas/i2c/i2c-controller.yaml#
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml b/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
index ab151c9db219..580003cdfff5 100644
--- a/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
+++ b/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
@@ -21,7 +21,7 @@ description: |
google,cros-ec-spi or google,cros-ec-i2c.
allOf:
- - $ref: i2c-controller.yaml#
+ - $ref: /schemas/i2c/i2c-controller.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/iio/adc/adc.yaml b/Documentation/devicetree/bindings/iio/adc/adc.yaml
index 36775f8f71df..8e7835cf36fd 100644
--- a/Documentation/devicetree/bindings/iio/adc/adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adc.yaml
@@ -38,6 +38,25 @@ properties:
The first value specifies the positive input pin, the second
specifies the negative input pin.
+ single-channel:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ When devices combine single-ended and differential channels, allow the
+ channel for a single element to be specified, independent of reg (as for
+ differential channels). If this and diff-channels are not present reg
+ differential channels). If this and diff-channels are not present, reg
+
+ common-mode-channel:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Some ADCs have differential input pins that can be used to measure
+ single-ended or pseudo-differential inputs. This property can be used
+ in addition to single-channel to signal software that this channel is
+ not differential but still specify two inputs.
+
+ The input pair is specified by setting single-channel to the positive
+ input pin and common-mode-channel to the negative pin.
+
settling-time-us:
description:
Time between enabling the channel and first stable readings.
@@ -50,4 +69,15 @@ properties:
device design and can interact with other characteristics such as
settling time.
+anyOf:
+ - oneOf:
+ - required:
+ - reg
+ - diff-channels
+ - required:
+ - reg
+ - single-channel
+ - required:
+ - reg
+
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7173.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7173.yaml
index ea6cfcd0aff4..17c5d39cc2c1 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7173.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7173.yaml
@@ -19,7 +19,18 @@ description: |
primarily for measurement of signals close to DC but also delivers
outstanding performance with input bandwidths out to ~10kHz.
+ Analog Devices AD411x ADC's:
+ The AD411X family encompasses a series of low power, low noise, 24-bit,
+ sigma-delta analog-to-digital converters that offer a versatile range of
+ specifications. They integrate an analog front end suitable for processing
+ fully differential/single-ended and bipolar voltage inputs.
+
Datasheets for supported chips:
+ https://www.analog.com/media/en/technical-documentation/data-sheets/AD4111.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/AD4112.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/AD4114.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/AD4115.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/AD4116.pdf
https://www.analog.com/media/en/technical-documentation/data-sheets/AD7172-2.pdf
https://www.analog.com/media/en/technical-documentation/data-sheets/AD7172-4.pdf
https://www.analog.com/media/en/technical-documentation/data-sheets/AD7173-8.pdf
@@ -31,6 +42,11 @@ description: |
properties:
compatible:
enum:
+ - adi,ad4111
+ - adi,ad4112
+ - adi,ad4114
+ - adi,ad4115
+ - adi,ad4116
- adi,ad7172-2
- adi,ad7172-4
- adi,ad7173-8
@@ -129,10 +145,56 @@ patternProperties:
maximum: 15
diff-channels:
+ description: |
+ This property is used for defining the inputs of a differential
+ voltage channel. The first value is the positive input and the second
+ value is the negative input of the channel.
+
+ Family AD411x supports a dedicated VINCOM voltage input.
+ To select it set the second channel to 16.
+ (VIN2, VINCOM) -> diff-channels = <2 16>
+
+ There are special values that can be selected besides the voltage
+ analog inputs:
+ 21: REF+
+ 22: REF−
+
+ Supported only by AD7172-2, AD7172-4, AD7175-2, AD7175-8, AD7177-2,
+ must be paired together and can be used to monitor the power supply
+ of the ADC:
+ 19: ((AVDD1 − AVSS)/5)+
+ 20: ((AVDD1 − AVSS)/5)−
+
items:
minimum: 0
maximum: 31
+ single-channel:
+ description: |
+ This property is used for defining a current channel or the positive
+ input of a voltage channel (single-ended or pseudo-differential).
+
+ Models AD4111 and AD4112 support current channels.
+ Example: (IIN2+, IIN2−) -> single-channel = <2>
+ To correctly configure a current channel set the "adi,current-channel"
+ property to true.
+
+ To configure a single-ended/pseudo-differential channel set the
+ "common-mode-channel" property to the desired negative voltage input.
+
+ When used as a voltage channel, special inputs are valid as well.
+ minimum: 0
+ maximum: 31
+
+ common-mode-channel:
+ description:
+ This property is used for defining the negative input of a
+ single-ended or pseudo-differential voltage channel.
+
+ Special inputs are valid as well.
+ minimum: 0
+ maximum: 31
+
adi,reference-select:
description: |
Select the reference source to use when converting on
@@ -154,9 +216,31 @@ patternProperties:
- avdd
default: refout-avss
+ adi,current-channel:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: |
+ Signal that the selected inputs are current channels.
+ Only available on AD4111 and AD4112.
+
required:
- reg
- - diff-channels
+
+ allOf:
+ - oneOf:
+ - required: [single-channel]
+ properties:
+ diff-channels: false
+ - required: [diff-channels]
+ properties:
+ single-channel: false
+ adi,current-channel: false
+ common-mode-channel: false
+
+ - if:
+ required: [common-mode-channel]
+ then:
+ properties:
+ adi,current-channel: false
required:
- compatible
@@ -166,7 +250,6 @@ allOf:
- $ref: /schemas/spi/spi-peripheral-props.yaml#
# Only ad7172-4, ad7173-8 and ad7175-8 support vref2
- # Other models have [0-3] channel registers
- if:
properties:
compatible:
@@ -187,6 +270,37 @@ allOf:
- vref
- refout-avss
- avdd
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad4114
+ - adi,ad4115
+ - adi,ad4116
+ - adi,ad7173-8
+ - adi,ad7175-8
+ then:
+ patternProperties:
+ "^channel@[0-9a-f]$":
+ properties:
+ reg:
+ maximum: 15
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad7172-2
+ - adi,ad7175-2
+ - adi,ad7176-2
+ - adi,ad7177-2
+ then:
+ patternProperties:
+ "^channel@[0-9a-f]$":
+ properties:
reg:
maximum: 3
@@ -211,6 +325,34 @@ allOf:
- adi,reference-select
- if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad4111
+ - adi,ad4112
+ - adi,ad4114
+ - adi,ad4115
+ - adi,ad4116
+ then:
+ properties:
+ avdd2-supply: false
+
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ enum:
+ - adi,ad4111
+ - adi,ad4112
+ then:
+ patternProperties:
+ "^channel@[0-9a-f]$":
+ properties:
+ adi,current-channel: false
+
+ - if:
anyOf:
- required: [clock-names]
- required: [clocks]
@@ -221,6 +363,7 @@ allOf:
unevaluatedProperties: false
examples:
+ # Example AD7173-8 with external reference connected to REF+/REF-:
- |
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
@@ -277,3 +420,50 @@ examples:
};
};
};
+
+ # Example AD4111 with current channel and single-ended channel:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "adi,ad4111";
+ reg = <0>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-names = "rdy";
+ interrupt-parent = <&gpio>;
+ spi-max-frequency = <5000000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ #clock-cells = <0>;
+
+ channel@0 {
+ reg = <0>;
+ bipolar;
+ diff-channels = <4 5>;
+ };
+
+ // Single ended channel VIN2/VINCOM
+ channel@1 {
+ reg = <1>;
+ bipolar;
+ single-channel = <2>;
+ common-mode-channel = <16>;
+ };
+
+ // Current channel IN2+/IN2-
+ channel@2 {
+ reg = <2>;
+ single-channel = <2>;
+ adi,current-channel;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
index 16def2985ab4..a03da9489ed9 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
@@ -21,8 +21,15 @@ properties:
- adi,ad7190
- adi,ad7192
- adi,ad7193
+ - adi,ad7194
- adi,ad7195
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
reg:
maxItems: 1
@@ -41,6 +48,11 @@ properties:
interrupts:
maxItems: 1
+ aincom-supply:
+ description: |
+ AINCOM voltage supply. Analog inputs AINx are referenced to this input
+ when configured for pseudo-differential operation.
+
dvdd-supply:
description: DVdd voltage supply
@@ -84,6 +96,42 @@ properties:
description: see Documentation/devicetree/bindings/iio/adc/adc.yaml
type: boolean
+patternProperties:
+ "^channel@[0-9a-f]+$":
+ type: object
+ $ref: adc.yaml
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ description: The channel index.
+ minimum: 0
+ maximum: 271
+
+ diff-channels:
+ description:
+ Both inputs can be connected to pins AIN1 to AIN16 by choosing the
+ appropriate value from 1 to 16.
+ items:
+ minimum: 1
+ maximum: 16
+
+ single-channel:
+ description:
+ Positive input can be connected to pins AIN1 to AIN16 by choosing the
+ appropriate value from 1 to 16. Negative input is connected to AINCOM.
+ items:
+ minimum: 1
+ maximum: 16
+
+ oneOf:
+ - required:
+ - reg
+ - diff-channels
+ - required:
+ - reg
+ - single-channel
+
required:
- compatible
- reg
@@ -98,6 +146,17 @@ required:
allOf:
- $ref: /schemas/spi/spi-peripheral-props.yaml#
+ - if:
+ properties:
+ compatible:
+ enum:
+ - adi,ad7190
+ - adi,ad7192
+ - adi,ad7193
+ - adi,ad7195
+ then:
+ patternProperties:
+ "^channel@[0-9a-f]+$": false
unevaluatedProperties: false
@@ -117,6 +176,7 @@ examples:
clock-names = "mclk";
interrupts = <25 0x2>;
interrupt-parent = <&gpio>;
+ aincom-supply = <&aincom>;
dvdd-supply = <&dvdd>;
avdd-supply = <&avdd>;
vref-supply = <&vref>;
@@ -127,3 +187,38 @@ examples:
adi,burnout-currents-enable;
};
};
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "adi,ad7194";
+ reg = <0>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ spi-max-frequency = <1000000>;
+ spi-cpol;
+ spi-cpha;
+ clocks = <&ad7192_mclk>;
+ clock-names = "mclk";
+ interrupts = <25 0x2>;
+ interrupt-parent = <&gpio>;
+ aincom-supply = <&aincom>;
+ dvdd-supply = <&dvdd>;
+ avdd-supply = <&avdd>;
+ vref-supply = <&vref>;
+
+ channel@0 {
+ reg = <0>;
+ diff-channels = <1 6>;
+ };
+
+ channel@1 {
+ reg = <1>;
+ single-channel = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml
new file mode 100644
index 000000000000..899b777017ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml
@@ -0,0 +1,148 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/adi,ad7380.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices Simultaneous Sampling Analog to Digital Converters
+
+maintainers:
+ - Michael Hennerich <Michael.Hennerich@analog.com>
+ - Nuno Sá <nuno.sa@analog.com>
+
+description: |
+ * https://www.analog.com/en/products/ad7380.html
+ * https://www.analog.com/en/products/ad7381.html
+ * https://www.analog.com/en/products/ad7383.html
+ * https://www.analog.com/en/products/ad7384.html
+ * https://www.analog.com/en/products/ad7380-4.html
+ * https://www.analog.com/en/products/ad7381-4.html
+ * https://www.analog.com/en/products/ad7383-4.html
+ * https://www.analog.com/en/products/ad7384-4.html
+
+$ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+ compatible:
+ enum:
+ - adi,ad7380
+ - adi,ad7381
+ - adi,ad7383
+ - adi,ad7384
+ - adi,ad7380-4
+ - adi,ad7381-4
+ - adi,ad7383-4
+ - adi,ad7384-4
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 80000000
+ spi-cpol: true
+ spi-cpha: true
+
+ vcc-supply:
+ description: A 3V to 3.6V supply that powers the chip.
+
+ vlogic-supply:
+ description:
+ A 1.65V to 3.6V supply for the logic pins.
+
+ refio-supply:
+ description:
+ A 2.5V to 3.3V supply for the external reference voltage. When omitted,
+ the internal 2.5V reference is used.
+
+ aina-supply:
+ description:
+ The common mode voltage supply for the AINA- pin on pseudo-differential
+ chips.
+
+ ainb-supply:
+ description:
+ The common mode voltage supply for the AINB- pin on pseudo-differential
+ chips.
+
+ ainc-supply:
+ description:
+ The common mode voltage supply for the AINC- pin on pseudo-differential
+ chips.
+
+ aind-supply:
+ description:
+ The common mode voltage supply for the AIND- pin on pseudo-differential
+ chips.
+
+ interrupts:
+ description:
+ When the device is using 1-wire mode, this property is used to optionally
+ specify the ALERT interrupt.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - vcc-supply
+ - vlogic-supply
+
+unevaluatedProperties: false
+
+allOf:
+ # pseudo-differential chips require common mode voltage supplies,
+ # true differential chips don't use them
+ - if:
+ properties:
+ compatible:
+ enum:
+ - adi,ad7383
+ - adi,ad7384
+ - adi,ad7383-4
+ - adi,ad7384-4
+ then:
+ required:
+ - aina-supply
+ - ainb-supply
+ else:
+ properties:
+ aina-supply: false
+ ainb-supply: false
+ - if:
+ properties:
+ compatible:
+ enum:
+ - adi,ad7383-4
+ - adi,ad7384-4
+ then:
+ required:
+ - ainc-supply
+ - aind-supply
+ else:
+ properties:
+ ainc-supply: false
+ aind-supply: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "adi,ad7380";
+ reg = <0>;
+
+ spi-cpol;
+ spi-cpha;
+ spi-max-frequency = <80000000>;
+
+ interrupts = <27 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-parent = <&gpio0>;
+
+ vcc-supply = <&supply_3_3V>;
+ vlogic-supply = <&supply_3_3V>;
+ refio-supply = <&supply_2_5V>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
index 7fa46df1f4fb..00fdaed11cbd 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
@@ -11,6 +11,7 @@ maintainers:
description: |
Analog Devices AD7606 Simultaneous Sampling ADC
+ https://www.analog.com/media/en/technical-documentation/data-sheets/AD7605-4.pdf
https://www.analog.com/media/en/technical-documentation/data-sheets/ad7606_7606-6_7606-4.pdf
https://www.analog.com/media/en/technical-documentation/data-sheets/AD7606B.pdf
https://www.analog.com/media/en/technical-documentation/data-sheets/AD7616.pdf
@@ -19,9 +20,9 @@ properties:
compatible:
enum:
- adi,ad7605-4
- - adi,ad7606-8
- - adi,ad7606-6
- adi,ad7606-4
+ - adi,ad7606-6
+ - adi,ad7606-8 # Referred to as AD7606 (without -8) in the datasheet
- adi,ad7606b
- adi,ad7616
diff --git a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.yaml b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.yaml
index 7e8328e9ce13..f748f3a60b35 100644
--- a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.yaml
@@ -66,6 +66,9 @@ properties:
nvmem-cell-names:
const: temperature_calib
+ power-domains:
+ maxItems: 1
+
allOf:
- if:
properties:
diff --git a/Documentation/devicetree/bindings/iio/adc/mediatek,mt6359-auxadc.yaml b/Documentation/devicetree/bindings/iio/adc/mediatek,mt6359-auxadc.yaml
new file mode 100644
index 000000000000..6497c416094d
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/mediatek,mt6359-auxadc.yaml
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/mediatek,mt6359-auxadc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT6350 series PMIC AUXADC
+
+maintainers:
+ - AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+
+description:
+ The Auxiliary Analog/Digital Converter (AUXADC) is an ADC found
+ in some MediaTek PMICs. It performs various PMIC related measurements,
+ such as battery and PMIC internal voltage regulator temperatures and
+ accessory detection resistance (usually for a 3.5mm audio jack), as
+ well as voltages for various PMIC internal components.
+
+properties:
+ compatible:
+ enum:
+ - mediatek,mt6357-auxadc
+ - mediatek,mt6358-auxadc
+ - mediatek,mt6359-auxadc
+
+ "#io-channel-cells":
+ const: 1
+
+required:
+ - compatible
+ - "#io-channel-cells"
+
+additionalProperties: false
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
index c1b1324fa132..2722edab1d9a 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
@@ -246,6 +246,10 @@ patternProperties:
From common IIO binding. Used to pipe external sigma delta
modulator or internal ADC output to DFSDM channel.
+ port:
+ $ref: /schemas/sound/audio-graph-port.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- "#sound-dai-cells"
diff --git a/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml b/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml
index d605999ffe28..718f633c6e04 100644
--- a/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml
@@ -18,6 +18,7 @@ properties:
enum:
- ti,ads1015
- ti,ads1115
+ - ti,tla2021
- ti,tla2024
reg:
diff --git a/Documentation/devicetree/bindings/iio/adc/ti,ads1119.yaml b/Documentation/devicetree/bindings/iio/adc/ti,ads1119.yaml
new file mode 100644
index 000000000000..ba6850ab1f90
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/ti,ads1119.yaml
@@ -0,0 +1,155 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/ti,ads1119.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments ADS1119 ADC
+
+maintainers:
+ - João Paulo Gonçalves <jpaulo.silvagoncalves@gmail.com>
+
+description:
+ The TI ADS1119 is a precision 16-bit ADC over I2C that offers single-ended and
+ differential measurements using a multiplexed input. It features a programmable
+ gain, a programmable sample rate, an internal oscillator and voltage reference,
+ and a 50/60Hz rejection filter.
+
+properties:
+ compatible:
+ const: ti,ads1119
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ reset-gpios:
+ maxItems: 1
+
+ avdd-supply: true
+ dvdd-supply: true
+
+ vref-supply:
+ description:
+ ADC external reference voltage (VREF).
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ "#io-channel-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - avdd-supply
+ - dvdd-supply
+
+patternProperties:
+ "^channel@([0-6])$":
+ $ref: adc.yaml
+ type: object
+ properties:
+ reg:
+ minimum: 0
+ maximum: 6
+
+ diff-channels:
+ description:
+ Differential input channels AIN0-AIN1, AIN2-AIN3 and AIN1-AIN2.
+ oneOf:
+ - items:
+ - const: 0
+ - const: 1
+ - items:
+ - const: 2
+ - const: 3
+ - items:
+ - const: 1
+ - const: 2
+
+ single-channel:
+ description:
+ Single-ended input channels AIN0, AIN1, AIN2 and AIN3.
+ minimum: 0
+ maximum: 3
+
+ oneOf:
+ - required:
+ - diff-channels
+ - required:
+ - single-channel
+
+ required:
+ - reg
+
+ unevaluatedProperties: false
+
+additionalProperties: false
+
+examples:
+ - |
+
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@40 {
+ compatible = "ti,ads1119";
+ reg = <0x40>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpio1 10 GPIO_ACTIVE_LOW>;
+ avdd-supply = <&reg_avdd_ads1119>;
+ dvdd-supply = <&reg_dvdd_ads1119>;
+ vref-supply = <&reg_vref_ads1119>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #io-channel-cells = <1>;
+
+ channel@0 {
+ reg = <0>;
+ single-channel = <0>;
+ };
+
+ channel@1 {
+ reg = <1>;
+ diff-channels = <0 1>;
+ };
+
+ channel@2 {
+ reg = <2>;
+ single-channel = <3>;
+ };
+
+ channel@3 {
+ reg = <3>;
+ single-channel = <1>;
+ };
+
+ channel@4 {
+ reg = <4>;
+ single-channel = <2>;
+ };
+
+ channel@5 {
+ reg = <5>;
+ diff-channels = <1 2>;
+ };
+
+ channel@6 {
+ reg = <6>;
+ diff-channels = <2 3>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/chemical/sciosense,ens160.yaml b/Documentation/devicetree/bindings/iio/chemical/sciosense,ens160.yaml
new file mode 100644
index 000000000000..267033a68abb
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/chemical/sciosense,ens160.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/chemical/sciosense,ens160.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ScioSense ENS160 multi-gas sensor
+
+maintainers:
+ - Gustavo Silva <gustavograzs@gmail.com>
+
+description: |
+ Digital Multi-Gas Sensor for Monitoring Indoor Air Quality.
+
+ Datasheet:
+ https://www.sciosense.com/wp-content/uploads/2023/12/ENS160-Datasheet.pdf
+
+properties:
+ compatible:
+ enum:
+ - sciosense,ens160
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply: true
+ vddio-supply: true
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gas-sensor@52 {
+ compatible = "sciosense,ens160";
+ reg = <0x52>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <19 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gas-sensor@0 {
+ compatible = "sciosense,ens160";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ interrupt-parent = <&gpio>;
+ interrupts = <19 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
index 96340a05754c..fc8b97f82077 100644
--- a/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
@@ -13,13 +13,17 @@ maintainers:
description: |
Bindings for the Analog Devices AD3552R DAC device and similar.
Datasheet can be found here:
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad3541r.pdf
https://www.analog.com/media/en/technical-documentation/data-sheets/ad3542r.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad3551r.pdf
https://www.analog.com/media/en/technical-documentation/data-sheets/ad3552r.pdf
properties:
compatible:
enum:
+ - adi,ad3541r
- adi,ad3542r
+ - adi,ad3551r
- adi,ad3552r
reg:
@@ -92,13 +96,13 @@ patternProperties:
maximum: 511
minimum: -511
- adi,gain-scaling-p-inv-log2:
- description: GainP = 1 / ( 2 ^ adi,gain-scaling-p-inv-log2)
+ adi,gain-scaling-p:
+ description: GainP = 1 / ( 2 ^ adi,gain-scaling-p)
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2, 3]
- adi,gain-scaling-n-inv-log2:
- description: GainN = 1 / ( 2 ^ adi,gain-scaling-n-inv-log2)
+ adi,gain-scaling-n:
+ description: GainN = 1 / ( 2 ^ adi,gain-scaling-n)
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2, 3]
@@ -107,8 +111,8 @@ patternProperties:
required:
- adi,gain-offset
- - adi,gain-scaling-p-inv-log2
- - adi,gain-scaling-n-inv-log2
+ - adi,gain-scaling-p
+ - adi,gain-scaling-n
- adi,rfb-ohms
required:
@@ -128,7 +132,9 @@ allOf:
properties:
compatible:
contains:
- const: adi,ad3542r
+ enum:
+ - adi,ad3541r
+ - adi,ad3542r
then:
patternProperties:
"^channel@([0-1])$":
@@ -139,7 +145,7 @@ allOf:
Voltage output range of the channel as <minimum, maximum>
Required connections:
Rfb1x for: 0 to 2.5 V; 0 to 3V; 0 to 5 V;
- Rfb2x for: 0 to 10 V; 2.5 to 7.5V; -5 to 5 V;
+ Rfb2x for: 0 to 10 V; -2.5 to 7.5V; -5 to 5 V;
oneOf:
- items:
- const: 0
@@ -158,7 +164,9 @@ allOf:
properties:
compatible:
contains:
- const: adi,ad3552r
+ enum:
+ - adi,ad3551r
+ - adi,ad3552r
then:
patternProperties:
"^channel@([0-1])$":
@@ -182,6 +190,21 @@ allOf:
- const: -10000000
- const: 10000000
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad3541r
+ - adi,ad3551r
+ then:
+ properties:
+ channel@1: false
+ channel@0:
+ properties:
+ reg:
+ const: 0
+
required:
- compatible
- reg
@@ -208,8 +231,8 @@ examples:
reg = <1>;
custom-output-range-config {
adi,gain-offset = <5>;
- adi,gain-scaling-p-inv-log2 = <1>;
- adi,gain-scaling-n-inv-log2 = <2>;
+ adi,gain-scaling-p = <1>;
+ adi,gain-scaling-n = <2>;
adi,rfb-ohms = <1>;
};
};
diff --git a/Documentation/devicetree/bindings/iio/frequency/adi,adf4350.yaml b/Documentation/devicetree/bindings/iio/frequency/adi,adf4350.yaml
index 43cbf27114c7..d1d1311332f8 100644
--- a/Documentation/devicetree/bindings/iio/frequency/adi,adf4350.yaml
+++ b/Documentation/devicetree/bindings/iio/frequency/adi,adf4350.yaml
@@ -28,6 +28,12 @@ properties:
clock-names:
const: clkin
+ '#clock-cells':
+ const: 0
+
+ clock-output-names:
+ maxItems: 1
+
gpios:
maxItems: 1
description: Lock detect GPIO.
diff --git a/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml b/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml
index 9b7ad609f7db..9d185f7bfdcb 100644
--- a/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml
@@ -30,12 +30,19 @@ properties:
- adi,adis16467-2
- adi,adis16467-3
- adi,adis16500
+ - adi,adis16501
- adi,adis16505-1
- adi,adis16505-2
- adi,adis16505-3
- adi,adis16507-1
- adi,adis16507-2
- adi,adis16507-3
+ - adi,adis16575-2
+ - adi,adis16575-3
+ - adi,adis16576-2
+ - adi,adis16576-3
+ - adi,adis16577-2
+ - adi,adis16577-3
reg:
maxItems: 1
@@ -90,12 +97,19 @@ allOf:
contains:
enum:
- adi,adis16500
+ - adi,adis16501
- adi,adis16505-1
- adi,adis16505-2
- adi,adis16505-3
- adi,adis16507-1
- adi,adis16507-2
- adi,adis16507-3
+ - adi,adis16575-2
+ - adi,adis16575-3
+ - adi,adis16576-2
+ - adi,adis16576-3
+ - adi,adis16577-2
+ - adi,adis16577-3
then:
properties:
@@ -112,6 +126,23 @@ allOf:
dependencies:
adi,sync-mode: [ clocks ]
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,adis16575-2
+ - adi,adis16575-3
+ - adi,adis16576-2
+ - adi,adis16576-3
+ - adi,adis16577-2
+ - adi,adis16577-3
+
+ then:
+ properties:
+ spi-max-frequency:
+ maximum: 15000000
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/iio/imu/adi,adis16480.yaml b/Documentation/devicetree/bindings/iio/imu/adi,adis16480.yaml
index 56e0dc20f5e4..e3eec38897bf 100644
--- a/Documentation/devicetree/bindings/iio/imu/adi,adis16480.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/adi,adis16480.yaml
@@ -23,6 +23,12 @@ properties:
- adi,adis16497-1
- adi,adis16497-2
- adi,adis16497-3
+ - adi,adis16545-1
+ - adi,adis16545-2
+ - adi,adis16545-3
+ - adi,adis16547-1
+ - adi,adis16547-2
+ - adi,adis16547-3
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/imu/bosch,bmi160.yaml b/Documentation/devicetree/bindings/iio/imu/bosch,bmi160.yaml
index 47cfba939ca6..3b0a2d8b2e91 100644
--- a/Documentation/devicetree/bindings/iio/imu/bosch,bmi160.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/bosch,bmi160.yaml
@@ -16,7 +16,11 @@ description: |
properties:
compatible:
- const: bosch,bmi160
+ oneOf:
+ - const: bosch,bmi160
+ - items:
+ - const: bosch,bmi120
+ - const: bosch,bmi160
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/light/vishay,veml6075.yaml b/Documentation/devicetree/bindings/iio/light/vishay,veml6075.yaml
index 91c318746bf3..ecf2339e02f6 100644
--- a/Documentation/devicetree/bindings/iio/light/vishay,veml6075.yaml
+++ b/Documentation/devicetree/bindings/iio/light/vishay,veml6075.yaml
@@ -4,14 +4,19 @@
$id: http://devicetree.org/schemas/iio/light/vishay,veml6075.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Vishay VEML6075 UVA and UVB sensor
+title: Vishay VEML6075 UVA/B and VEML6040 RGBW sensors
maintainers:
- Javier Carrasco <javier.carrasco.cruz@gmail.com>
+description:
+ VEML6040 datasheet at https://www.vishay.com/docs/84276/veml6040.pdf
+
properties:
compatible:
- const: vishay,veml6075
+ enum:
+ - vishay,veml6040
+ - vishay,veml6075
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/st,st-sensors.yaml b/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
index fff7e3d83a02..71c1ee33a393 100644
--- a/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
+++ b/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
@@ -26,6 +26,7 @@ properties:
- st,lis2dw12
- st,lis2hh12
- st,lis2dh12-accel
+ - st,lis2ds12
- st,lis302dl
- st,lis331dl-accel
- st,lis331dlh-accel
diff --git a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml
index dc4ac41f2441..a62916d07a08 100644
--- a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml
+++ b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml
@@ -18,9 +18,12 @@ allOf:
properties:
compatible:
- enum:
- - elan,ekth6915
- - ilitek,ili2901
+ oneOf:
+ - items:
+ - enum:
+ - elan,ekth5015m
+ - const: elan,ekth6915
+ - const: elan,ekth6915
reg:
const: 0x10
@@ -33,6 +36,12 @@ properties:
reset-gpios:
description: Reset GPIO; not all touchscreens using eKTH6915 hook this up.
+ no-reset-on-power-off:
+ type: boolean
+ description:
+ Reset line is wired so that it can (and should) be left deasserted when
+ the power supply is off.
+
vcc33-supply:
description: The 3.3V supply to the touchscreen.
@@ -58,8 +67,8 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- ap_ts: touchscreen@10 {
- compatible = "elan,ekth6915";
+ touchscreen@10 {
+ compatible = "elan,ekth5015m", "elan,ekth6915";
reg = <0x10>;
interrupt-parent = <&tlmm>;
diff --git a/Documentation/devicetree/bindings/input/ilitek,ili2901.yaml b/Documentation/devicetree/bindings/input/ilitek,ili2901.yaml
new file mode 100644
index 000000000000..1abeec768d79
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/ilitek,ili2901.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/ilitek,ili2901.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ilitek ILI2901 touchscreen controller
+
+maintainers:
+ - Jiri Kosina <jkosina@suse.com>
+
+description:
+ Supports the Ilitek ILI2901 touchscreen controller.
+ This touchscreen controller uses the i2c-hid protocol with a reset GPIO.
+
+allOf:
+ - $ref: /schemas/input/touchscreen/touchscreen.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ilitek,ili2901
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ panel: true
+
+ reset-gpios:
+ maxItems: 1
+
+ vcc33-supply: true
+
+ vccio-supply: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - vcc33-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen@41 {
+ compatible = "ilitek,ili2901";
+ reg = <0x41>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+
+ reset-gpios = <&tlmm 8 GPIO_ACTIVE_LOW>;
+ vcc33-supply = <&pp3300_ts>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/misc/qemu,vcpu-stall-detector.yaml b/Documentation/devicetree/bindings/misc/qemu,vcpu-stall-detector.yaml
index 1aebeb696ee0..e12d80be00cd 100644
--- a/Documentation/devicetree/bindings/misc/qemu,vcpu-stall-detector.yaml
+++ b/Documentation/devicetree/bindings/misc/qemu,vcpu-stall-detector.yaml
@@ -29,6 +29,9 @@ properties:
Defaults to 10 if unset.
default: 10
+ interrupts:
+ maxItems: 1
+
timeout-sec:
description: |
The stall detector expiration timeout measured in seconds.
@@ -43,9 +46,12 @@ additionalProperties: false
examples:
- |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
vmwdt@9030000 {
compatible = "qemu,vcpu-stall-detector";
reg = <0x9030000 0x10000>;
clock-frequency = <10>;
timeout-sec = <8>;
+ interrupts = <GIC_PPI 15 IRQ_TYPE_EDGE_RISING>;
};
diff --git a/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml b/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml
index c80c880a9dab..60aaf30d68ed 100644
--- a/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml
+++ b/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml
@@ -128,7 +128,6 @@ required:
- cell-index
- reg
- fsl,fman-ports
- - ptp-timer
dependencies:
pcs-handle-names:
diff --git a/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml b/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
index 828439398fdf..fd4244fceced 100644
--- a/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
+++ b/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
@@ -24,6 +24,7 @@ properties:
managers:
type: object
+ additionalProperties: false
description:
List of the PD69208T4/PD69204T4/PD69208M PSE managers. Each manager
has 4 or 8 physical ports according to the chip version. No need to
@@ -47,8 +48,9 @@ properties:
- "#size-cells"
patternProperties:
- "^manager@0[0-9a-b]$":
+ "^manager@[0-9a-b]$":
type: object
+ additionalProperties: false
description:
PD69208T4/PD69204T4/PD69208M PSE manager exposing 4 or 8 physical
ports.
@@ -69,9 +71,14 @@ properties:
patternProperties:
'^port@[0-7]$':
type: object
+ additionalProperties: false
+
+ properties:
+ reg:
+ maxItems: 1
+
required:
- reg
- additionalProperties: false
required:
- reg
diff --git a/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml b/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
index 4147adb11e10..6992d56832bf 100644
--- a/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
+++ b/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
@@ -29,13 +29,31 @@ properties:
of the ports conversion matrix that establishes the relationship between
the logical ports and the physical channels.
type: object
+ additionalProperties: false
+
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
patternProperties:
'^channel@[0-7]$':
type: object
+ additionalProperties: false
+
+ properties:
+ reg:
+ maxItems: 1
+
required:
- reg
+ required:
+ - "#address-cells"
+ - "#size-cells"
+
unevaluatedProperties: false
required:
diff --git a/Documentation/devicetree/bindings/nvmem/amlogic,meson-gxbb-efuse.yaml b/Documentation/devicetree/bindings/nvmem/amlogic,meson-gxbb-efuse.yaml
index 9801fe6f91b5..99ddc9a4af05 100644
--- a/Documentation/devicetree/bindings/nvmem/amlogic,meson-gxbb-efuse.yaml
+++ b/Documentation/devicetree/bindings/nvmem/amlogic,meson-gxbb-efuse.yaml
@@ -28,6 +28,9 @@ properties:
description: phandle to the secure-monitor node
$ref: /schemas/types.yaml#/definitions/phandle
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- clocks
diff --git a/Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml b/Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml
index cf5f9e22bb7e..32b8c1eb4e80 100644
--- a/Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml
+++ b/Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml
@@ -28,7 +28,9 @@ properties:
- enum:
- mediatek,mt7622-efuse
- mediatek,mt7623-efuse
+ - mediatek,mt7981-efuse
- mediatek,mt7986-efuse
+ - mediatek,mt7988-efuse
- mediatek,mt8173-efuse
- mediatek,mt8183-efuse
- mediatek,mt8186-efuse
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
index 50846a2d09c8..0bf2d9f093b5 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
@@ -29,7 +29,6 @@ properties:
- qcom,pm7325-gpio
- qcom,pm7550ba-gpio
- qcom,pm8005-gpio
- - qcom,pm8008-gpio
- qcom,pm8018-gpio
- qcom,pm8019-gpio
- qcom,pm8038-gpio
@@ -126,7 +125,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,pm8008-gpio
- qcom,pmi8950-gpio
- qcom,pmr735d-gpio
then:
@@ -448,7 +446,6 @@ $defs:
- gpio1-gpio10 for pm7325
- gpio1-gpio8 for pm7550ba
- gpio1-gpio4 for pm8005
- - gpio1-gpio2 for pm8008
- gpio1-gpio6 for pm8018
- gpio1-gpio12 for pm8038
- gpio1-gpio40 for pm8058
diff --git a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml
index 0874fc21f66f..6577a61cc075 100644
--- a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml
+++ b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml
@@ -65,6 +65,7 @@ patternProperties:
description: The hard wired USB devices
type: object
$ref: /schemas/usb/usb-device.yaml
+ additionalProperties: true
required:
- peer-hub
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index fbf47f0bacf1..044e2001f4e3 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -1254,6 +1254,8 @@ patternProperties:
description: Smart Battery System
"^schindler,.*":
description: Schindler
+ "^sciosense,.*":
+ description: ScioSense B.V.
"^seagate,.*":
description: Seagate Technology PLC
"^seeed,.*":
diff --git a/Documentation/driver-api/dmaengine/client.rst b/Documentation/driver-api/dmaengine/client.rst
index ecf139f73da4..d491e385d61a 100644
--- a/Documentation/driver-api/dmaengine/client.rst
+++ b/Documentation/driver-api/dmaengine/client.rst
@@ -80,6 +80,10 @@ The details of these operations are:
- slave_sg: DMA a list of scatter gather buffers from/to a peripheral
+ - peripheral_dma_vec: DMA an array of scatter gather buffers from/to a
+ peripheral. Similar to slave_sg, but uses an array of dma_vec
+ structures instead of a scatterlist.
+
- dma_cyclic: Perform a cyclic DMA operation from/to a peripheral till the
operation is explicitly stopped.
@@ -102,6 +106,11 @@ The details of these operations are:
unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags);
+ struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec(
+ struct dma_chan *chan, const struct dma_vec *vecs,
+ size_t nents, enum dma_data_direction direction,
+ unsigned long flags);
+
struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_data_direction direction);
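+
+   /*
+    * Illustrative sketch of how a client might use the helper above.
+    * "chan" and "buf_dma_addr" are assumed to exist already (channel
+    * requested, buffer DMA-mapped); see include/linux/dmaengine.h for
+    * the authoritative struct dma_vec layout and direction enum.
+    */
+   struct dma_vec vecs[2] = {
+       { .addr = buf_dma_addr,             .len = PAGE_SIZE },
+       { .addr = buf_dma_addr + PAGE_SIZE, .len = PAGE_SIZE },
+   };
+   struct dma_async_tx_descriptor *desc;
+
+   desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
+                                            DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+   if (desc) {
+       dmaengine_submit(desc);
+       dma_async_issue_pending(chan);
+   }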
diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst
index ceac2a300e32..3085f8b460fa 100644
--- a/Documentation/driver-api/dmaengine/provider.rst
+++ b/Documentation/driver-api/dmaengine/provider.rst
@@ -433,6 +433,12 @@ supported.
- residue: Provides the residue bytes of the transfer for those that
support residue.
+- ``device_prep_peripheral_dma_vec``
+
+ - Similar to ``device_prep_slave_sg``, but it takes a pointer to an
+ array of ``dma_vec`` structures, which (in the long run) will replace
+ scatterlists.
+
- ``device_issue_pending``
- Takes the first transaction descriptor in the pending queue,
@@ -544,6 +550,10 @@ dma_cookie_t
- Not really relevant any more since the introduction of ``virt-dma``
that abstracts it away.
+dma_vec
+
+- A small structure that contains a DMA address and length.
+
DMA_CTRL_ACK
- If clear, the descriptor cannot be reused by provider until the
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index 18caebad7376..ac9ee7441887 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -464,7 +464,10 @@ SLAVE DMA ENGINE
SPI
devm_spi_alloc_master()
devm_spi_alloc_slave()
+ devm_spi_optimize_message()
devm_spi_register_controller()
+ devm_spi_register_host()
+ devm_spi_register_target()
WATCHDOG
devm_watchdog_register_device()
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 7c3a565ffbef..82d142de3461 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -571,6 +571,7 @@ encoded manner. The codes are the following:
um userfaultfd missing tracking
uw userfaultfd wr-protect tracking
ss shadow stack page
+ sl sealed
== =======================================
Note that there is no guarantee that every flag and associated mnemonic will
diff --git a/Documentation/i2c/i2c_bus.svg b/Documentation/i2c/i2c_bus.svg
index 3170de976373..45801de4af7d 100644
--- a/Documentation/i2c/i2c_bus.svg
+++ b/Documentation/i2c/i2c_bus.svg
@@ -1,5 +1,6 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
+<!-- Updated to inclusive terminology by Wolfram Sang -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
@@ -1120,7 +1121,7 @@
<rect
style="opacity:1;fill:#ffb9b9;fill-opacity:1;stroke:#f00000;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect4424-3-2-9-7"
- width="112.5"
+ width="134.5"
height="113.75008"
x="112.5"
y="471.11221"
@@ -1133,15 +1134,15 @@
y="521.46259"
id="text4349"><tspan
sodipodi:role="line"
- x="167.5354"
+ x="178.5354"
y="521.46259"
style="font-size:25px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle"
id="tspan1273">I2C</tspan><tspan
sodipodi:role="line"
- x="167.5354"
+ x="178.5354"
y="552.71259"
style="font-size:25px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle"
- id="tspan1285">Master</tspan></text>
+ id="tspan1285">Controller</tspan></text>
<rect
style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate"
id="rect4424-3-2-9-7-3-3-5-3"
@@ -1171,7 +1172,7 @@
x="318.59131"
y="552.08752"
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
- id="tspan1287">Slave</tspan></text>
+ id="tspan1287">Target</tspan></text>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968767;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
d="m 112.49995,677.36223 c 712.50005,0 712.50005,0 712.50005,0"
@@ -1233,7 +1234,7 @@
x="468.59131"
y="552.08746"
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
- id="tspan1287-6">Slave</tspan></text>
+ id="tspan1287-6">Target</tspan></text>
<rect
style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;vector-effect:none;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate"
id="rect4424-3-2-9-7-3-3-5-3-1"
@@ -1258,7 +1259,7 @@
x="618.59131"
y="552.08746"
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
- id="tspan1287-9">Slave</tspan></text>
+ id="tspan1287-9">Target</tspan></text>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#DotM)"
d="m 150,583.61221 v 93.75"
diff --git a/Documentation/i2c/summary.rst b/Documentation/i2c/summary.rst
index 786c618ba3be..579a1c7df200 100644
--- a/Documentation/i2c/summary.rst
+++ b/Documentation/i2c/summary.rst
@@ -3,29 +3,27 @@ Introduction to I2C and SMBus
=============================
I²C (pronounce: I squared C and written I2C in the kernel documentation) is
-a protocol developed by Philips. It is a slow two-wire protocol (variable
-speed, up to 400 kHz), with a high speed extension (3.4 MHz). It provides
+a protocol developed by Philips. It is a two-wire protocol with variable
+speed (typically up to 400 kHz, high speed modes up to 5 MHz). It provides
an inexpensive bus for connecting many types of devices with infrequent or
-low bandwidth communications needs. I2C is widely used with embedded
-systems. Some systems use variants that don't meet branding requirements,
+low bandwidth communications needs. I2C is widely used with embedded
+systems. Some systems use variants that don't meet branding requirements,
and so are not advertised as being I2C but come under different names,
e.g. TWI (Two Wire Interface), IIC.
-The latest official I2C specification is the `"I2C-bus specification and user
-manual" (UM10204) <https://www.nxp.com/webapp/Download?colCode=UM10204>`_
-published by NXP Semiconductors. However, you need to log-in to the site to
-access the PDF. An older version of the specification (revision 6) is archived
-`here <https://web.archive.org/web/20210813122132/https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_.
+The latest official I2C specification is the `"I²C-bus specification and user
+manual" (UM10204) <https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_
+published by NXP Semiconductors, version 7 as of this writing.
SMBus (System Management Bus) is based on the I2C protocol, and is mostly
-a subset of I2C protocols and signaling. Many I2C devices will work on an
+a subset of I2C protocols and signaling. Many I2C devices will work on an
SMBus, but some SMBus protocols add semantics beyond what is required to
-achieve I2C branding. Modern PC mainboards rely on SMBus. The most common
+achieve I2C branding. Modern PC mainboards rely on SMBus. The most common
devices connected through SMBus are RAM modules configured using I2C EEPROMs,
and hardware monitoring chips.
Because the SMBus is mostly a subset of the generalized I2C bus, we can
-use its protocols on many I2C systems. However, there are systems that don't
+use its protocols on many I2C systems. However, there are systems that don't
meet both SMBus and I2C electrical constraints; and others which can't
implement all the common SMBus protocol semantics or messages.
@@ -33,29 +31,52 @@ implement all the common SMBus protocol semantics or messages.
Terminology
===========
-Using the terminology from the official documentation, the I2C bus connects
-one or more *master* chips and one or more *slave* chips.
+The I2C bus connects one or more controller chips and one or more target chips.
.. kernel-figure:: i2c_bus.svg
- :alt: Simple I2C bus with one master and 3 slaves
+ :alt: Simple I2C bus with one controller and 3 targets
Simple I2C bus
-A **master** chip is a node that starts communications with slaves. In the
-Linux kernel implementation it is called an **adapter** or bus. Adapter
-drivers are in the ``drivers/i2c/busses/`` subdirectory.
+A **controller** chip is a node that starts communications with targets. In the
+Linux kernel implementation it is also called an "adapter" or "bus". Controller
+drivers are usually in the ``drivers/i2c/busses/`` subdirectory.
-An **algorithm** contains general code that can be used to implement a
-whole class of I2C adapters. Each specific adapter driver either depends on
-an algorithm driver in the ``drivers/i2c/algos/`` subdirectory, or includes
-its own implementation.
+An **algorithm** contains general code that can be used to implement a whole
+class of I2C controllers. Each specific controller driver either depends on an
+algorithm driver in the ``drivers/i2c/algos/`` subdirectory, or includes its
+own implementation.
-A **slave** chip is a node that responds to communications when addressed
-by the master. In Linux it is called a **client**. Client drivers are kept
-in a directory specific to the feature they provide, for example
-``drivers/media/gpio/`` for GPIO expanders and ``drivers/media/i2c/`` for
+A **target** chip is a node that responds to communications when addressed by a
+controller. In the Linux kernel implementation it is also called a "client".
+While targets are usually separate external chips, Linux can also act as a
+target (needs hardware support) and respond to another controller on the bus.
+This is then called a **local target**. In contrast, an external chip is called
+a **remote target**.
+
+Target drivers are kept in a directory specific to the feature they provide,
+for example ``drivers/gpio/`` for GPIO expanders and ``drivers/media/i2c/`` for
video-related chips.
-For the example configuration in figure, you will need a driver for your
-I2C adapter, and drivers for your I2C devices (usually one driver for each
-device).
+For the example configuration in the figure above, you will need one driver for
+the I2C controller, and drivers for your I2C targets, usually one driver for
+each target.
+
+Synonyms
+--------
+
+As mentioned above, the Linux I2C implementation historically uses the terms
+"adapter" for controller and "client" for target. A number of data structures
+have these synonyms in their name. So, when discussing implementation details,
+you should be aware of these terms as well. The official wording is preferred,
+though.
+
+Outdated terminology
+--------------------
+
+In earlier I2C specifications, controller was named "master" and target was
+named "slave". These terms have been obsoleted with v7 of the specification and
+their use is also discouraged by the Linux Kernel Code of Conduct. You may
+still find them in references to documentation which has not been updated. The
+general attitude, however, is to use the inclusive terms: controller and
+target. Work to replace the old terminology in the Linux Kernel is on-going.
diff --git a/Documentation/iio/adis16475.rst b/Documentation/iio/adis16475.rst
index 130f9e97cc17..4bf0998be36e 100644
--- a/Documentation/iio/adis16475.rst
+++ b/Documentation/iio/adis16475.rst
@@ -380,24 +380,5 @@ data is structured.
4. IIO Interfacing Tools
========================
-Linux Kernel Tools
-------------------
-
-Linux Kernel provides some userspace tools that can be used to retrieve data
-from IIO sysfs:
-
-* lsiio: example application that provides a list of IIO devices and triggers
-* iio_event_monitor: example application that reads events from an IIO device
- and prints them
-* iio_generic_buffer: example application that reads data from buffer
-* iio_utils: set of APIs, typically used to access sysfs files.
-
-LibIIO
-------
-
-LibIIO is a C/C++ library that provides generic access to IIO devices. The
-library abstracts the low-level details of the hardware, and provides a simple
-yet complete programming interface that can be used for advanced projects.
-
-For more information about LibIIO, please see:
-https://github.com/analogdevicesinc/libiio
+See ``Documentation/iio/iio_tools.rst`` for the description of the available IIO
+interfacing tools.
diff --git a/Documentation/iio/adis16480.rst b/Documentation/iio/adis16480.rst
new file mode 100644
index 000000000000..bc78fa04d958
--- /dev/null
+++ b/Documentation/iio/adis16480.rst
@@ -0,0 +1,443 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================
+ADIS16480 driver
+================
+
+This driver supports Analog Devices' IMUs on the SPI bus.
+
+1. Supported devices
+====================
+
+* `ADIS16375 <https://www.analog.com/ADIS16375>`_
+* `ADIS16480 <https://www.analog.com/ADIS16480>`_
+* `ADIS16485 <https://www.analog.com/ADIS16485>`_
+* `ADIS16488 <https://www.analog.com/ADIS16488>`_
+* `ADIS16490 <https://www.analog.com/ADIS16490>`_
+* `ADIS16495 <https://www.analog.com/ADIS16495>`_
+* `ADIS16497 <https://www.analog.com/ADIS16497>`_
+* `ADIS16545 <https://www.analog.com/ADIS16545>`_
+* `ADIS16547 <https://www.analog.com/ADIS16547>`_
+
+Each supported device is a complete inertial system that includes a triaxial
+gyroscope and a triaxial accelerometer. Each inertial sensor in the device is
+combined with signal conditioning that optimizes dynamic performance. The factory
+calibration characterizes each sensor for sensitivity, bias, and alignment. As
+a result, each sensor has its own dynamic compensation formulas that provide
+accurate sensor measurements.
+
+2. Device attributes
+====================
+
+Accelerometer and gyroscope measurements are always provided. Furthermore, the
+driver offers the capability to retrieve the delta angle and the delta velocity
+measurements computed by the device.
+
+The delta angle measurements represent a calculation of angular displacement
+between each sample update, while the delta velocity measurements represent a
+calculation of linear velocity change between each sample update.
+
+Finally, temperature data is provided, which shows a coarse measurement of
+the temperature inside the IMU device. This data is most useful for
+monitoring relative changes in the thermal environment.
+
+ADIS16480 and ADIS16488 also provide access to barometric pressure data and
+triaxial magnetometer measurements.
+
+Each IIO device has a device folder under ``/sys/bus/iio/devices/iio:deviceX``,
+where X is the IIO index of the device. Under these folders reside a set of
+device files, depending on the characteristics and features of the hardware
+device in question. These files are consistently generalized and documented in
+the IIO ABI documentation.
+
+The following tables show the adis16480 related device files, found in the
+specific device folder path ``/sys/bus/iio/devices/iio:deviceX``.
+
+**Available only for ADIS16480 and ADIS16488:**
+
++------------------------------------------+---------------------------------------------------------+
+| 3-Axis Magnetometer related device files | Description |
++------------------------------------------+---------------------------------------------------------+
+| in_magn_scale | Scale for the magnetometer channels. |
++------------------------------------------+---------------------------------------------------------+
+| in_magn_x_calibbias | Calibration offset for the X-axis magnetometer channel. |
++------------------------------------------+---------------------------------------------------------+
+| in_magn_x_filter_low_pass_3db_frequency | Bandwidth for the X-axis magnetometer channel. |
++------------------------------------------+---------------------------------------------------------+
+| in_magn_x_raw | Raw X-axis magnetometer channel value. |
++------------------------------------------+---------------------------------------------------------+
+| in_magn_y_calibbias | Calibration offset for the Y-axis magnetometer channel. |
++------------------------------------------+---------------------------------------------------------+
+| in_magn_y_filter_low_pass_3db_frequency | Bandwidth for the Y-axis magnetometer channel. |
++------------------------------------------+---------------------------------------------------------+
+| in_magn_y_raw | Raw Y-axis magnetometer channel value. |
++------------------------------------------+---------------------------------------------------------+
+| in_magn_z_calibbias | Calibration offset for the Z-axis magnetometer channel. |
++------------------------------------------+---------------------------------------------------------+
+| in_magn_z_filter_low_pass_3db_frequency | Bandwidth for the Z-axis magnetometer channel. |
++------------------------------------------+---------------------------------------------------------+
+| in_magn_z_raw | Raw Z-axis magnetometer channel value. |
++------------------------------------------+---------------------------------------------------------+
+
++------------------------------------------+-----------------------------------------------------+
+| Barometric pressure sensor related files | Description |
++------------------------------------------+-----------------------------------------------------+
+| in_pressure0_calibbias | Calibration offset for barometric pressure channel. |
++------------------------------------------+-----------------------------------------------------+
+| in_pressure0_raw | Raw barometric pressure channel value. |
++------------------------------------------+-----------------------------------------------------+
+| in_pressure0_scale | Scale for the barometric pressure sensor channel. |
++------------------------------------------+-----------------------------------------------------+
+
+**Available for all supported devices:**
+
++-------------------------------------------+----------------------------------------------------------+
+| 3-Axis Accelerometer related device files | Description |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_scale | Scale for the accelerometer channels. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_x_calibbias | Calibration offset for the X-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_x_calibscale | Calibration scale for the X-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_x_filter_low_pass_3db_frequency | Bandwidth for the X-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_x_raw | Raw X-axis accelerometer channel value. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_y_calibbias | Calibration offset for the Y-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_y_calibscale | Calibration scale for the Y-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_y_filter_low_pass_3db_frequency | Bandwidth for the Y-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_y_raw | Raw Y-axis accelerometer channel value. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_z_calibbias | Calibration offset for the Z-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_z_calibscale | Calibration scale for the Z-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_z_filter_low_pass_3db_frequency | Bandwidth for the Z-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_z_raw | Raw Z-axis accelerometer channel value. |
++-------------------------------------------+----------------------------------------------------------+
+| in_deltavelocity_scale | Scale for delta velocity channels. |
++-------------------------------------------+----------------------------------------------------------+
+| in_deltavelocity_x_raw | Raw X-axis delta velocity channel value. |
++-------------------------------------------+----------------------------------------------------------+
+| in_deltavelocity_y_raw | Raw Y-axis delta velocity channel value. |
++-------------------------------------------+----------------------------------------------------------+
+| in_deltavelocity_z_raw | Raw Z-axis delta velocity channel value. |
++-------------------------------------------+----------------------------------------------------------+
+
++--------------------------------------------+------------------------------------------------------+
+| 3-Axis Gyroscope related device files | Description |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_scale | Scale for the gyroscope channels. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_x_calibbias | Calibration offset for the X-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_x_calibscale | Calibration scale for the X-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_x_filter_low_pass_3db_frequency | Bandwidth for the X-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_x_raw | Raw X-axis gyroscope channel value. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_y_calibbias | Calibration offset for the Y-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_y_calibscale | Calibration scale for the Y-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_y_filter_low_pass_3db_frequency | Bandwidth for the Y-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_y_raw | Raw Y-axis gyroscope channel value. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_z_calibbias | Calibration offset for the Z-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_z_calibscale | Calibration scale for the Z-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_z_filter_low_pass_3db_frequency | Bandwidth for the Z-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_z_raw | Raw Z-axis gyroscope channel value. |
++--------------------------------------------+------------------------------------------------------+
+| in_deltaangl_scale | Scale for delta angle channels. |
++--------------------------------------------+------------------------------------------------------+
+| in_deltaangl_x_raw | Raw X-axis delta angle channel value. |
++--------------------------------------------+------------------------------------------------------+
+| in_deltaangl_y_raw | Raw Y-axis delta angle channel value. |
++--------------------------------------------+------------------------------------------------------+
+| in_deltaangl_z_raw | Raw Z-axis delta angle channel value. |
++--------------------------------------------+------------------------------------------------------+
+
++----------------------------------+-------------------------------------------+
+| Temperature sensor related files | Description |
++----------------------------------+-------------------------------------------+
+| in_temp0_raw | Raw temperature channel value. |
++----------------------------------+-------------------------------------------+
+| in_temp0_offset | Offset for the temperature sensor channel.|
++----------------------------------+-------------------------------------------+
+| in_temp0_scale | Scale for the temperature sensor channel. |
++----------------------------------+-------------------------------------------+
+
++-------------------------------+---------------------------------------------------------+
+| Miscellaneous device files | Description |
++-------------------------------+---------------------------------------------------------+
+| name | Name of the IIO device. |
++-------------------------------+---------------------------------------------------------+
+| sampling_frequency | Currently selected sample rate. |
++-------------------------------+---------------------------------------------------------+
+
+The following table shows the adis16480 related device debug files, found in the
+specific device debug folder path ``/sys/kernel/debug/iio/iio:deviceX``.
+
++----------------------+-------------------------------------------------------------------------+
+| Debugfs device files | Description |
++----------------------+-------------------------------------------------------------------------+
+| serial_number | The serial number of the chip in hexadecimal format. |
++----------------------+-------------------------------------------------------------------------+
+| product_id | Chip specific product id (e.g. 16480, 16488, 16545, etc.). |
++----------------------+-------------------------------------------------------------------------+
+| flash_count | The number of flash writes performed on the device. |
++----------------------+-------------------------------------------------------------------------+
+| firmware_revision | String containing the firmware revision in the following format ##.##. |
++----------------------+-------------------------------------------------------------------------+
+| firmware_date | String containing the firmware date in the following format mm-dd-yyyy. |
++----------------------+-------------------------------------------------------------------------+
+
+Channels processed values
+-------------------------
+
+A channel value can be read from its _raw attribute. The value returned is the
+raw value as reported by the device. To get the processed value of the channel,
+apply the following formula:
+
+.. code-block:: bash
+
+ processed value = (_raw + _offset) * _scale
+
+Where _offset and _scale are device attributes. If no _offset attribute is
+present, simply assume its value is 0.
+
+The adis16480 driver offers data for 7 types of channels. The table below shows
+the measurement units for the processed value, which are defined by the IIO
+framework:
+
++--------------------------------------+---------------------------+
+| Channel type | Measurement unit |
++--------------------------------------+---------------------------+
+| Acceleration on X, Y, and Z axis | Meters per Second squared |
++--------------------------------------+---------------------------+
+| Angular velocity on X, Y and Z axis | Radians per second |
++--------------------------------------+---------------------------+
+| Delta velocity on X, Y, and Z axis | Meters per Second |
++--------------------------------------+---------------------------+
+| Delta angle on X, Y, and Z axis | Radians |
++--------------------------------------+---------------------------+
+| Temperature | Millidegrees Celsius |
++--------------------------------------+---------------------------+
+| Magnetic field along X, Y and Z axis | Gauss |
++--------------------------------------+---------------------------+
+| Barometric pressure | kilo Pascal |
++--------------------------------------+---------------------------+
+
+Usage examples
+--------------
+
+Show device name:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat name
+ adis16545-1
+
+Show accelerometer channels value:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_raw
+ 1376728
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_y_raw
+ 4487621
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_z_raw
+ 262773792
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_scale
+ 0.000000037
+
+- X-axis acceleration = in_accel_x_raw * in_accel_scale = 0.050938936 m/s^2
+- Y-axis acceleration = in_accel_y_raw * in_accel_scale = 0.166041977 m/s^2
+- Z-axis acceleration = in_accel_z_raw * in_accel_scale = 9.722630304 m/s^2
+
+Show gyroscope channels value:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_anglvel_x_raw
+ -1041702
+ root:/sys/bus/iio/devices/iio:device0> cat in_anglvel_y_raw
+ -273013
+ root:/sys/bus/iio/devices/iio:device0> cat in_anglvel_z_raw
+ 2745116
+ root:/sys/bus/iio/devices/iio:device0> cat in_anglvel_scale
+ 0.000000001
+
+- X-axis angular velocity = in_anglvel_x_raw * in_anglvel_scale = −0.001041702 rad/s
+- Y-axis angular velocity = in_anglvel_y_raw * in_anglvel_scale = −0.000273013 rad/s
+- Z-axis angular velocity = in_anglvel_z_raw * in_anglvel_scale = 0.002745116 rad/s
+
+Set calibration offset for accelerometer channels:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_calibbias
+ 0
+
+ root:/sys/bus/iio/devices/iio:device0> echo 5000 > in_accel_x_calibbias
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_calibbias
+ 5000
+
+Set calibration offset for gyroscope channels:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_anglvel_y_calibbias
+ 0
+
+ root:/sys/bus/iio/devices/iio:device0> echo -5000 > in_anglvel_y_calibbias
+ root:/sys/bus/iio/devices/iio:device0> cat in_anglvel_y_calibbias
+ -5000
+
+Set sampling frequency:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat sampling_frequency
+ 4250.000000
+
+ root:/sys/bus/iio/devices/iio:device0> echo 1000 > sampling_frequency
+ root:/sys/bus/iio/devices/iio:device0> cat sampling_frequency
+ 1062.500000
+
+Set bandwidth for accelerometer channels:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_filter_low_pass_3db_frequency
+ 0
+
+ root:/sys/bus/iio/devices/iio:device0> echo 300 > in_accel_x_filter_low_pass_3db_frequency
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_filter_low_pass_3db_frequency
+ 300
+
+Show serial number:
+
+.. code-block:: bash
+
+ root:/sys/kernel/debug/iio/iio:device0> cat serial_number
+ 0x000c
+
+Show product id:
+
+.. code-block:: bash
+
+ root:/sys/kernel/debug/iio/iio:device0> cat product_id
+ 16545
+
+Show flash count:
+
+.. code-block:: bash
+
+ root:/sys/kernel/debug/iio/iio:device0> cat flash_count
+ 88
+
+Show firmware revision:
+
+.. code-block:: bash
+
+ root:/sys/kernel/debug/iio/iio:device0> cat firmware_revision
+ 1.4
+
+Show firmware date:
+
+.. code-block:: bash
+
+ root:/sys/kernel/debug/iio/iio:device0> cat firmware_date
+ 09-23-2023
+
+3. Device buffers
+=================
+
+This driver supports IIO buffers.
+
+All devices support retrieving the raw acceleration, gyroscope and temperature
+measurements using buffers.
+
+The following device families also support retrieving the delta velocity, delta
+angle and temperature measurements using buffers:
+
+- ADIS16545
+- ADIS16547
+
+However, when retrieving acceleration or gyroscope data using buffers, delta
+readings will not be available and vice versa. This is because the device only
+allows reading either acceleration and gyroscope data or delta velocity and
+delta angle data at a time, and switching between these two burst data selection
+modes is time consuming.
+
+Usage examples
+--------------
+
+Set device trigger in current_trigger, if not already set:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat trigger/current_trigger
+
+ root:/sys/bus/iio/devices/iio:device0> echo adis16545-1-dev0 > trigger/current_trigger
+ root:/sys/bus/iio/devices/iio:device0> cat trigger/current_trigger
+ adis16545-1-dev0
+
+Select channels for buffer read:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_deltavelocity_x_en
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_deltavelocity_y_en
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_deltavelocity_z_en
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_temp0_en
+
+Set the number of samples to be stored in the buffer:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> echo 10 > buffer/length
+
+Enable buffer readings:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > buffer/enable
+
+Obtain buffered data::
+
+ root:/sys/bus/iio/devices/iio:device0> hexdump -C /dev/iio\:device0
+ ...
+ 00006aa0 09 62 00 00 ff ff fc a4 00 00 01 69 00 03 3c 08 |.b.........i..<.|
+ 00006ab0 09 61 00 00 00 00 02 96 00 00 02 8f 00 03 37 50 |.a............7P|
+ 00006ac0 09 61 00 00 00 00 12 3d 00 00 0b 89 00 03 2c 0b |.a.....=......,.|
+ 00006ad0 09 61 00 00 00 00 1e dc 00 00 16 dd 00 03 25 bf |.a............%.|
+ 00006ae0 09 61 00 00 00 00 1e e3 00 00 1b bf 00 03 27 0b |.a............'.|
+ 00006af0 09 61 00 00 00 00 15 50 00 00 19 44 00 03 30 fd |.a.....P...D..0.|
+ 00006b00 09 61 00 00 00 00 09 0e 00 00 14 41 00 03 3d 7f |.a.........A..=.|
+ 00006b10 09 61 00 00 ff ff ff f0 00 00 0e bc 00 03 48 d0 |.a............H.|
+ 00006b20 09 63 00 00 00 00 00 9f 00 00 0f 37 00 03 4c fe |.c.........7..L.|
+ 00006b30 09 64 00 00 00 00 0b f6 00 00 18 92 00 03 43 22 |.d............C"|
+ 00006b40 09 64 00 00 00 00 18 df 00 00 22 33 00 03 33 ab |.d........"3..3.|
+ 00006b50 09 63 00 00 00 00 1e 81 00 00 26 be 00 03 29 60 |.c........&...)`|
+ 00006b60 09 63 00 00 00 00 1b 13 00 00 22 2f 00 03 23 91 |.c........"/..#.|
+ ...
+
+See ``Documentation/iio/iio_devbuf.rst`` for more information about how buffered
+data is structured.
+
+4. IIO Interfacing Tools
+========================
+
+See ``Documentation/iio/iio_tools.rst`` for the description of the available IIO
+interfacing tools.
diff --git a/Documentation/iio/iio_dmabuf_api.rst b/Documentation/iio/iio_dmabuf_api.rst
new file mode 100644
index 000000000000..2836cadbd495
--- /dev/null
+++ b/Documentation/iio/iio_dmabuf_api.rst
@@ -0,0 +1,54 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================
+High-speed DMABUF interface for IIO
+===================================
+
+1. Overview
+===========
+
+The Industrial I/O subsystem supports access to buffers through a
+file-based interface, with read() and write() access calls through the
+IIO device's dev node.
+
+It additionally supports a DMABUF based interface, where the userspace
+can attach DMABUF objects (externally created) to an IIO buffer, and
+subsequently use them for data transfers.
+
+A userspace application can then use this interface to share DMABUF
+objects between several interfaces, allowing it to transfer data in a
+zero-copy fashion, for instance between IIO and the USB stack.
+
+The userspace application can also memory-map the DMABUF objects, and
+access the sample data directly. The advantage of doing this over the
+read() interface is that it avoids an extra copy of the data between the
+kernel and userspace. This is particularly useful for high-speed devices
+which produce several megabytes or even gigabytes of data per second.
+It does, however, increase the userspace/kernel synchronization
+overhead, as the DMA_BUF_SYNC_START and DMA_BUF_SYNC_END IOCTLs have to
+be used to guarantee data integrity.
+
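+For the memory-mapped access path, the synchronization bracketing looks
+roughly like the sketch below. The constants and ``struct dma_buf_sync``
+come from the standard DMABUF UAPI header ``<linux/dma-buf.h>``; the
+helper name and the ``len`` parameter are illustrative only:
+
+.. code-block:: c
+
+ #include <stdint.h>
+ #include <sys/ioctl.h>
+ #include <sys/mman.h>
+ #include <linux/dma-buf.h>
+
+ /* Read samples out of a memory-mapped DMABUF of 'len' bytes. */
+ static void read_samples(int dmabuf_fd, size_t len)
+ {
+         struct dma_buf_sync sync = { 0 };
+         uint8_t *data = mmap(NULL, len, PROT_READ, MAP_SHARED, dmabuf_fd, 0);
+
+         if (data == MAP_FAILED)
+                 return;
+
+         /* Tell the exporter that a CPU read access starts. */
+         sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ;
+         ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
+
+         /* ... process the samples in data[0 .. len) ... */
+
+         /* Tell the exporter that the CPU read access is done. */
+         sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
+         ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
+
+         munmap(data, len);
+ }
+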
+2. User API
+===========
+
+As part of this interface, three new IOCTLs have been added. These three
+IOCTLs have to be performed on the IIO buffer's file descriptor, which
+can be obtained using the IIO_BUFFER_GET_FD_IOCTL() ioctl.
+
+ ``IIO_BUFFER_DMABUF_ATTACH_IOCTL(int fd)``
+ Attach the DMABUF object, identified by its file descriptor, to the
+ IIO buffer. Returns zero on success, and a negative errno value on
+ error.
+
+ ``IIO_BUFFER_DMABUF_DETACH_IOCTL(int fd)``
+ Detach the given DMABUF object, identified by its file descriptor,
+ from the IIO buffer. Returns zero on success, and a negative errno
+ value on error.
+
+ Note that closing the IIO buffer's file descriptor will
+ automatically detach all previously attached DMABUF objects.
+
+ ``IIO_BUFFER_DMABUF_ENQUEUE_IOCTL(struct iio_dmabuf *iio_dmabuf)``
+ Enqueue a previously attached DMABUF object to the buffer queue.
+ Enqueued DMABUFs will be read from (if output buffer) or written to
+ (if input buffer) as long as the buffer is enabled.
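+
+A minimal usage sketch is shown below (error handling reduced to early
+returns). It assumes a DMABUF of ``len`` bytes has already been exported
+by another subsystem as ``dmabuf_fd``, and that ``struct iio_dmabuf`` and
+the IOCTL numbers come from the UAPI header ``<linux/iio/buffer.h>``;
+check the header installed on the system for the exact definitions:
+
+.. code-block:: c
+
+ #include <string.h>
+ #include <sys/ioctl.h>
+ #include <unistd.h>
+ #include <linux/types.h>
+ #include <linux/iio/buffer.h>
+
+ /* Attach one externally created DMABUF to IIO buffer 0 and enqueue it. */
+ static int iio_enqueue_dmabuf(int iio_dev_fd, int dmabuf_fd, __u64 len)
+ {
+         struct iio_dmabuf req;
+         int buf_fd = 0; /* in: buffer index, out: buffer file descriptor */
+
+         /* Get a file descriptor for IIO buffer 0 of this device. */
+         if (ioctl(iio_dev_fd, IIO_BUFFER_GET_FD_IOCTL, &buf_fd) < 0)
+                 return -1;
+
+         /* Attach the DMABUF; its file descriptor is passed by pointer. */
+         if (ioctl(buf_fd, IIO_BUFFER_DMABUF_ATTACH_IOCTL, &dmabuf_fd) < 0)
+                 goto err;
+
+         /* Enqueue it; it is serviced once the buffer is enabled. */
+         memset(&req, 0, sizeof(req));
+         req.fd = dmabuf_fd;
+         req.bytes_used = len;
+         if (ioctl(buf_fd, IIO_BUFFER_DMABUF_ENQUEUE_IOCTL, &req) < 0)
+                 goto err;
+
+         return buf_fd;
+ err:
+         close(buf_fd);
+         return -1;
+ }
+
+When the sample data is accessed through a memory mapping of the DMABUF,
+the DMA_BUF_IOCTL_SYNC bracketing described in the overview still applies.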
diff --git a/Documentation/iio/iio_tools.rst b/Documentation/iio/iio_tools.rst
new file mode 100644
index 000000000000..cc691c7f6365
--- /dev/null
+++ b/Documentation/iio/iio_tools.rst
@@ -0,0 +1,27 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
+IIO Interfacing Tools
+=====================
+
+1. Linux Kernel Tools
+=====================
+
+The Linux kernel provides some userspace tools that can be used to retrieve
+data from IIO sysfs:
+
+* lsiio: example application that provides a list of IIO devices and triggers
+* iio_event_monitor: example application that reads events from an IIO device
+ and prints them
+* iio_generic_buffer: example application that reads data from an IIO buffer
+* iio_utils: set of APIs, typically used to access sysfs files
+
+2. LibIIO
+=========
+
+LibIIO is a C/C++ library that provides generic access to IIO devices. The
+library abstracts the low-level details of the hardware, and provides a simple
+yet complete programming interface that can be used for advanced projects.
+
+For more information about LibIIO, please see:
+https://github.com/analogdevicesinc/libiio
diff --git a/Documentation/iio/index.rst b/Documentation/iio/index.rst
index fb6f9d743211..9cb4c50cb20d 100644
--- a/Documentation/iio/index.rst
+++ b/Documentation/iio/index.rst
@@ -9,6 +9,8 @@ Industrial I/O
iio_configfs
iio_devbuf
+ iio_dmabuf_api
+ iio_tools
Industrial I/O Kernel Drivers
=============================
@@ -18,5 +20,6 @@ Industrial I/O Kernel Drivers
ad7944
adis16475
+ adis16480
bno055
ep93xx_adc
diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst
index 555c2f839969..1fb3f5e6193c 100644
--- a/Documentation/kbuild/kconfig-language.rst
+++ b/Documentation/kbuild/kconfig-language.rst
@@ -150,6 +150,12 @@ applicable everywhere (see syntax).
That will limit the usefulness but on the other hand avoid
the illegal configurations all over.
+ If "select" <symbol> is followed by "if" <expr>, <symbol> will be
+ selected by the logical AND of the value of the current menu symbol
+ and <expr>. This means the lower limit can be downgraded due to the
+ presence of "if" <expr>. This behavior may seem weird, but we rely on
+ it. (The future of this behavior is undecided.)
+
- weak reverse dependencies: "imply" <symbol> ["if" <expr>]
This is similar to "select" as it enforces a lower limit on another
@@ -184,7 +190,7 @@ applicable everywhere (see syntax).
ability to hook into a secondary subsystem while allowing the user to
configure that subsystem out without also having to unset these drivers.
- Note: If the combination of FOO=y and BAR=m causes a link error,
+ Note: If the combination of FOO=y and BAZ=m causes a link error,
you can guard the function call with IS_REACHABLE()::
foo_init()
@@ -202,6 +208,10 @@ applicable everywhere (see syntax).
imply BAR
imply BAZ
+ Note: If "imply" <symbol> is followed by "if" <expr>, the default of <symbol>
+ will be the logical AND of the value of the current menu symbol and <expr>.
+ (The future of this behavior is undecided.)
+
- limiting menu display: "visible if" <expr>
This attribute is only applicable to menu blocks, if the condition is
diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst
index a1f3eb7a43e2..131863142cbb 100644
--- a/Documentation/kbuild/modules.rst
+++ b/Documentation/kbuild/modules.rst
@@ -128,7 +128,7 @@ executed to make module versioning work.
modules_install
Install the external module(s). The default location is
- /lib/modules/<kernel_release>/extra/, but a prefix may
+ /lib/modules/<kernel_release>/updates/, but a prefix may
be added with INSTALL_MOD_PATH (discussed in section 5).
clean
@@ -417,7 +417,7 @@ directory:
And external modules are installed in:
- /lib/modules/$(KERNELRELEASE)/extra/
+ /lib/modules/$(KERNELRELEASE)/updates/
5.1 INSTALL_MOD_PATH
--------------------
@@ -438,10 +438,10 @@ And external modules are installed in:
-------------------
External modules are by default installed to a directory under
- /lib/modules/$(KERNELRELEASE)/extra/, but you may wish to
+ /lib/modules/$(KERNELRELEASE)/updates/, but you may wish to
locate modules for a specific functionality in a separate
directory. For this purpose, use INSTALL_MOD_DIR to specify an
- alternative name to "extra."::
+ alternative name to "updates."::
$ make INSTALL_MOD_DIR=gandalf -C $KDIR \
M=$PWD modules_install
diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
index 00dc61358be8..4510e8d1adcb 100644
--- a/Documentation/netlink/specs/ethtool.yaml
+++ b/Documentation/netlink/specs/ethtool.yaml
@@ -1603,7 +1603,7 @@ operations:
attributes:
- header
reply:
- attributes: &pse
+ attributes:
- header
- podl-pse-admin-state
- podl-pse-admin-control
@@ -1620,7 +1620,10 @@ operations:
do:
request:
- attributes: *pse
+ attributes:
+ - header
+ - podl-pse-admin-control
+ - c33-pse-admin-control
-
name: rss-get
doc: Get RSS params.
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index 11a32373365a..959755be4d7f 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -350,6 +350,10 @@ attribute-sets:
buffer space, host descriptors etc.
type: uint
-
+ name: rx-csum-complete
+ doc: Number of packets that were marked as CHECKSUM_COMPLETE.
+ type: uint
+ -
name: rx-csum-unnecessary
doc: Number of packets that were marked as CHECKSUM_UNNECESSARY.
type: uint
diff --git a/Documentation/netlink/specs/nfsd.yaml b/Documentation/netlink/specs/nfsd.yaml
index d21234097167..6bda7a467301 100644
--- a/Documentation/netlink/specs/nfsd.yaml
+++ b/Documentation/netlink/specs/nfsd.yaml
@@ -123,8 +123,6 @@ operations:
doc: dump pending nfsd rpc
attribute-set: rpc-status
dump:
- pre: nfsd-nl-rpc-status-get-start
- post: nfsd-nl-rpc-status-get-done
reply:
attributes:
- xid
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index 72da7057e4cf..dceeb0d763aa 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -329,24 +329,23 @@ XDP_SHARED_UMEM option and provide the initial socket's fd in the
sxdp_shared_umem_fd field as you registered the UMEM on that
socket. These two sockets will now share one and the same UMEM.
-In this case, it is possible to use the NIC's packet steering
-capabilities to steer the packets to the right queue. This is not
-possible in the previous example as there is only one queue shared
-among sockets, so the NIC cannot do this steering as it can only steer
-between queues.
-
-In libxdp (or libbpf prior to version 1.0), you need to use the
-xsk_socket__create_shared() API as it takes a reference to a FILL ring
-and a COMPLETION ring that will be created for you and bound to the
-shared UMEM. You can use this function for all the sockets you create,
-or you can use it for the second and following ones and use
-xsk_socket__create() for the first one. Both methods yield the same
-result.
+There is no need to supply an XDP program like the one in the previous
+case where sockets were bound to the same queue id and
+device. Instead, use the NIC's packet steering capabilities to steer
+the packets to the right queue. In the previous example, there is only
+one queue shared among sockets, so the NIC cannot do this steering. It
+can only steer between queues.
+
+In libbpf, you need to use the xsk_socket__create_shared() API as it
+takes a reference to a FILL ring and a COMPLETION ring that will be
+created for you and bound to the shared UMEM. You can use this
+function for all the sockets you create, or you can use it for the
+second and following ones and use xsk_socket__create() for the first
+one. Both methods yield the same result.
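+
+A rough sketch of two sockets sharing one UMEM on queues 0 and 1 is shown
+below. The function signatures follow libbpf's xsk.h (also available as
+xdp/xsk.h in libxdp); the interface name, queue ids and sizes are
+illustrative only, and FILL ring population, the RX/TX loops and cleanup
+are omitted:
+
+.. code-block:: c
+
+ #include <stdlib.h>
+ #include <sys/mman.h>
+ #include <bpf/xsk.h> /* or <xdp/xsk.h> when using libxdp */
+
+ #define NUM_FRAMES 4096
+
+ int main(void)
+ {
+         struct xsk_ring_prod fill0, fill1, tx0, tx1;
+         struct xsk_ring_cons comp0, comp1, rx0, rx1;
+         struct xsk_socket *xsk0, *xsk1;
+         struct xsk_umem *umem;
+         size_t size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
+         void *bufs;
+
+         /* One packet buffer area, registered once as the shared UMEM. */
+         bufs = mmap(NULL, size, PROT_READ | PROT_WRITE,
+                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+         if (bufs == MAP_FAILED ||
+             xsk_umem__create(&umem, bufs, size, &fill0, &comp0, NULL))
+                 exit(EXIT_FAILURE);
+
+         /* Each socket is bound to its own queue id but shares the UMEM;
+          * every socket gets its own FILL and COMPLETION rings.
+          */
+         if (xsk_socket__create_shared(&xsk0, "eth0", 0, umem, &rx0, &tx0,
+                                       &fill0, &comp0, NULL) ||
+             xsk_socket__create_shared(&xsk1, "eth0", 1, umem, &rx1, &tx1,
+                                       &fill1, &comp1, NULL))
+                 exit(EXIT_FAILURE);
+
+         /* Populate the FILL rings and run the RX/TX loops here; use the
+          * NIC's flow steering (e.g. ethtool -N) to direct traffic to the
+          * right queue.
+          */
+         return 0;
+ }
+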
Note that a UMEM can be shared between sockets on the same queue id
and device, as well as between queues on the same device and between
-devices at the same time. It is also possible to redirect to any
-socket as long as it is bound to the same umem with XDP_SHARED_UMEM.
+devices at the same time.
XDP_USE_NEED_WAKEUP bind flag
-----------------------------
@@ -823,10 +822,6 @@ A: The short answer is no, that is not supported at the moment. The
switch, or other distribution mechanism, in your NIC to direct
traffic to the correct queue id and socket.
- Note that if you are using the XDP_SHARED_UMEM option, it is
- possible to switch traffic between any socket bound to the same
- umem.
-
Q: My packets are sometimes corrupted. What is wrong?
A: Care has to be taken not to feed the same buffer in the UMEM into
diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst
index fd96e4a3cef9..5e1fcfad1c4c 100644
--- a/Documentation/process/maintainer-netdev.rst
+++ b/Documentation/process/maintainer-netdev.rst
@@ -227,7 +227,7 @@ preferably including links to previous postings, for example::
The amount of mooing will depend on packet rate so should match
the diurnal cycle quite well.
- Signed-of-by: Joe Defarmer <joe@barn.org>
+ Signed-off-by: Joe Defarmer <joe@barn.org>
---
v3:
- add a note about time-of-day mooing fluctuation to the commit message
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index 5926115ec0ed..8a251d71fa6e 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -32,6 +32,7 @@ Security-related interfaces
seccomp_filter
landlock
lsm
+ mfd_noexec
spec_ctrl
tee
diff --git a/Documentation/userspace-api/media/v4l/dev-subdev.rst b/Documentation/userspace-api/media/v4l/dev-subdev.rst
index 0f9eda3187f3..161b43f1ce66 100644
--- a/Documentation/userspace-api/media/v4l/dev-subdev.rst
+++ b/Documentation/userspace-api/media/v4l/dev-subdev.rst
@@ -582,7 +582,7 @@ depending on the hardware. In all cases, however, only routes that have the
Devices generating the streams may allow enabling and disabling some of the
routes or have a fixed routing configuration. If the routes can be disabled, not
declaring the routes (or declaring them without
-``VIDIOC_SUBDEV_STREAM_FL_ACTIVE`` flag set) in ``VIDIOC_SUBDEV_S_ROUTING`` will
+``V4L2_SUBDEV_STREAM_FL_ACTIVE`` flag set) in ``VIDIOC_SUBDEV_S_ROUTING`` will
disable the routes. ``VIDIOC_SUBDEV_S_ROUTING`` will still return such routes
back to the user in the routes array, with the ``V4L2_SUBDEV_STREAM_FL_ACTIVE``
flag unset.
diff --git a/Documentation/userspace-api/mfd_noexec.rst b/Documentation/userspace-api/mfd_noexec.rst
new file mode 100644
index 000000000000..7afcc480e38f
--- /dev/null
+++ b/Documentation/userspace-api/mfd_noexec.rst
@@ -0,0 +1,86 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================================
+Introduction of non-executable mfd
+==================================
+:Author:
+ Daniel Verkamp <dverkamp@chromium.org>
+ Jeff Xu <jeffxu@chromium.org>
+
+:Contributor:
+ Aleksa Sarai <cyphar@cyphar.com>
+
+Since Linux introduced the memfd feature, memfds have always had their
+execute bit set, and the memfd_create() syscall doesn't allow setting
+it differently.
+
+However, in a secure-by-default system such as ChromeOS (where all
+executables should come from the rootfs, which is protected by verified
+boot), this executable nature of memfd opens a door for NoExec bypass
+and enables a "confused deputy" attack. For example, in VRP bug [1] the
+cros_vm process created a memfd to share content with an external
+process; the memfd was then overwritten and used to execute arbitrary
+code and escalate to root. [2] lists more VRP bugs of this kind.
+
+On the other hand, executable memfds have legitimate uses: runc uses the
+memfd seal and executable features to copy the contents of a binary and
+then execute it. For such a system, we need a way to differentiate
+runc's use of executable memfds from an attacker's [3].
+
+To address the above:
+ - Let memfd_create() set the X bit at creation time.
+ - Let the memfd be sealed against modifying the X bit when NX is set.
+ - Add a new pid namespace sysctl, vm.memfd_noexec, to help applications
+   migrate to and enforce non-executable memfds.
+
+User API
+========
+``int memfd_create(const char *name, unsigned int flags)``
+
+``MFD_NOEXEC_SEAL``
+ When the MFD_NOEXEC_SEAL bit is set in ``flags``, the memfd is created
+ with NX. F_SEAL_EXEC is set, so the memfd can't later be modified to
+ add X. MFD_ALLOW_SEALING is also implied.
+ This is the most common way for an application to use memfd.
+
+``MFD_EXEC``
+ When the MFD_EXEC bit is set in ``flags``, the memfd is created with X.
+
+Note:
+ ``MFD_NOEXEC_SEAL`` implies ``MFD_ALLOW_SEALING``. If an application
+ doesn't want sealing, it can add F_SEAL_SEAL after creation.
+
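+The following minimal sketch shows the intended ``MFD_NOEXEC_SEAL`` usage.
+The flag constants come from the UAPI header ``<linux/memfd.h>`` (recent
+glibc also exposes them via ``<sys/mman.h>``), and the glibc
+memfd_create() wrapper requires glibc 2.27 or later:
+
+.. code-block:: c
+
+ #define _GNU_SOURCE
+ #include <linux/memfd.h>
+ #include <stdio.h>
+ #include <string.h>
+ #include <sys/mman.h>
+ #include <unistd.h>
+
+ int main(void)
+ {
+         const char msg[] = "hello";
+         /* Non-executable memfd, sealed against adding X later. */
+         int fd = memfd_create("demo", MFD_CLOEXEC | MFD_NOEXEC_SEAL);
+
+         if (fd < 0) {
+                 perror("memfd_create");
+                 return 1;
+         }
+
+         /* The memfd is used like any other file descriptor... */
+         if (write(fd, msg, strlen(msg)) < 0)
+                 perror("write");
+
+         /*
+          * ...but it can no longer be made executable: F_SEAL_EXEC blocks
+          * adding the x mode bits, so exec of the memfd is rejected.
+          */
+         close(fd);
+         return 0;
+ }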
+
+Sysctl
+======
+``pid namespaced sysctl vm.memfd_noexec``
+
+The new pid namespaced sysctl vm.memfd_noexec has 3 values:
+
+ - 0: MEMFD_NOEXEC_SCOPE_EXEC
+ memfd_create() with neither MFD_EXEC nor MFD_NOEXEC_SEAL acts as if
+ MFD_EXEC was set.
+
+ - 1: MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL
+ memfd_create() with neither MFD_EXEC nor MFD_NOEXEC_SEAL acts as if
+ MFD_NOEXEC_SEAL was set.
+
+ - 2: MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED
+ memfd_create() without MFD_NOEXEC_SEAL will be rejected.
+
+The sysctl allows finer control of memfd_create() for old software that
+doesn't set the executable bit: for example, in a container with
+vm.memfd_noexec=1, old software will create non-executable memfds by
+default, while new software can still create executable memfds by
+setting MFD_EXEC.
+
+The value of vm.memfd_noexec is passed to the child namespace at creation
+time. In addition, the setting is hierarchical, i.e. during memfd_create()
+the kernel searches from the current namespace up to the root namespace
+and uses the most restrictive setting.
+
+[1] https://crbug.com/1305267
+
+[2] https://bugs.chromium.org/p/chromium/issues/list?q=type%3Dbug-security%20memfd%20escalation&can=1
+
+[3] https://lwn.net/Articles/781013/
diff --git a/Documentation/virt/hyperv/clocks.rst b/Documentation/virt/hyperv/clocks.rst
index a56f4837d443..176043265803 100644
--- a/Documentation/virt/hyperv/clocks.rst
+++ b/Documentation/virt/hyperv/clocks.rst
@@ -62,12 +62,21 @@ shared page with scale and offset values into user space. User
space code performs the same algorithm of reading the TSC and
applying the scale and offset to get the constant 10 MHz clock.
-Linux clockevents are based on Hyper-V synthetic timer 0. While
-Hyper-V offers 4 synthetic timers for each CPU, Linux only uses
-timer 0. Interrupts from stimer0 are recorded on the "HVS" line in
-/proc/interrupts. Clockevents based on the virtualized PIT and
-local APIC timer also work, but the Hyper-V synthetic timer is
-preferred.
+Linux clockevents are based on Hyper-V synthetic timer 0 (stimer0).
+While Hyper-V offers 4 synthetic timers for each CPU, Linux only uses
+timer 0. In older versions of Hyper-V, an interrupt from stimer0
+results in a VMBus control message that is demultiplexed by
+vmbus_isr() as described in the Documentation/virt/hyperv/vmbus.rst
+documentation. In newer versions of Hyper-V, stimer0 interrupts can
+be mapped to an architectural interrupt, which is referred to as
+"Direct Mode". Linux prefers to use Direct Mode when available. Since
+x86/x64 doesn't support per-CPU interrupts, Direct Mode statically
+allocates an x86 interrupt vector (HYPERV_STIMER0_VECTOR) across all CPUs
+and explicitly codes it to call the stimer0 interrupt handler. Hence
+interrupts from stimer0 are recorded on the "HVS" line in /proc/interrupts
+rather than being associated with a Linux IRQ. Clockevents based on the
+virtualized PIT and local APIC timer also work, but Hyper-V stimer0
+is preferred.
The driver for the Hyper-V synthetic system clock and timers is
drivers/clocksource/hyperv_timer.c.
diff --git a/Documentation/virt/hyperv/overview.rst b/Documentation/virt/hyperv/overview.rst
index cd493332c88a..77408a89d1a4 100644
--- a/Documentation/virt/hyperv/overview.rst
+++ b/Documentation/virt/hyperv/overview.rst
@@ -40,7 +40,7 @@ Linux guests communicate with Hyper-V in four different ways:
arm64, these synthetic registers must be accessed using explicit
hypercalls.
-* VMbus: VMbus is a higher-level software construct that is built on
+* VMBus: VMBus is a higher-level software construct that is built on
the other 3 mechanisms. It is a message passing interface between
the Hyper-V host and the Linux guest. It uses memory that is shared
between Hyper-V and the guest, along with various signaling
@@ -54,8 +54,8 @@ x86/x64 architecture only.
.. _Hyper-V Top Level Functional Spec (TLFS): https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/tlfs/tlfs
-VMbus is not documented. This documentation provides a high-level
-overview of VMbus and how it works, but the details can be discerned
+VMBus is not documented. This documentation provides a high-level
+overview of VMBus and how it works, but the details can be discerned
only from the code.
Sharing Memory
@@ -74,7 +74,7 @@ follows:
physical address space. How Hyper-V is told about the GPA or list
of GPAs varies. In some cases, a single GPA is written to a
synthetic register. In other cases, a GPA or list of GPAs is sent
- in a VMbus message.
+ in a VMBus message.
* Hyper-V translates the GPAs into "real" physical memory addresses,
and creates a virtual mapping that it can use to access the memory.
@@ -133,9 +133,9 @@ only the CPUs actually present in the VM, so Linux does not report
any hot-add CPUs.
A Linux guest CPU may be taken offline using the normal Linux
-mechanisms, provided no VMbus channel interrupts are assigned to
-the CPU. See the section on VMbus Interrupts for more details
-on how VMbus channel interrupts can be re-assigned to permit
+mechanisms, provided no VMBus channel interrupts are assigned to
+the CPU. See the section on VMBus Interrupts for more details
+on how VMBus channel interrupts can be re-assigned to permit
taking a CPU offline.
32-bit and 64-bit
@@ -169,14 +169,14 @@ and functionality. Hyper-V indicates feature/function availability
via flags in synthetic MSRs that Hyper-V provides to the guest,
and the guest code tests these flags.
-VMbus has its own protocol version that is negotiated during the
-initial VMbus connection from the guest to Hyper-V. This version
+VMBus has its own protocol version that is negotiated during the
+initial VMBus connection from the guest to Hyper-V. This version
number is also output to dmesg during boot. This version number
is checked in a few places in the code to determine if specific
functionality is present.
-Furthermore, each synthetic device on VMbus also has a protocol
-version that is separate from the VMbus protocol version. Device
+Furthermore, each synthetic device on VMBus also has a protocol
+version that is separate from the VMBus protocol version. Device
drivers for these synthetic devices typically negotiate the device
protocol version, and may test that protocol version to determine
if specific device functionality is present.
diff --git a/Documentation/virt/hyperv/vmbus.rst b/Documentation/virt/hyperv/vmbus.rst
index d2012d9022c5..1dcef6a7fda3 100644
--- a/Documentation/virt/hyperv/vmbus.rst
+++ b/Documentation/virt/hyperv/vmbus.rst
@@ -1,8 +1,8 @@
.. SPDX-License-Identifier: GPL-2.0
-VMbus
+VMBus
=====
-VMbus is a software construct provided by Hyper-V to guest VMs. It
+VMBus is a software construct provided by Hyper-V to guest VMs. It
consists of a control path and common facilities used by synthetic
devices that Hyper-V presents to guest VMs. The control path is
used to offer synthetic devices to the guest VM and, in some cases,
@@ -12,9 +12,9 @@ and the synthetic device implementation that is part of Hyper-V, and
signaling primitives to allow Hyper-V and the guest to interrupt
each other.
-VMbus is modeled in Linux as a bus, with the expected /sys/bus/vmbus
-entry in a running Linux guest. The VMbus driver (drivers/hv/vmbus_drv.c)
-establishes the VMbus control path with the Hyper-V host, then
+VMBus is modeled in Linux as a bus, with the expected /sys/bus/vmbus
+entry in a running Linux guest. The VMBus driver (drivers/hv/vmbus_drv.c)
+establishes the VMBus control path with the Hyper-V host, then
registers itself as a Linux bus driver. It implements the standard
bus functions for adding and removing devices to/from the bus.
@@ -49,9 +49,9 @@ synthetic NIC is referred to as "netvsc" and the Linux driver for
the synthetic SCSI controller is "storvsc". These drivers contain
functions with names like "storvsc_connect_to_vsp".
-VMbus channels
+VMBus channels
--------------
-An instance of a synthetic device uses VMbus channels to communicate
+An instance of a synthetic device uses VMBus channels to communicate
between the VSP and the VSC. Channels are bi-directional and used
for passing messages. Most synthetic devices use a single channel,
but the synthetic SCSI controller and synthetic NIC may use multiple
@@ -73,7 +73,7 @@ write indices and some control flags, followed by the memory for the
actual ring. The size of the ring is determined by the VSC in the
guest and is specific to each synthetic device. The list of GPAs
making up the ring is communicated to the Hyper-V host over the
-VMbus control path as a GPA Descriptor List (GPADL). See function
+VMBus control path as a GPA Descriptor List (GPADL). See function
vmbus_establish_gpadl().
Each ring buffer is mapped into contiguous Linux kernel virtual
@@ -102,10 +102,10 @@ resources. For Windows Server 2019 and later, this limit is
approximately 1280 Mbytes. For versions prior to Windows Server
2019, the limit is approximately 384 Mbytes.
-VMbus messages
---------------
-All VMbus messages have a standard header that includes the message
-length, the offset of the message payload, some flags, and a
+VMBus channel messages
+----------------------
+All messages sent in a VMBus channel have a standard header that includes
+the message length, the offset of the message payload, some flags, and a
transactionID. The portion of the message after the header is
unique to each VSP/VSC pair.
@@ -137,7 +137,7 @@ control message contains a list of GPAs that describe the data
buffer. For example, the storvsc driver uses this approach to
specify the data buffers to/from which disk I/O is done.
-Three functions exist to send VMbus messages:
+Three functions exist to send VMBus channel messages:
1. vmbus_sendpacket(): Control-only messages and messages with
embedded data -- no GPAs
@@ -154,20 +154,51 @@ Historically, Linux guests have trusted Hyper-V to send well-formed
and valid messages, and Linux drivers for synthetic devices did not
fully validate messages. With the introduction of processor
technologies that fully encrypt guest memory and that allow the
-guest to not trust the hypervisor (AMD SNP-SEV, Intel TDX), trusting
+guest to not trust the hypervisor (AMD SEV-SNP, Intel TDX), trusting
the Hyper-V host is no longer a valid assumption. The drivers for
-VMbus synthetic devices are being updated to fully validate any
+VMBus synthetic devices are being updated to fully validate any
values read from memory that is shared with Hyper-V, which includes
-messages from VMbus devices. To facilitate such validation,
+messages from VMBus devices. To facilitate such validation,
messages read by the guest from the "in" ring buffer are copied to a
temporary buffer that is not shared with Hyper-V. Validation is
performed in this temporary buffer without the risk of Hyper-V
maliciously modifying the message after it is validated but before
it is used.
-VMbus interrupts
+Synthetic Interrupt Controller (synic)
+--------------------------------------
+Hyper-V provides each guest CPU with a synthetic interrupt controller
+that is used by VMBus for host-guest communication. While each synic
+defines 16 synthetic interrupts (SINT), Linux uses only one of the 16
+(VMBUS_MESSAGE_SINT). All interrupts related to communication between
+the Hyper-V host and a guest CPU use that SINT.
+
+The SINT is mapped to a single per-CPU architectural interrupt (i.e,
+an 8-bit x86/x64 interrupt vector, or an arm64 PPI INTID). Because
+each CPU in the guest has a synic and may receive VMBus interrupts,
+they are best modeled in Linux as per-CPU interrupts. This model works
+well on arm64 where a single per-CPU Linux IRQ is allocated for
+VMBUS_MESSAGE_SINT. This IRQ appears in /proc/interrupts as an IRQ labelled
+"Hyper-V VMbus". Since x86/x64 lacks support for per-CPU IRQs, an x86
+interrupt vector is statically allocated (HYPERVISOR_CALLBACK_VECTOR)
+across all CPUs and explicitly coded to call vmbus_isr(). In this case,
+there's no Linux IRQ, and the interrupts are visible in aggregate in
+/proc/interrupts on the "HYP" line.
+
+The synic provides the means to demultiplex the architectural interrupt into
+one or more logical interrupts and route the logical interrupt to the proper
+VMBus handler in Linux. This demultiplexing is done by vmbus_isr() and
+related functions that access synic data structures.
+
+The synic is not modeled in Linux as an irq chip or irq domain,
+and the demultiplexed logical interrupts are not Linux IRQs. As such,
+they don't appear in /proc/interrupts or /proc/irq. The CPU
+affinity for one of these logical interrupts is controlled via an
+entry under /sys/bus/vmbus as described below.
+
+VMBus interrupts
----------------
-VMbus provides a mechanism for the guest to interrupt the host when
+VMBus provides a mechanism for the guest to interrupt the host when
the guest has queued new messages in a ring buffer. The host
expects that the guest will send an interrupt only when an "out"
ring buffer transitions from empty to non-empty. If the guest sends
@@ -176,63 +207,55 @@ unnecessary. If a guest sends an excessive number of unnecessary
interrupts, the host may throttle that guest by suspending its
execution for a few seconds to prevent a denial-of-service attack.
-Similarly, the host will interrupt the guest when it sends a new
-message on the VMbus control path, or when a VMbus channel "in" ring
-buffer transitions from empty to non-empty. Each CPU in the guest
-may receive VMbus interrupts, so they are best modeled as per-CPU
-interrupts in Linux. This model works well on arm64 where a single
-per-CPU IRQ is allocated for VMbus. Since x86/x64 lacks support for
-per-CPU IRQs, an x86 interrupt vector is statically allocated (see
-HYPERVISOR_CALLBACK_VECTOR) across all CPUs and explicitly coded to
-call the VMbus interrupt service routine. These interrupts are
-visible in /proc/interrupts on the "HYP" line.
-
-The guest CPU that a VMbus channel will interrupt is selected by the
+Similarly, the host will interrupt the guest via the synic when
+it sends a new message on the VMBus control path, or when a VMBus
+channel "in" ring buffer transitions from empty to non-empty due to
+the host inserting a new VMBus channel message. The control message stream
+and each VMBus channel "in" ring buffer are separate logical interrupts
+that are demultiplexed by vmbus_isr(). It demultiplexes by first checking
+for channel interrupts by calling vmbus_chan_sched(), which looks at a synic
+bitmap to determine which channels have pending interrupts on this CPU.
+If multiple channels have pending interrupts for this CPU, they are
+processed sequentially. When all channel interrupts have been processed,
+vmbus_isr() checks for and processes any messages received on the VMBus
+control path.
+
+The guest CPU that a VMBus channel will interrupt is selected by the
guest when the channel is created, and the host is informed of that
-selection. VMbus devices are broadly grouped into two categories:
+selection. VMBus devices are broadly grouped into two categories:
-1. "Slow" devices that need only one VMbus channel. The devices
+1. "Slow" devices that need only one VMBus channel. The devices
(such as keyboard, mouse, heartbeat, and timesync) generate
- relatively few interrupts. Their VMbus channels are all
+ relatively few interrupts. Their VMBus channels are all
assigned to interrupt the VMBUS_CONNECT_CPU, which is always
CPU 0.
-2. "High speed" devices that may use multiple VMbus channels for
+2. "High speed" devices that may use multiple VMBus channels for
higher parallelism and performance. These devices include the
- synthetic SCSI controller and synthetic NIC. Their VMbus
+ synthetic SCSI controller and synthetic NIC. Their VMBus
channels interrupts are assigned to CPUs that are spread out
among the available CPUs in the VM so that interrupts on
multiple channels can be processed in parallel.
-The assignment of VMbus channel interrupts to CPUs is done in the
+The assignment of VMBus channel interrupts to CPUs is done in the
function init_vp_index(). This assignment is done outside of the
normal Linux interrupt affinity mechanism, so the interrupts are
neither "unmanaged" nor "managed" interrupts.
-The CPU that a VMbus channel will interrupt can be seen in
+The CPU that a VMBus channel will interrupt can be seen in
/sys/bus/vmbus/devices/<deviceGUID>/ channels/<channelRelID>/cpu.
When running on later versions of Hyper-V, the CPU can be changed
-by writing a new value to this sysfs entry. Because the interrupt
-assignment is done outside of the normal Linux affinity mechanism,
-there are no entries in /proc/irq corresponding to individual
-VMbus channel interrupts.
+by writing a new value to this sysfs entry. Because VMBus channel
+interrupts are not Linux IRQs, there are no entries in /proc/interrupts
+or /proc/irq corresponding to individual VMBus channel interrupts.
An online CPU in a Linux guest may not be taken offline if it has
-VMbus channel interrupts assigned to it. Any such channel
+VMBus channel interrupts assigned to it. Any such channel
interrupts must first be manually reassigned to another CPU as
described above. When no channel interrupts are assigned to the
CPU, it can be taken offline.
-When a guest CPU receives a VMbus interrupt from the host, the
-function vmbus_isr() handles the interrupt. It first checks for
-channel interrupts by calling vmbus_chan_sched(), which looks at a
-bitmap setup by the host to determine which channels have pending
-interrupts on this CPU. If multiple channels have pending
-interrupts for this CPU, they are processed sequentially. When all
-channel interrupts have been processed, vmbus_isr() checks for and
-processes any message received on the VMbus control path.
-
-The VMbus channel interrupt handling code is designed to work
+The VMBus channel interrupt handling code is designed to work
correctly even if an interrupt is received on a CPU other than the
CPU assigned to the channel. Specifically, the code does not use
CPU-based exclusion for correctness. In normal operation, Hyper-V
@@ -242,23 +265,23 @@ when Hyper-V will make the transition. The code must work correctly
even if there is a time lag before Hyper-V starts interrupting the
new CPU. See comments in target_cpu_store().
-VMbus device creation/deletion
+VMBus device creation/deletion
------------------------------
Hyper-V and the Linux guest have a separate message-passing path
that is used for synthetic device creation and deletion. This
-path does not use a VMbus channel. See vmbus_post_msg() and
+path does not use a VMBus channel. See vmbus_post_msg() and
vmbus_on_msg_dpc().
The first step is for the guest to connect to the generic
-Hyper-V VMbus mechanism. As part of establishing this connection,
-the guest and Hyper-V agree on a VMbus protocol version they will
+Hyper-V VMBus mechanism. As part of establishing this connection,
+the guest and Hyper-V agree on a VMBus protocol version they will
use. This negotiation allows newer Linux kernels to run on older
Hyper-V versions, and vice versa.
The guest then tells Hyper-V to "send offers". Hyper-V sends an
offer message to the guest for each synthetic device that the VM
-is configured to have. Each VMbus device type has a fixed GUID
-known as the "class ID", and each VMbus device instance is also
+is configured to have. Each VMBus device type has a fixed GUID
+known as the "class ID", and each VMBus device instance is also
identified by a GUID. The offer message from Hyper-V contains
both GUIDs to uniquely (within the VM) identify the device.
There is one offer message for each device instance, so a VM with
@@ -275,7 +298,7 @@ type based on the class ID, and invokes the correct driver to set up
the device. Driver/device matching is performed using the standard
Linux mechanism.
-The device driver probe function opens the primary VMbus channel to
+The device driver probe function opens the primary VMBus channel to
the corresponding VSP. It allocates guest memory for the channel
ring buffers and shares the ring buffer with the Hyper-V host by
giving the host a list of GPAs for the ring buffer memory. See
@@ -285,7 +308,7 @@ Once the ring buffer is set up, the device driver and VSP exchange
setup messages via the primary channel. These messages may include
negotiating the device protocol version to be used between the Linux
VSC and the VSP on the Hyper-V host. The setup messages may also
-include creating additional VMbus channels, which are somewhat
+include creating additional VMBus channels, which are somewhat
mis-named as "sub-channels" since they are functionally
equivalent to the primary channel once they are created.
diff --git a/MAINTAINERS b/MAINTAINERS
index d6c90161c7bf..06ecfa64a39a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -439,6 +439,16 @@ W: http://wiki.analog.com/AD7142
W: https://ez.analog.com/linux-software-drivers
F: drivers/input/misc/ad714x.c
+AD738X ADC DRIVER (AD7380/1/2/4)
+M: Michael Hennerich <michael.hennerich@analog.com>
+M: Nuno Sá <nuno.sa@analog.com>
+R: David Lechner <dlechner@baylibre.com>
+S: Supported
+W: https://wiki.analog.com/resources/tools-software/linux-drivers/iio-adc/ad738x
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml
+F: drivers/iio/adc/ad7380.c
+
AD7877 TOUCHSCREEN DRIVER
M: Michael Hennerich <michael.hennerich@analog.com>
S: Supported
@@ -1044,7 +1054,7 @@ M: Joerg Roedel <joro@8bytes.org>
R: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
L: iommu@lists.linux.dev
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: drivers/iommu/amd/
F: include/linux/amd-iommu.h
@@ -1107,7 +1117,6 @@ L: linux-pm@vger.kernel.org
S: Supported
F: Documentation/admin-guide/pm/amd-pstate.rst
F: drivers/cpufreq/amd-pstate*
-F: include/linux/amd-pstate.h
F: tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
AMD PTDMA DRIVER
@@ -1208,7 +1217,7 @@ F: Documentation/devicetree/bindings/iio/adc/adi,ad7091r*
F: drivers/iio/adc/ad7091r*
ANALOG DEVICES INC AD7192 DRIVER
-M: Alexandru Tachici <alexandru.tachici@analog.com>
+M: Alisa-Dariana Roman <alisa.roman@analog.com>
L: linux-iio@vger.kernel.org
S: Supported
W: https://ez.analog.com/linux-software-drivers
@@ -3522,6 +3531,13 @@ F: include/linux/cfag12864b.h
F: include/uapi/linux/map_to_14segment.h
F: include/uapi/linux/map_to_7segment.h
+AVAGO APDS9306 AMBIENT LIGHT SENSOR DRIVER
+M: Subhajit Ghosh <subhajit.ghosh@tweaklogic.com>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/iio/light/avago,apds9300.yaml
+F: drivers/iio/light/apds9306.c
+
AVIA HX711 ANALOG DIGITAL CONVERTER IIO DRIVER
M: Andreas Klinger <ak@it-klinger.de>
L: linux-iio@vger.kernel.org
@@ -3854,6 +3870,7 @@ BPF JIT for ARM64
M: Daniel Borkmann <daniel@iogearbox.net>
M: Alexei Starovoitov <ast@kernel.org>
M: Puranjay Mohan <puranjay@kernel.org>
+R: Xu Kuohai <xukuohai@huaweicloud.com>
L: bpf@vger.kernel.org
S: Supported
F: arch/arm64/net/
@@ -3980,7 +3997,7 @@ R: Song Liu <song@kernel.org>
R: Yonghong Song <yonghong.song@linux.dev>
R: John Fastabend <john.fastabend@gmail.com>
R: KP Singh <kpsingh@kernel.org>
-R: Stanislav Fomichev <sdf@google.com>
+R: Stanislav Fomichev <sdf@fomichev.me>
R: Hao Luo <haoluo@google.com>
R: Jiri Olsa <jolsa@kernel.org>
L: bpf@vger.kernel.org
@@ -4083,12 +4100,13 @@ F: kernel/bpf/ringbuf.c
BPF [SECURITY & LSM] (Security Audit and Enforcement using BPF)
M: KP Singh <kpsingh@kernel.org>
-R: Matt Bobrowski <mattbobrowski@google.com>
+M: Matt Bobrowski <mattbobrowski@google.com>
L: bpf@vger.kernel.org
S: Maintained
F: Documentation/bpf/prog_lsm.rst
F: include/linux/bpf_lsm.h
F: kernel/bpf/bpf_lsm.c
+F: kernel/trace/bpf_trace.c
F: security/bpf/
BPF [SELFTESTS] (Test Runners & Infrastructure)
@@ -5187,7 +5205,6 @@ F: Documentation/devicetree/bindings/media/i2c/chrontel,ch7322.yaml
F: drivers/media/cec/i2c/ch7322.c
CIRRUS LOGIC AUDIO CODEC DRIVERS
-M: James Schulman <james.schulman@cirrus.com>
M: David Rhodes <david.rhodes@cirrus.com>
M: Richard Fitzgerald <rf@opensource.cirrus.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -5296,7 +5313,7 @@ F: drivers/infiniband/hw/usnic/
CLANG CONTROL FLOW INTEGRITY SUPPORT
M: Sami Tolvanen <samitolvanen@google.com>
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
R: Nathan Chancellor <nathan@kernel.org>
L: llvm@lists.linux.dev
S: Supported
@@ -8212,7 +8229,7 @@ F: rust/kernel/net/phy.rs
EXEC & BINFMT API, ELF
R: Eric Biederman <ebiederm@xmission.com>
-R: Kees Cook <keescook@chromium.org>
+R: Kees Cook <kees@kernel.org>
L: linux-mm@kvack.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve
@@ -8613,7 +8630,7 @@ S: Maintained
F: drivers/net/ethernet/nvidia/*
FORTIFY_SOURCE
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
L: linux-hardening@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@@ -9103,7 +9120,7 @@ F: include/linux/mfd/gsc.h
F: include/linux/platform_data/gsc_hwmon.h
GCC PLUGINS
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
L: linux-hardening@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@@ -9237,7 +9254,7 @@ S: Maintained
F: drivers/input/touchscreen/resistive-adc-touch.c
GENERIC STRING LIBRARY
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
R: Andy Shevchenko <andy@kernel.org>
L: linux-hardening@vger.kernel.org
S: Supported
@@ -11035,8 +11052,8 @@ F: include/uapi/drm/i915_drm.h
INTEL DRM XE DRIVER (Lunar Lake and newer)
M: Lucas De Marchi <lucas.demarchi@intel.com>
-M: Oded Gabbay <ogabbay@kernel.org>
M: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+M: Rodrigo Vivi <rodrigo.vivi@intel.com>
L: intel-xe@lists.freedesktop.org
S: Supported
W: https://drm.pages.freedesktop.org/intel-docs/
@@ -11157,7 +11174,7 @@ M: David Woodhouse <dwmw2@infradead.org>
M: Lu Baolu <baolu.lu@linux.intel.com>
L: iommu@lists.linux.dev
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: drivers/iommu/intel/
INTEL IPU3 CSI-2 CIO2 DRIVER
@@ -11510,6 +11527,7 @@ M: Jean-Baptiste Maneyrol <jmaneyrol@invensense.com>
L: linux-iio@vger.kernel.org
S: Maintained
W: https://invensense.tdk.com/
+F: Documentation/ABI/testing/sysfs-bus-iio-inv_icm42600
F: Documentation/devicetree/bindings/iio/imu/invensense,icm42600.yaml
F: drivers/iio/imu/inv_icm42600/
@@ -11530,7 +11548,7 @@ IOMMU DMA-API LAYER
M: Robin Murphy <robin.murphy@arm.com>
L: iommu@lists.linux.dev
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: drivers/iommu/dma-iommu.c
F: drivers/iommu/dma-iommu.h
F: drivers/iommu/iova.c
@@ -11542,7 +11560,7 @@ M: Will Deacon <will@kernel.org>
R: Robin Murphy <robin.murphy@arm.com>
L: iommu@lists.linux.dev
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: Documentation/devicetree/bindings/iommu/
F: Documentation/userspace-api/iommu.rst
F: drivers/iommu/
@@ -11951,7 +11969,7 @@ F: scripts/package/
F: usr/
KERNEL HARDENING (not covered by other areas)
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
R: Gustavo A. R. Silva <gustavoars@kernel.org>
L: linux-hardening@vger.kernel.org
S: Supported
@@ -12383,7 +12401,6 @@ F: drivers/video/backlight/ktz8866.c
KVM PARAVIRT (KVM/paravirt)
M: Paolo Bonzini <pbonzini@redhat.com>
-R: Wanpeng Li <wanpengli@tencent.com>
R: Vitaly Kuznetsov <vkuznets@redhat.com>
L: kvm@vger.kernel.org
S: Supported
@@ -12479,7 +12496,7 @@ F: drivers/scsi/53c700*
LEAKING_ADDRESSES
M: Tycho Andersen <tycho@tycho.pizza>
-R: Kees Cook <keescook@chromium.org>
+R: Kees Cook <kees@kernel.org>
L: linux-hardening@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@@ -12775,7 +12792,7 @@ F: arch/powerpc/platforms/8xx/
F: arch/powerpc/platforms/83xx/
LINUX KERNEL DUMP TEST MODULE (LKDTM)
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Maintained
F: drivers/misc/lkdtm/*
F: tools/testing/selftests/lkdtm/*
@@ -12905,7 +12922,7 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/usb/dvb-usb-v2/lmedm04*
LOADPIN SECURITY MODULE
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: Documentation/admin-guide/LSM/LoadPin.rst
@@ -15238,7 +15255,6 @@ F: drivers/staging/most/
F: include/linux/most.h
MOTORCOMM PHY DRIVER
-M: Peter Geis <pgwipeout@gmail.com>
M: Frank <Frank.Sae@motor-comm.com>
L: netdev@vger.kernel.org
S: Maintained
@@ -15827,7 +15843,7 @@ F: drivers/nfc/virtual_ncidev.c
F: tools/testing/selftests/nci/
NFS, SUNRPC, AND LOCKD CLIENTS
-M: Trond Myklebust <trond.myklebust@hammerspace.com>
+M: Trond Myklebust <trondmy@kernel.org>
M: Anna Schumaker <anna@kernel.org>
L: linux-nfs@vger.kernel.org
S: Maintained
@@ -17534,7 +17550,6 @@ F: include/linux/peci.h
PENSANDO ETHERNET DRIVERS
M: Shannon Nelson <shannon.nelson@amd.com>
M: Brett Creeley <brett.creeley@amd.com>
-M: drivers@pensando.io
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/device_drivers/ethernet/pensando/ionic.rst
@@ -17998,7 +18013,7 @@ F: tools/testing/selftests/proc/
PROC SYSCTL
M: Luis Chamberlain <mcgrof@kernel.org>
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
M: Joel Granados <j.granados@samsung.com>
L: linux-kernel@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
@@ -18054,7 +18069,7 @@ F: Documentation/devicetree/bindings/net/pse-pd/
F: drivers/net/pse-pd/
PSTORE FILESYSTEM
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
R: Tony Luck <tony.luck@intel.com>
R: Guilherme G. Piccoli <gpiccoli@igalia.com>
L: linux-hardening@vger.kernel.org
@@ -18212,6 +18227,7 @@ QCOM AUDIO (ASoC) DRIVERS
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
M: Banajit Goswami <bgoswami@quicinc.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+L: linux-arm-msm@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/soc/qcom/qcom,apr*
F: Documentation/devicetree/bindings/sound/qcom,*
@@ -18499,6 +18515,7 @@ QUALCOMM FASTRPC DRIVER
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
M: Amol Maheshwari <amahesh@qti.qualcomm.com>
L: linux-arm-msm@vger.kernel.org
+L: dri-devel@lists.freedesktop.org
S: Maintained
F: Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml
F: drivers/misc/fastrpc.c
@@ -19938,6 +19955,14 @@ F: include/linux/wait.h
F: include/uapi/linux/sched.h
F: kernel/sched/
+SCIOSENSE ENS160 MULTI-GAS SENSOR DRIVER
+M: Gustavo Silva <gustavograzs@gmail.com>
+S: Maintained
+F: drivers/iio/chemical/ens160_core.c
+F: drivers/iio/chemical/ens160_i2c.c
+F: drivers/iio/chemical/ens160_spi.c
+F: drivers/iio/chemical/ens160.h
+
SCSI LIBSAS SUBSYSTEM
R: John Garry <john.g.garry@oracle.com>
R: Jason Yan <yanaijie@huawei.com>
@@ -20060,7 +20085,7 @@ F: drivers/media/cec/platform/seco/seco-cec.c
F: drivers/media/cec/platform/seco/seco-cec.h
SECURE COMPUTING
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
R: Andy Lutomirski <luto@amacapital.net>
R: Will Drewry <wad@chromium.org>
S: Supported
@@ -21316,7 +21341,7 @@ F: arch/riscv/boot/dts/starfive/
STARFIVE DWMAC GLUE LAYER
M: Emil Renner Berthing <kernel@esmil.dk>
-M: Samin Guo <samin.guo@starfivetech.com>
+M: Minda Chen <minda.chen@starfivetech.com>
S: Maintained
F: Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -22380,6 +22405,14 @@ M: Robert Richter <rric@kernel.org>
S: Odd Fixes
F: drivers/gpio/gpio-thunderx.c
+TI ADS1119 ADC DRIVER
+M: Francesco Dolcini <francesco@dolcini.it>
+M: João Paulo Gonçalves <jpaulo.silvagoncalves@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/iio/adc/ti,ads1119.yaml
+F: drivers/iio/adc/ti-ads1119.c
+
TI ADS7924 ADC DRIVER
M: Hugo Villeneuve <hvilleneuve@dimonoff.com>
L: linux-iio@vger.kernel.org
@@ -22679,7 +22712,7 @@ L: tomoyo-users-en@lists.osdn.me (subscribers-only, for users in English)
L: tomoyo-dev@lists.osdn.me (subscribers-only, for developers in Japanese)
L: tomoyo-users@lists.osdn.me (subscribers-only, for users in Japanese)
S: Maintained
-W: https://tomoyo.osdn.jp/
+W: https://tomoyo.sourceforge.net/
F: security/tomoyo/
TOPSTAR LAPTOP EXTRAS DRIVER
@@ -22748,7 +22781,7 @@ M: Jarkko Sakkinen <jarkko@kernel.org>
R: Jason Gunthorpe <jgg@ziepe.ca>
L: linux-integrity@vger.kernel.org
S: Maintained
-W: https://gitlab.com/jarkkojs/linux-tpmdd-test
+W: https://codeberg.org/jarkko/linux-tpmdd-test
Q: https://patchwork.kernel.org/project/linux-integrity/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git
F: Documentation/devicetree/bindings/tpm/
@@ -22974,7 +23007,7 @@ F: drivers/block/ublk_drv.c
F: include/uapi/linux/ublk_cmd.h
UBSAN
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
R: Marco Elver <elver@google.com>
R: Andrey Konovalov <andreyknvl@gmail.com>
R: Andrey Ryabinin <ryabinin.a.a@gmail.com>
@@ -23976,7 +24009,6 @@ VMALLOC
M: Andrew Morton <akpm@linux-foundation.org>
R: Uladzislau Rezki <urezki@gmail.com>
R: Christoph Hellwig <hch@infradead.org>
-R: Lorenzo Stoakes <lstoakes@gmail.com>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
@@ -24812,7 +24844,7 @@ F: drivers/net/hamradio/yam*
F: include/linux/yam.h
YAMA SECURITY MODULE
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: Documentation/admin-guide/LSM/Yama.rst
diff --git a/Makefile b/Makefile
index f975b6396328..06aa6402b385 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 10
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc6
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/arc/net/bpf_jit.h b/arch/arc/net/bpf_jit.h
index ec44873c42d1..495f3023e4c1 100644
--- a/arch/arc/net/bpf_jit.h
+++ b/arch/arc/net/bpf_jit.h
@@ -39,7 +39,7 @@
/************** Functions that the back-end must provide **************/
/* Extension for 32-bit operations. */
-inline u8 zext(u8 *buf, u8 rd);
+u8 zext(u8 *buf, u8 rd);
/***** Moves *****/
u8 mov_r32(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
u8 mov_r32_i32(u8 *buf, u8 reg, s32 imm);
diff --git a/arch/arc/net/bpf_jit_arcv2.c b/arch/arc/net/bpf_jit_arcv2.c
index 31bfb6e9ce00..4458e409ca0a 100644
--- a/arch/arc/net/bpf_jit_arcv2.c
+++ b/arch/arc/net/bpf_jit_arcv2.c
@@ -62,7 +62,7 @@ enum {
* If/when we decide to add ARCv2 instructions that do use register pairs,
* the mapping, hopefully, doesn't need to be revisited.
*/
-const u8 bpf2arc[][2] = {
+static const u8 bpf2arc[][2] = {
/* Return value from in-kernel function, and exit value from eBPF */
[BPF_REG_0] = {ARC_R_8, ARC_R_9},
/* Arguments from eBPF program to in-kernel function */
@@ -1302,7 +1302,7 @@ static u8 arc_b(u8 *buf, s32 offset)
/************* Packers (Deal with BPF_REGs) **************/
-inline u8 zext(u8 *buf, u8 rd)
+u8 zext(u8 *buf, u8 rd)
{
if (rd != BPF_REG_FP)
return arc_movi_r(buf, REG_HI(rd), 0);
@@ -2235,6 +2235,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
break;
default:
/* The caller must have handled this. */
+ break;
}
} else {
/*
@@ -2253,6 +2254,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
break;
default:
/* The caller must have handled this. */
+ break;
}
}
@@ -2517,7 +2519,7 @@ u8 arc_epilogue(u8 *buf, u32 usage, u16 frame_size)
#define JCC64_NR_OF_JMPS 3 /* Number of jumps in jcc64 template. */
#define JCC64_INSNS_TO_END 3 /* Number of insn. inclusive the 2nd jmp to end. */
#define JCC64_SKIP_JMP 1 /* Index of the "skip" jump to "end". */
-const struct {
+static const struct {
/*
* "jit_off" is common between all "jmp[]" and is coupled with
* "cond" of each "jmp[]" instance. e.g.:
@@ -2883,7 +2885,7 @@ u8 gen_jmp_64(u8 *buf, u8 rd, u8 rs, u8 cond, u32 curr_off, u32 targ_off)
* The "ARC_CC_SET" becomes "CC_unequal" because of the "tst"
* instruction that precedes the conditional branch.
*/
-const u8 arcv2_32_jmps[ARC_CC_LAST] = {
+static const u8 arcv2_32_jmps[ARC_CC_LAST] = {
[ARC_CC_UGT] = CC_great_u,
[ARC_CC_UGE] = CC_great_eq_u,
[ARC_CC_ULT] = CC_less_u,
diff --git a/arch/arc/net/bpf_jit_core.c b/arch/arc/net/bpf_jit_core.c
index 6f6b4ffccf2c..e3628922c24a 100644
--- a/arch/arc/net/bpf_jit_core.c
+++ b/arch/arc/net/bpf_jit_core.c
@@ -159,7 +159,7 @@ static void jit_dump(const struct jit_context *ctx)
/* Initialise the context so there's no garbage. */
static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
{
- memset(ctx, 0, sizeof(ctx));
+ memset(ctx, 0, sizeof(*ctx));
ctx->orig_prog = prog;
@@ -167,7 +167,7 @@ static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
ctx->prog = bpf_jit_blind_constants(prog);
if (IS_ERR(ctx->prog))
return PTR_ERR(ctx->prog);
- ctx->blinded = (ctx->prog == ctx->orig_prog ? false : true);
+ ctx->blinded = (ctx->prog != ctx->orig_prog);
/* If the verifier doesn't zero-extend, then we have to do it. */
ctx->do_zext = !ctx->prog->aux->verifier_zext;
@@ -1182,12 +1182,12 @@ static int jit_prepare(struct jit_context *ctx)
}
/*
- * All the "handle_*()" functions have been called before by the
- * "jit_prepare()". If there was an error, we would know by now.
- * Therefore, no extra error checking at this point, other than
- * a sanity check at the end that expects the calculated length
- * (jit.len) to be equal to the length of generated instructions
- * (jit.index).
+ * jit_compile() is the real compilation phase. jit_prepare() is
+ * invoked before jit_compile() as a dry-run to make sure everything
+ * will go OK and allocate the necessary memory.
+ *
+ * In the end, jit_compile() checks if it has produced the same number
+ * of instructions as jit_prepare() would.
*/
static int jit_compile(struct jit_context *ctx)
{
@@ -1407,9 +1407,9 @@ static struct bpf_prog *do_extra_pass(struct bpf_prog *prog)
/*
* This function may be invoked twice for the same stream of BPF
- * instructions. The "extra pass" happens, when there are "call"s
- * involved that their addresses are not known during the first
- * invocation.
+ * instructions. The "extra pass" happens, when there are
+ * (re)locations involved that their addresses are not known
+ * during the first run.
*/
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi
index d80440446473..05d7a462ea25 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi
@@ -85,7 +85,7 @@
};
};
- panel {
+ panel_dpi: panel {
compatible = "sii,43wvf1g";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_display_power>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso b/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
index c84e9b052527..151e9cee3c87 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
+++ b/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
@@ -10,8 +10,6 @@
/plugin/;
&{/} {
- /delete-node/ panel;
-
hdmi: connector-hdmi {
compatible = "hdmi-connector";
label = "hdmi";
@@ -82,6 +80,10 @@
};
};
+&panel_dpi {
+ status = "disabled";
+};
+
&tve {
status = "disabled";
};
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 78282ced5038..e408399d5f0e 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -14,6 +14,7 @@
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
+#include <asm/uaccess.h>
#ifdef CONFIG_EFI
void efi_init(void);
@@ -25,6 +26,18 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, boo
#define arch_efi_call_virt_setup() efi_virtmap_load()
#define arch_efi_call_virt_teardown() efi_virtmap_unload()
+#ifdef CONFIG_CPU_TTBR0_PAN
+#undef arch_efi_call_virt
+#define arch_efi_call_virt(p, f, args...) ({ \
+ unsigned int flags = uaccess_save_and_enable(); \
+ efi_status_t res = _Generic((p)->f(args), \
+ efi_status_t: (p)->f(args), \
+ default: ((p)->f(args), EFI_ABORTED)); \
+ uaccess_restore(flags); \
+ res; \
+})
+#endif
+
#define ARCH_EFI_IRQ_FLAGS_MASK \
(PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
PSR_T_BIT | MODE_MASK)
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index a0b6d1e3812f..e61591f33a6c 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -232,11 +232,24 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long old;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
+err_out:
return;
if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) {
- /* FP points one word below parent's top of stack */
- frame_pointer += 4;
+ /*
+ * Usually, the stack frames are contiguous in memory but cases
+ * have been observed where the next stack frame does not live
+ * at 'frame_pointer + 4' as this code used to assume.
+ *
+ * Instead, dereference the field in the stack frame that
+ * stores the SP of the calling frame: to avoid unbounded
+ * recursion, this cannot involve any ftrace instrumented
+ * functions, so use the __get_kernel_nofault() primitive
+ * directly.
+ */
+ __get_kernel_nofault(&frame_pointer,
+ (unsigned long *)(frame_pointer - 8),
+ unsigned long, err_out);
} else {
struct stackframe frame = {
.fp = frame_pointer,
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
index 4768b05fd765..98544741ce17 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/phy/phy-imx8-pcie.h>
#include <dt-bindings/pwm/pwm.h>
#include "imx8mm.dtsi"
+#include "imx8mm-overdrive.dtsi"
/ {
chosen {
@@ -935,7 +936,7 @@
/* Verdin GPIO_9_DSI (pulled-up as active-low) */
pinctrl_gpio_9_dsi: gpio9dsigrp {
fsl,pins =
- <MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x146>; /* SODIMM 17 */
+ <MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x1c6>; /* SODIMM 17 */
};
/* Verdin GPIO_10_DSI (pulled-up as active-low) */
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
index 43f1d45ccc96..f5115f9e8c47 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
@@ -254,7 +254,7 @@
<&clk IMX8MP_CLK_CLKOUT2>,
<&clk IMX8MP_AUDIO_PLL2_OUT>;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
- assigned-clock-rates = <13000000>, <13000000>, <156000000>;
+ assigned-clock-rates = <13000000>, <13000000>, <208000000>;
reset-gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
index dec57fad6828..e2b5e7ac3e46 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
@@ -219,7 +219,7 @@
bluetooth {
compatible = "brcm,bcm4330-bt";
- shutdown-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
index 5c6b39c6933f..6e05361c1ffb 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
@@ -36,7 +36,7 @@
regulator-name = "SD1_SPWR";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
- gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>;
+ gpio = <&lsio_gpio4 7 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
index d400d85f42a9..bd98eff4d685 100644
--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
@@ -296,7 +296,6 @@
vmmc-supply = <&reg_usdhc2_vmmc>;
bus-width = <4>;
status = "okay";
- no-sdio;
no-mmc;
};
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index e4546b29dd0c..fd87c4b8f984 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -146,7 +146,7 @@
/* Coprocessor traps */
.macro __init_el2_cptr
__check_hvhe .LnVHE_\@, x1
- mov x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
+ mov x0, #CPACR_ELx_FPEN
msr cpacr_el1, x0
b .Lskip_set_cptr_\@
.LnVHE_\@:
@@ -277,7 +277,7 @@
// (h)VHE case
mrs x0, cpacr_el1 // Disable SVE traps
- orr x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
+ orr x0, x0, #CPACR_ELx_ZEN
msr cpacr_el1, x0
b .Lskip_set_cptr_\@
@@ -298,7 +298,7 @@
// (h)VHE case
mrs x0, cpacr_el1 // Disable SME traps
- orr x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN)
+ orr x0, x0, #CPACR_ELx_SMEN
msr cpacr_el1, x0
b .Lskip_set_cptr_sme_\@
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 4ff0ae3f6d66..41fd90895dfc 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -153,8 +153,9 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
* emit the large TLP from the CPU.
*/
-static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
- const u32 *from, size_t count)
+static __always_inline void
+__const_memcpy_toio_aligned32(volatile u32 __iomem *to, const u32 *from,
+ size_t count)
{
switch (count) {
case 8:
@@ -196,24 +197,22 @@ static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count);
-static inline void __const_iowrite32_copy(void __iomem *to, const void *from,
- size_t count)
+static __always_inline void
+__iowrite32_copy(void __iomem *to, const void *from, size_t count)
{
- if (count == 8 || count == 4 || count == 2 || count == 1) {
+ if (__builtin_constant_p(count) &&
+ (count == 8 || count == 4 || count == 2 || count == 1)) {
__const_memcpy_toio_aligned32(to, from, count);
dgh();
} else {
__iowrite32_copy_full(to, from, count);
}
}
+#define __iowrite32_copy __iowrite32_copy
-#define __iowrite32_copy(to, from, count) \
- (__builtin_constant_p(count) ? \
- __const_iowrite32_copy(to, from, count) : \
- __iowrite32_copy_full(to, from, count))
-
-static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
- const u64 *from, size_t count)
+static __always_inline void
+__const_memcpy_toio_aligned64(volatile u64 __iomem *to, const u64 *from,
+ size_t count)
{
switch (count) {
case 8:
@@ -255,21 +254,18 @@ static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count);
-static inline void __const_iowrite64_copy(void __iomem *to, const void *from,
- size_t count)
+static __always_inline void
+__iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
- if (count == 8 || count == 4 || count == 2 || count == 1) {
+ if (__builtin_constant_p(count) &&
+ (count == 8 || count == 4 || count == 2 || count == 1)) {
__const_memcpy_toio_aligned64(to, from, count);
dgh();
} else {
__iowrite64_copy_full(to, from, count);
}
}
-
-#define __iowrite64_copy(to, from, count) \
- (__builtin_constant_p(count) ? \
- __const_iowrite64_copy(to, from, count) : \
- __iowrite64_copy_full(to, from, count))
+#define __iowrite64_copy __iowrite64_copy
/*
* I/O memory mapping functions.
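A small sketch, with made-up names, of the pattern behind the __iowrite32_copy() rework above: fold the __builtin_constant_p() test into an __always_inline helper so a literal count still selects the fast path, with plain memcpy() standing in for the unrolled MMIO stores.

#include <stddef.h>
#include <string.h>

static void copy_words_full(unsigned int *to, const unsigned int *from,
                            size_t count)
{
        memcpy(to, from, count * sizeof(*to));  /* generic out-of-line path */
}

static inline __attribute__((always_inline))
void copy_words(unsigned int *to, const unsigned int *from, size_t count)
{
        /* Because this is always inlined, a literal 'count' is still a
         * compile-time constant here, so no wrapper macro is needed to
         * pick the unrolled path. */
        if (__builtin_constant_p(count) &&
            (count == 1 || count == 2 || count == 4 || count == 8))
                memcpy(to, from, count * sizeof(*to)); /* stand-in for unrolled stores */
        else
                copy_words_full(to, from, count);
}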
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index e01bb5ca13b7..b2adc2c6c82a 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -305,6 +305,12 @@
GENMASK(19, 14) | \
BIT(11))
+#define CPTR_VHE_EL2_RES0 (GENMASK(63, 32) | \
+ GENMASK(27, 26) | \
+ GENMASK(23, 22) | \
+ GENMASK(19, 18) | \
+ GENMASK(15, 0))
+
/* Hyp Debug Configuration Register bits */
#define MDCR_EL2_E2TB_MASK (UL(0x3))
#define MDCR_EL2_E2TB_SHIFT (UL(24))
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 501e3e019c93..21650e7924d4 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -557,6 +557,68 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
vcpu_set_flag((v), e); \
} while (0)
+#define __build_check_all_or_none(r, bits) \
+ BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))
+
+#define __cpacr_to_cptr_clr(clr, set) \
+ ({ \
+ u64 cptr = 0; \
+ \
+ if ((set) & CPACR_ELx_FPEN) \
+ cptr |= CPTR_EL2_TFP; \
+ if ((set) & CPACR_ELx_ZEN) \
+ cptr |= CPTR_EL2_TZ; \
+ if ((set) & CPACR_ELx_SMEN) \
+ cptr |= CPTR_EL2_TSM; \
+ if ((clr) & CPACR_ELx_TTA) \
+ cptr |= CPTR_EL2_TTA; \
+ if ((clr) & CPTR_EL2_TAM) \
+ cptr |= CPTR_EL2_TAM; \
+ if ((clr) & CPTR_EL2_TCPAC) \
+ cptr |= CPTR_EL2_TCPAC; \
+ \
+ cptr; \
+ })
+
+#define __cpacr_to_cptr_set(clr, set) \
+ ({ \
+ u64 cptr = 0; \
+ \
+ if ((clr) & CPACR_ELx_FPEN) \
+ cptr |= CPTR_EL2_TFP; \
+ if ((clr) & CPACR_ELx_ZEN) \
+ cptr |= CPTR_EL2_TZ; \
+ if ((clr) & CPACR_ELx_SMEN) \
+ cptr |= CPTR_EL2_TSM; \
+ if ((set) & CPACR_ELx_TTA) \
+ cptr |= CPTR_EL2_TTA; \
+ if ((set) & CPTR_EL2_TAM) \
+ cptr |= CPTR_EL2_TAM; \
+ if ((set) & CPTR_EL2_TCPAC) \
+ cptr |= CPTR_EL2_TCPAC; \
+ \
+ cptr; \
+ })
+
+#define cpacr_clear_set(clr, set) \
+ do { \
+ BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0); \
+ BUILD_BUG_ON((clr) & CPACR_ELx_E0POE); \
+ __build_check_all_or_none((clr), CPACR_ELx_FPEN); \
+ __build_check_all_or_none((set), CPACR_ELx_FPEN); \
+ __build_check_all_or_none((clr), CPACR_ELx_ZEN); \
+ __build_check_all_or_none((set), CPACR_ELx_ZEN); \
+ __build_check_all_or_none((clr), CPACR_ELx_SMEN); \
+ __build_check_all_or_none((set), CPACR_ELx_SMEN); \
+ \
+ if (has_vhe() || has_hvhe()) \
+ sysreg_clear_set(cpacr_el1, clr, set); \
+ else \
+ sysreg_clear_set(cptr_el2, \
+ __cpacr_to_cptr_clr(clr, set), \
+ __cpacr_to_cptr_set(clr, set));\
+ } while (0)
+
static __always_inline void kvm_write_cptr_el2(u64 val)
{
if (has_vhe() || has_hvhe())
@@ -570,17 +632,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
u64 val;
if (has_vhe()) {
- val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
- CPACR_EL1_ZEN_EL1EN);
+ val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN_EL1EN;
} else if (has_hvhe()) {
- val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
+ val = CPACR_ELx_FPEN;
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
- val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
+ val |= CPACR_ELx_ZEN;
if (cpus_have_final_cap(ARM64_SME))
- val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
+ val |= CPACR_ELx_SMEN;
} else {
val = CPTR_NVHE_EL2_RES1;
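A compact sketch of the build-time rule enforced by __build_check_all_or_none() above: a mask may contain a whole multi-bit field or none of it, never part of it. BUILD_BUG_ON() is approximated with C11 static_assert and the field constant is illustrative.

#include <assert.h>

#define FIELD_FPEN (3u << 20)   /* illustrative two-bit enable field */

#define CHECK_ALL_OR_NONE(r, bits)                                      \
        static_assert(!(((r) & (bits)) && ((r) & (bits)) != (bits)),    \
                      "partial update of a multi-bit field")

CHECK_ALL_OR_NONE(0u, FIELD_FPEN);         /* none of the field: accepted */
CHECK_ALL_OR_NONE(FIELD_FPEN, FIELD_FPEN); /* the whole field: accepted */
/* CHECK_ALL_OR_NONE(1u << 20, FIELD_FPEN); would fail to compile */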
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8170c04fde91..36b8e97bf49e 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -76,6 +76,7 @@ static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
extern unsigned int __ro_after_init kvm_sve_max_vl;
+extern unsigned int __ro_after_init kvm_host_sve_max_vl;
int __init kvm_arm_init_sve(void);
u32 __attribute_const__ kvm_target_cpu(void);
@@ -521,6 +522,20 @@ struct kvm_cpu_context {
u64 *vncr_array;
};
+struct cpu_sve_state {
+ __u64 zcr_el1;
+
+ /*
+ * Ordering is important since __sve_save_state/__sve_restore_state
+ * relies on it.
+ */
+ __u32 fpsr;
+ __u32 fpcr;
+
+ /* Must be SVE_VQ_BYTES (128 bit) aligned. */
+ __u8 sve_regs[];
+};
+
/*
* This structure is instantiated on a per-CPU basis, and contains
* data that is:
@@ -534,7 +549,15 @@ struct kvm_cpu_context {
*/
struct kvm_host_data {
struct kvm_cpu_context host_ctxt;
- struct user_fpsimd_state *fpsimd_state; /* hyp VA */
+
+ /*
+ * All pointers in this union are hyp VA.
+ * sve_state is only used in pKVM and if system_supports_sve().
+ */
+ union {
+ struct user_fpsimd_state *fpsimd_state;
+ struct cpu_sve_state *sve_state;
+ };
/* Ownership of the FP regs */
enum {
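A simplified, hedged sketch of the shape introduced above: a union of per-CPU save-area pointers whose active member is implied by the configuration and by the separate ownership tag; the types and field names below are illustrative, not the kernel's.

#include <stdio.h>

struct fpsimd_regs   { unsigned long v[64]; };
struct sve_save_area { unsigned long zcr_el1; unsigned char regs[256]; };

struct host_fp_data {
        /* Only one member is meaningful at a time: sve_state when SVE is
         * in use under pKVM, fpsimd_state otherwise. */
        union {
                struct fpsimd_regs   *fpsimd_state;
                struct sve_save_area *sve_state;
        };
        enum { FP_HOST_OWNED, FP_GUEST_OWNED } fp_owner;
};

int main(void)
{
        static struct fpsimd_regs regs;
        struct host_fp_data d = {
                .fpsimd_state = &regs,
                .fp_owner = FP_HOST_OWNED,
        };

        printf("%p %d\n", (void *)d.fpsimd_state, (int)d.fp_owner);
        return 0;
}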
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 3e80464f8953..b05bceca3385 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -111,7 +111,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
-void __sve_restore_state(void *sve_pffr, u32 *fpsr);
+void __sve_save_state(void *sve_pffr, u32 *fpsr, int save_ffr);
+void __sve_restore_state(void *sve_pffr, u32 *fpsr, int restore_ffr);
u64 __guest_enter(struct kvm_vcpu *vcpu);
@@ -142,5 +143,6 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
extern unsigned long kvm_nvhe_sym(__icache_flags);
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
+extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index ad9cfb5c1ff4..cd56acd9a842 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -128,4 +128,13 @@ static inline unsigned long hyp_ffa_proxy_pages(void)
return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}
+static inline size_t pkvm_host_sve_state_size(void)
+{
+ if (!system_supports_sve())
+ return 0;
+
+ return size_add(sizeof(struct cpu_sve_state),
+ SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
+}
+
#endif /* __ARM64_KVM_PKVM_H__ */
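A hedged sketch of what pkvm_host_sve_state_size() above computes: the fixed header of a structure ending in a flexible array member plus a vector-length-dependent register payload. The register arithmetic below is only an approximation; the kernel derives it from SVE_SIG_REGS_SIZE().

#include <stdio.h>
#include <stdlib.h>

struct sve_save_area {
        unsigned long zcr_el1;
        unsigned int fpsr, fpcr;
        unsigned char regs[];           /* flexible array member, sized per VL */
};

/* 32 Z registers of VL bytes, plus 16 predicates and FFR of VL/8 bytes each
 * (rough approximation of the real layout). */
static size_t save_area_size(size_t vl_bytes)
{
        return sizeof(struct sve_save_area) + 32 * vl_bytes + 17 * (vl_bytes / 8);
}

int main(void)
{
        size_t sz = save_area_size(256);        /* VL = 256 bytes (2048 bits) */
        struct sve_save_area *s = calloc(1, sz);

        printf("%zu bytes\n", sz);
        free(s);
        return 0;
}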
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 9943ff0af4c9..1f60aa1bc750 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -170,6 +170,7 @@
#define PTE_CONT (_AT(pteval_t, 1) << 52) /* Contiguous range */
#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
+#define PTE_SWBITS_MASK _AT(pteval_t, (BIT(63) | GENMASK(58, 55)))
#define PTE_ADDR_LOW (((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
#ifdef CONFIG_ARM64_PA_BITS_52
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 266b96acc014..1386e8e751f2 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -840,7 +840,7 @@ __SYSCALL(__NR_pselect6_time64, compat_sys_pselect6_time64)
#define __NR_ppoll_time64 414
__SYSCALL(__NR_ppoll_time64, compat_sys_ppoll_time64)
#define __NR_io_pgetevents_time64 416
-__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
+__SYSCALL(__NR_io_pgetevents_time64, compat_sys_io_pgetevents_time64)
#define __NR_recvmmsg_time64 417
__SYSCALL(__NR_recvmmsg_time64, compat_sys_recvmmsg_time64)
#define __NR_mq_timedsend_time64 418
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index dd6ce86d4332..b776e7424fe9 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -462,6 +462,9 @@ static int run_all_insn_set_hw_mode(unsigned int cpu)
for (int i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
struct insn_emulation *insn = insn_emulations[i];
bool enable = READ_ONCE(insn->current_mode) == INSN_HW;
+ if (insn->status == INSN_UNAVAILABLE)
+ continue;
+
if (insn->set_hw_mode && insn->set_hw_mode(enable)) {
pr_warn("CPU[%u] cannot support the emulation of %s",
cpu, insn->name);
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 4a92096db34e..712718aed5dd 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -9,6 +9,7 @@
#include <linux/efi.h>
#include <linux/init.h>
+#include <linux/kmemleak.h>
#include <linux/screen_info.h>
#include <linux/vmalloc.h>
@@ -213,6 +214,7 @@ l: if (!p) {
return -ENOMEM;
}
+ kmemleak_not_leak(p);
efi_rt_stack_top = p + THREAD_SIZE;
return 0;
}
diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
index 5fa08e13e17e..f374a3e5a5fe 100644
--- a/arch/arm64/kernel/pi/map_kernel.c
+++ b/arch/arm64/kernel/pi/map_kernel.c
@@ -173,7 +173,7 @@ static void __init remap_idmap_for_lpa2(void)
* Don't bother with the FDT, we no longer need it after this.
*/
memset(init_idmap_pg_dir, 0,
- (u64)init_idmap_pg_dir - (u64)init_idmap_pg_end);
+ (u64)init_idmap_pg_end - (u64)init_idmap_pg_dir);
create_init_idmap(init_idmap_pg_dir, mask);
dsb(ishst);
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index ad198262b981..7230f6e20ab8 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -53,17 +53,15 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
syscall_set_return_value(current, regs, 0, ret);
/*
- * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
- * but not enough for arm64 stack utilization comfort. To keep
- * reasonable stack head room, reduce the maximum offset to 9 bits.
+ * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
+ * bits. The actual entropy will be further reduced by the compiler
+ * when applying stack alignment constraints: the AAPCS mandates a
+ * 16-byte aligned SP at function boundaries, which will remove the
+ * 4 low bits from any entropy chosen here.
*
- * The actual entropy will be further reduced by the compiler when
- * applying stack alignment constraints: the AAPCS mandates a
- * 16-byte (i.e. 4-bit) aligned SP at function boundaries.
- *
- * The resulting 5 bits of entropy is seen in SP[8:4].
+ * The resulting 6 bits of entropy is seen in SP[9:4].
*/
- choose_random_kstack_offset(get_random_u16() & 0x1FF);
+ choose_random_kstack_offset(get_random_u16());
}
static inline bool has_syscall_work(unsigned long flags)
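A quick numeric check of the entropy described in the comment above (constants taken from that comment, not re-derived): keeping 10 bits and then applying 16-byte stack alignment leaves 64 distinct offsets, i.e. 6 bits in SP[9:4].

#include <stdio.h>

int main(void)
{
        unsigned char seen[1024] = { 0 };
        unsigned int distinct = 0;

        for (unsigned int rnd = 0; rnd < 0x10000; rnd++) {
                /* keep 10 bits, then drop the 4 low bits for 16-byte alignment */
                unsigned int offset = (rnd & 0x3ff) & ~0xfU;

                if (!seen[offset]) {
                        seen[offset] = 1;
                        distinct++;
                }
        }

        printf("distinct stack offsets: %u\n", distinct);   /* prints 64 */
        return 0;
}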
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 9996a989b52e..59716789fe0f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1931,6 +1931,11 @@ static unsigned long nvhe_percpu_order(void)
return size ? get_order(size) : 0;
}
+static size_t pkvm_host_sve_state_order(void)
+{
+ return get_order(pkvm_host_sve_state_size());
+}
+
/* A lookup table holding the hypervisor VA for each vector slot */
static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
@@ -2310,12 +2315,20 @@ static void __init teardown_subsystems(void)
static void __init teardown_hyp_mode(void)
{
+ bool free_sve = system_supports_sve() && is_protected_kvm_enabled();
int cpu;
free_hyp_pgds();
for_each_possible_cpu(cpu) {
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
+
+ if (free_sve) {
+ struct cpu_sve_state *sve_state;
+
+ sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
+ free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
+ }
}
}
@@ -2398,6 +2411,58 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
return 0;
}
+static int init_pkvm_host_sve_state(void)
+{
+ int cpu;
+
+ if (!system_supports_sve())
+ return 0;
+
+ /* Allocate pages for host sve state in protected mode. */
+ for_each_possible_cpu(cpu) {
+ struct page *page = alloc_pages(GFP_KERNEL, pkvm_host_sve_state_order());
+
+ if (!page)
+ return -ENOMEM;
+
+ per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page);
+ }
+
+ /*
+ * Don't map the pages in hyp since these are only used in protected
+ * mode, which will (re)create its own mapping when initialized.
+ */
+
+ return 0;
+}
+
+/*
+ * Finalizes the initialization of hyp mode, once everything else is initialized
+ * and the initialization process cannot fail.
+ */
+static void finalize_init_hyp_mode(void)
+{
+ int cpu;
+
+ if (system_supports_sve() && is_protected_kvm_enabled()) {
+ for_each_possible_cpu(cpu) {
+ struct cpu_sve_state *sve_state;
+
+ sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
+ per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
+ kern_hyp_va(sve_state);
+ }
+ } else {
+ for_each_possible_cpu(cpu) {
+ struct user_fpsimd_state *fpsimd_state;
+
+ fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
+ per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
+ kern_hyp_va(fpsimd_state);
+ }
+ }
+}
+
static void pkvm_hyp_init_ptrauth(void)
{
struct kvm_cpu_context *hyp_ctxt;
@@ -2566,6 +2631,10 @@ static int __init init_hyp_mode(void)
goto out_err;
}
+ err = init_pkvm_host_sve_state();
+ if (err)
+ goto out_err;
+
err = kvm_hyp_init_protection(hyp_va_bits);
if (err) {
kvm_err("Failed to init hyp memory protection\n");
@@ -2730,6 +2799,13 @@ static __init int kvm_arm_init(void)
if (err)
goto out_subs;
+ /*
+ * This should be called after initialization is done and failure isn't
+ * possible anymore.
+ */
+ if (!in_hyp_mode)
+ finalize_init_hyp_mode();
+
kvm_arm_initialised = true;
return 0;
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 72d733c74a38..54090967a335 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -2181,16 +2181,23 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
if (forward_traps(vcpu, HCR_NV))
return;
+ spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2);
+ spsr = kvm_check_illegal_exception_return(vcpu, spsr);
+
/* Check for an ERETAx */
esr = kvm_vcpu_get_esr(vcpu);
if (esr_iss_is_eretax(esr) && !kvm_auth_eretax(vcpu, &elr)) {
/*
- * Oh no, ERETAx failed to authenticate. If we have
- * FPACCOMBINE, deliver an exception right away. If we
- * don't, then let the mangled ELR value trickle down the
+ * Oh no, ERETAx failed to authenticate.
+ *
+ * If we have FPACCOMBINE and we don't have a pending
+ * Illegal Execution State exception (which has priority
+ * over FPAC), deliver an exception right away.
+ *
+ * Otherwise, let the mangled ELR value trickle down the
* ERET handling, and the guest will have a little surprise.
*/
- if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE)) {
+ if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE) && !(spsr & PSR_IL_BIT)) {
esr &= ESR_ELx_ERET_ISS_ERETA;
esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_FPAC);
kvm_inject_nested_sync(vcpu, esr);
@@ -2201,17 +2208,11 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
preempt_disable();
kvm_arch_vcpu_put(vcpu);
- spsr = __vcpu_sys_reg(vcpu, SPSR_EL2);
- spsr = kvm_check_illegal_exception_return(vcpu, spsr);
if (!esr_iss_is_eretax(esr))
elr = __vcpu_sys_reg(vcpu, ELR_EL2);
trace_kvm_nested_eret(vcpu, elr, spsr);
- /*
- * Note that the current exception level is always the virtual EL2,
- * since we set HCR_EL2.NV bit only when entering the virtual EL2.
- */
*vcpu_pc(vcpu) = elr;
*vcpu_cpsr(vcpu) = spsr;
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 1807d3a79a8a..521b32868d0d 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -90,6 +90,13 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
fpsimd_save_and_flush_cpu_state();
}
}
+
+ /*
+ * If normal guests gain SME support, maintain this behavior for pKVM
+ * guests, which don't support SME.
+ */
+ WARN_ON(is_protected_kvm_enabled() && system_supports_sme() &&
+ read_sysreg_s(SYS_SVCR));
}
/*
@@ -161,9 +168,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
if (has_vhe() && system_supports_sme()) {
/* Also restore EL0 state seen on entry */
if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
- sysreg_clear_set(CPACR_EL1, 0,
- CPACR_EL1_SMEN_EL0EN |
- CPACR_EL1_SMEN_EL1EN);
+ sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN);
else
sysreg_clear_set(CPACR_EL1,
CPACR_EL1_SMEN_EL0EN,
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index e2f762d959bb..11098eb7eb44 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -251,6 +251,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
case PSR_AA32_MODE_SVC:
case PSR_AA32_MODE_ABT:
case PSR_AA32_MODE_UND:
+ case PSR_AA32_MODE_SYS:
if (!vcpu_el1_is_32bit(vcpu))
return -EINVAL;
break;
@@ -276,7 +277,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
int i, nr_reg;
- switch (*vcpu_cpsr(vcpu)) {
+ switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
/*
* Either we are dealing with user mode, and only the
* first 15 registers (+ PC) must be narrowed to 32bit.
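A short sketch of the fix above: compare only the AArch32 mode field of the PSR, since condition flags and other state bits may also be set in the full register. The constants below are the architectural mode encodings, used here purely for illustration.

#include <stdio.h>

#define PSR_AA32_MODE_MASK 0x1fU
#define PSR_AA32_MODE_USR  0x10U

static int is_user_mode(unsigned int cpsr)
{
        switch (cpsr & PSR_AA32_MODE_MASK) {    /* mask first, then compare */
        case PSR_AA32_MODE_USR:
                return 1;
        default:
                return 0;
        }
}

int main(void)
{
        /* Same mode bits, but with condition flags set in the top bits;
         * an unmasked switch on the whole CPSR would miss this case. */
        printf("%d\n", is_user_mode(0x60000010));
        return 0;
}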
diff --git a/arch/arm64/kvm/hyp/aarch32.c b/arch/arm64/kvm/hyp/aarch32.c
index 8d9670e6615d..449fa58cf3b6 100644
--- a/arch/arm64/kvm/hyp/aarch32.c
+++ b/arch/arm64/kvm/hyp/aarch32.c
@@ -50,9 +50,23 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
u32 cpsr_cond;
int cond;
- /* Top two bits non-zero? Unconditional. */
- if (kvm_vcpu_get_esr(vcpu) >> 30)
+ /*
+ * These are the exception classes that could fire with a
+ * conditional instruction.
+ */
+ switch (kvm_vcpu_trap_get_class(vcpu)) {
+ case ESR_ELx_EC_CP15_32:
+ case ESR_ELx_EC_CP15_64:
+ case ESR_ELx_EC_CP14_MR:
+ case ESR_ELx_EC_CP14_LS:
+ case ESR_ELx_EC_FP_ASIMD:
+ case ESR_ELx_EC_CP10_ID:
+ case ESR_ELx_EC_CP14_64:
+ case ESR_ELx_EC_SVC32:
+ break;
+ default:
return true;
+ }
/* Is condition field valid? */
cond = kvm_vcpu_get_condition(vcpu);
diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
index 61e6f3ba7b7d..e950875e31ce 100644
--- a/arch/arm64/kvm/hyp/fpsimd.S
+++ b/arch/arm64/kvm/hyp/fpsimd.S
@@ -25,3 +25,9 @@ SYM_FUNC_START(__sve_restore_state)
sve_load 0, x1, x2, 3
ret
SYM_FUNC_END(__sve_restore_state)
+
+SYM_FUNC_START(__sve_save_state)
+ mov x2, #1
+ sve_save 0, x1, x2, 3
+ ret
+SYM_FUNC_END(__sve_save_state)
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index a92566f36022..0c4de44534b7 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -316,10 +316,24 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
__sve_restore_state(vcpu_sve_pffr(vcpu),
- &vcpu->arch.ctxt.fp_regs.fpsr);
+ &vcpu->arch.ctxt.fp_regs.fpsr,
+ true);
write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
}
+static inline void __hyp_sve_save_host(void)
+{
+ struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
+
+ sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
+ write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+ __sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
+ &sve_state->fpsr,
+ true);
+}
+
+static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
+
/*
* We trap the first access to the FP/SIMD to save the host context and
* restore the guest context lazily.
@@ -330,7 +344,6 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
bool sve_guest;
u8 esr_ec;
- u64 reg;
if (!system_supports_fpsimd())
return false;
@@ -353,24 +366,15 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
/* Valid trap. Switch the context: */
/* First disable enough traps to allow us to update the registers */
- if (has_vhe() || has_hvhe()) {
- reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
- if (sve_guest)
- reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
-
- sysreg_clear_set(cpacr_el1, 0, reg);
- } else {
- reg = CPTR_EL2_TFP;
- if (sve_guest)
- reg |= CPTR_EL2_TZ;
-
- sysreg_clear_set(cptr_el2, reg, 0);
- }
+ if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve()))
+ cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+ else
+ cpacr_clear_set(0, CPACR_ELx_FPEN);
isb();
/* Write out the host state if it's in the registers */
if (host_owns_fp_regs())
- __fpsimd_save_state(*host_data_ptr(fpsimd_state));
+ kvm_hyp_save_fpsimd_host(vcpu);
/* Restore the guest state */
if (sve_guest)
diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
index 22f374e9f532..24a9a8330d19 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
@@ -59,7 +59,6 @@ static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
}
void pkvm_hyp_vm_table_init(void *tbl);
-void pkvm_host_fpsimd_state_init(void);
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
unsigned long pgd_hva);
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index 02746f9d0980..efb053af331c 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -177,6 +177,14 @@ static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
res);
}
+static void ffa_rx_release(struct arm_smccc_res *res)
+{
+ arm_smccc_1_1_smc(FFA_RX_RELEASE,
+ 0, 0,
+ 0, 0, 0, 0, 0,
+ res);
+}
+
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{
@@ -543,16 +551,19 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
if (WARN_ON(offset > len ||
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
ret = FFA_RET_ABORTED;
+ ffa_rx_release(res);
goto out_unlock;
}
if (len > ffa_desc_buf.len) {
ret = FFA_RET_NO_MEMORY;
+ ffa_rx_release(res);
goto out_unlock;
}
buf = ffa_desc_buf.buf;
memcpy(buf, hyp_buffers.rx, fraglen);
+ ffa_rx_release(res);
for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
@@ -563,6 +574,7 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
fraglen = res->a3;
memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
+ ffa_rx_release(res);
}
ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index d5c48dc98f67..f43d845f3c4e 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -23,20 +23,80 @@ DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
+static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
+{
+ __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+ /*
+ * On saving/restoring guest sve state, always use the maximum VL for
+ * the guest. The layout of the data when saving the sve state depends
+ * on the VL, so use a consistent (i.e., the maximum) guest VL.
+ */
+ sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
+ __sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true);
+ write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+}
+
+static void __hyp_sve_restore_host(void)
+{
+ struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
+
+ /*
+ * On saving/restoring host sve state, always use the maximum VL for
+ * the host. The layout of the data when saving the sve state depends
+ * on the VL, so use a consistent (i.e., the maximum) host VL.
+ *
+ * Setting ZCR_EL2 to ZCR_ELx_LEN_MASK sets the effective length
+ * supported by the system (or limited at EL3).
+ */
+ write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+ __sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
+ &sve_state->fpsr,
+ true);
+ write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR);
+}
+
+static void fpsimd_sve_flush(void)
+{
+ *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+}
+
+static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
+{
+ if (!guest_owns_fp_regs())
+ return;
+
+ cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+ isb();
+
+ if (vcpu_has_sve(vcpu))
+ __hyp_sve_save_guest(vcpu);
+ else
+ __fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
+
+ if (system_supports_sve())
+ __hyp_sve_restore_host();
+ else
+ __fpsimd_restore_state(*host_data_ptr(fpsimd_state));
+
+ *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+}
+
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+ fpsimd_sve_flush();
+
hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
- hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
+ /* Limit guest vector length to the maximum supported by the host. */
+ hyp_vcpu->vcpu.arch.sve_max_vl = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl);
hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2;
hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
- hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
@@ -54,10 +114,11 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
unsigned int i;
+ fpsimd_sve_sync(&hyp_vcpu->vcpu);
+
host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2;
- host_vcpu->arch.cptr_el2 = hyp_vcpu->vcpu.arch.cptr_el2;
host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
@@ -79,6 +140,17 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
struct pkvm_hyp_vcpu *hyp_vcpu;
struct kvm *host_kvm;
+ /*
+ * KVM (and pKVM) doesn't support SME guests for now, and
+ * ensures that SME features aren't enabled in pstate when
+ * loading a vcpu. Therefore, if SME features are enabled, the host
+ * is misbehaving.
+ */
+ if (unlikely(system_supports_sme() && read_sysreg_s(SYS_SVCR))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
host_kvm = kern_hyp_va(host_vcpu->kvm);
hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
host_vcpu->vcpu_idx);
@@ -405,11 +477,7 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
handle_host_smc(host_ctxt);
break;
case ESR_ELx_EC_SVE:
- if (has_hvhe())
- sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN |
- CPACR_EL1_ZEN_EL0EN));
- else
- sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
+ cpacr_clear_set(0, CPACR_ELx_ZEN);
isb();
sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
break;
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 16aa4875ddb8..95cf18574251 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -18,6 +18,8 @@ unsigned long __icache_flags;
/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;
+unsigned int kvm_host_sve_max_vl;
+
/*
* Set trap register values based on features in ID_AA64PFR0.
*/
@@ -63,7 +65,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
/* Trap SVE */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
if (has_hvhe())
- cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
+ cptr_clear |= CPACR_ELx_ZEN;
else
cptr_set |= CPTR_EL2_TZ;
}
@@ -247,17 +249,6 @@ void pkvm_hyp_vm_table_init(void *tbl)
vm_table = tbl;
}
-void pkvm_host_fpsimd_state_init(void)
-{
- unsigned long i;
-
- for (i = 0; i < hyp_nr_cpus; i++) {
- struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
-
- host_data->fpsimd_state = &host_data->host_ctxt.fp_regs;
- }
-}
-
/*
* Return the hyp vm structure corresponding to the handle.
*/
@@ -586,6 +577,8 @@ unlock:
if (ret)
unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
+ hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
+
return ret;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 859f22f754d3..f4350ba07b0b 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -67,6 +67,28 @@ static int divide_memory_pool(void *virt, unsigned long size)
return 0;
}
+static int pkvm_create_host_sve_mappings(void)
+{
+ void *start, *end;
+ int ret, i;
+
+ if (!system_supports_sve())
+ return 0;
+
+ for (i = 0; i < hyp_nr_cpus; i++) {
+ struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
+ struct cpu_sve_state *sve_state = host_data->sve_state;
+
+ start = kern_hyp_va(sve_state);
+ end = start + PAGE_ALIGN(pkvm_host_sve_state_size());
+ ret = pkvm_create_mappings(start, end, PAGE_HYP);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
unsigned long *per_cpu_base,
u32 hyp_va_bits)
@@ -125,6 +147,8 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
return ret;
}
+ pkvm_create_host_sve_mappings();
+
/*
* Map the host sections RO in the hypervisor, but transfer the
* ownership from the host to the hypervisor itself to make sure they
@@ -300,7 +324,6 @@ void __noreturn __pkvm_init_finalise(void)
goto out;
pkvm_hyp_vm_table_init(vm_table_base);
- pkvm_host_fpsimd_state_init();
out:
/*
* We tail-called to here from handle___pkvm_init() and will not return,
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 6758cd905570..6af179c6356d 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -48,15 +48,14 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
if (cpus_have_final_cap(ARM64_SME)) {
if (has_hvhe())
- val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN);
+ val &= ~CPACR_ELx_SMEN;
else
val |= CPTR_EL2_TSM;
}
if (!guest_owns_fp_regs()) {
if (has_hvhe())
- val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
- CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
+ val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
else
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
@@ -182,6 +181,25 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
kvm_handle_pvm_sysreg(vcpu, exit_code));
}
+static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Non-protected kvm relies on the host restoring its sve state.
+ * Protected kvm restores the host's sve state so as not to reveal
+ * that fpsimd was used by a guest nor to leak upper sve bits.
+ */
+ if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
+ __hyp_sve_save_host();
+
+ /* Re-enable SVE traps if not supported for the guest vcpu. */
+ if (!vcpu_has_sve(vcpu))
+ cpacr_clear_set(CPACR_ELx_ZEN, 0);
+
+ } else {
+ __fpsimd_save_state(*host_data_ptr(fpsimd_state));
+ }
+}
+
static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index d7af5f46f22a..8fbb6a2e0559 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -93,8 +93,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val = read_sysreg(cpacr_el1);
val |= CPACR_ELx_TTA;
- val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
- CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);
+ val &= ~(CPACR_ELx_ZEN | CPACR_ELx_SMEN);
/*
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
@@ -109,9 +108,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
if (guest_owns_fp_regs()) {
if (vcpu_has_sve(vcpu))
- val |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
+ val |= CPACR_ELx_ZEN;
} else {
- val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
+ val &= ~CPACR_ELx_FPEN;
__activate_traps_fpsimd32(vcpu);
}
@@ -262,6 +261,11 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
return true;
}
+static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
+{
+ __fpsimd_save_state(*host_data_ptr(fpsimd_state));
+}
+
static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 6813c7c7f00a..bae8536cbf00 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -58,8 +58,10 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
break;
case SYS_ID_AA64PFR1_EL1:
- /* Only support SSBS */
- val &= NV_FTR(PFR1, SSBS);
+ /* Only support BTI, SSBS, CSV2_frac */
+ val &= (NV_FTR(PFR1, BT) |
+ NV_FTR(PFR1, SSBS) |
+ NV_FTR(PFR1, CSV2_frac));
break;
case SYS_ID_AA64MMFR0_EL1:
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 1b7b58cb121f..3fc8ca164dbe 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -32,6 +32,7 @@
/* Maximum phys_shift supported for any VM on this host */
static u32 __ro_after_init kvm_ipa_limit;
+unsigned int __ro_after_init kvm_host_sve_max_vl;
/*
* ARMv8 Reset Values
@@ -51,6 +52,8 @@ int __init kvm_arm_init_sve(void)
{
if (system_supports_sve()) {
kvm_sve_max_vl = sve_max_virtualisable_vl();
+ kvm_host_sve_max_vl = sve_max_vl();
+ kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;
/*
* The get_sve_reg()/set_sve_reg() ioctl interface will need
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 8f5b7a3e7009..7f68cf58b978 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -391,7 +391,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
- vgic_v3_free_redist_region(rdreg);
+ vgic_v3_free_redist_region(kvm, rdreg);
INIT_LIST_HEAD(&dist->rd_regions);
} else {
dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index a3983a631b5a..9e50928f5d7d 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -919,8 +919,19 @@ free:
return ret;
}
-void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
+void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg)
{
+ struct kvm_vcpu *vcpu;
+ unsigned long c;
+
+ lockdep_assert_held(&kvm->arch.config_lock);
+
+ /* Garbage collect the region */
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ if (vcpu->arch.vgic_cpu.rdreg == rdreg)
+ vcpu->arch.vgic_cpu.rdreg = NULL;
+ }
+
list_del(&rdreg->list);
kfree(rdreg);
}
@@ -945,7 +956,7 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
mutex_lock(&kvm->arch.config_lock);
rdreg = vgic_v3_rdist_region_from_index(kvm, index);
- vgic_v3_free_redist_region(rdreg);
+ vgic_v3_free_redist_region(kvm, rdreg);
mutex_unlock(&kvm->arch.config_lock);
return ret;
}
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 6106ebd5ba42..03d356a12377 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -316,7 +316,7 @@ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
u32 index);
-void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg);
+void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg);
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index 9f9486de0004..a3edced29ac1 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -376,7 +376,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
* clearing access/dirty for the whole block.
*/
unsigned long start = addr;
- unsigned long end = start + nr;
+ unsigned long end = start + nr * PAGE_SIZE;
if (pte_cont(__ptep_get(ptep + nr - 1)))
end = ALIGN(end, CONT_PTE_SIZE);
@@ -386,7 +386,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
ptep = contpte_align_down(ptep);
}
- __clear_young_dirty_ptes(vma, start, ptep, end - start, flags);
+ __clear_young_dirty_ptes(vma, start, ptep, (end - start) / PAGE_SIZE, flags);
}
EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);
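A tiny illustration of the unit fix above: 'nr' counts PTEs (pages), so the byte range must be scaled by PAGE_SIZE when building addresses and divided by it again when converting back to a PTE count; adding 'nr' bytes truncated the range.

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long addr = 0x1000, nr = 3;

        unsigned long end_wrong = addr + nr;             /* only 3 bytes past start */
        unsigned long end_right = addr + nr * PAGE_SIZE; /* 3 pages past start */

        printf("wrong span: %lu bytes, right span: %lu pages\n",
               end_wrong - addr, (end_right - addr) / PAGE_SIZE);
        return 0;
}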
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c927e9312f10..353ea5dc32b8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -124,7 +124,8 @@ bool pgattr_change_is_safe(u64 old, u64 new)
* The following mapping attributes may be updated in live
* kernel mappings without the need for break-before-make.
*/
- pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
+ pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG |
+ PTE_SWBITS_MASK;
/* creating or taking down mappings is always safe */
if (!pte_valid(__pte(old)) || !pte_valid(__pte(new)))
diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
index 7ff6a2466af1..e0594b6370a6 100644
--- a/arch/csky/include/uapi/asm/unistd.h
+++ b/arch/csky/include/uapi/asm/unistd.h
@@ -6,6 +6,7 @@
#define __ARCH_WANT_SYS_CLONE3
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
+#define __ARCH_WANT_SYNC_FILE_RANGE2
#include <asm-generic/unistd.h>
#define __NR_set_thread_area (__NR_arch_specific_syscall + 0)
diff --git a/arch/csky/kernel/syscall.c b/arch/csky/kernel/syscall.c
index 3d30e58a45d2..4540a271ee39 100644
--- a/arch/csky/kernel/syscall.c
+++ b/arch/csky/kernel/syscall.c
@@ -20,7 +20,7 @@ SYSCALL_DEFINE6(mmap2,
unsigned long, prot,
unsigned long, flags,
unsigned long, fd,
- off_t, offset)
+ unsigned long, offset)
{
if (unlikely(offset & (~PAGE_MASK >> 12)))
return -EINVAL;
diff --git a/arch/hexagon/include/asm/syscalls.h b/arch/hexagon/include/asm/syscalls.h
new file mode 100644
index 000000000000..40f2d08bec92
--- /dev/null
+++ b/arch/hexagon/include/asm/syscalls.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm-generic/syscalls.h>
+
+asmlinkage long sys_hexagon_fadvise64_64(int fd, int advice,
+ u32 a2, u32 a3, u32 a4, u32 a5);
diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h
index 432c4db1b623..21ae22306b5d 100644
--- a/arch/hexagon/include/uapi/asm/unistd.h
+++ b/arch/hexagon/include/uapi/asm/unistd.h
@@ -36,5 +36,6 @@
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_TIME32_SYSCALLS
+#define __ARCH_WANT_SYNC_FILE_RANGE2
#include <asm-generic/unistd.h>
diff --git a/arch/hexagon/kernel/syscalltab.c b/arch/hexagon/kernel/syscalltab.c
index 0fadd582cfc7..5d98bdc494ec 100644
--- a/arch/hexagon/kernel/syscalltab.c
+++ b/arch/hexagon/kernel/syscalltab.c
@@ -14,6 +14,13 @@
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
+SYSCALL_DEFINE6(hexagon_fadvise64_64, int, fd, int, advice,
+ SC_ARG64(offset), SC_ARG64(len))
+{
+ return ksys_fadvise64_64(fd, SC_VAL64(loff_t, offset), SC_VAL64(loff_t, len), advice);
+}
+#define sys_fadvise64_64 sys_hexagon_fadvise64_64
+
void *sys_call_table[__NR_syscalls] = {
#include <asm/unistd.h>
};
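A hedged sketch of the idea behind the SC_ARG64()/SC_VAL64() helpers used by the hexagon wrapper above: a 64-bit syscall argument arrives split across two 32-bit registers and is reassembled before use. Endianness and register-pairing conventions are simplified here.

#include <stdint.h>
#include <stdio.h>

static int64_t merge64(uint32_t lo, uint32_t hi)
{
        return (int64_t)(((uint64_t)hi << 32) | lo);
}

int main(void)
{
        /* offset = 0x100000200 passed as two 32-bit halves */
        printf("0x%llx\n", (unsigned long long)merge64(0x00000200, 0x00000001));
        return 0;
}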
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index e38139c576ee..ddc042895d01 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -143,7 +143,7 @@ config LOONGARCH
select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
- select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS
+ select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -261,6 +261,9 @@ config AS_HAS_EXPLICIT_RELOCS
config AS_HAS_FCSR_CLASS
def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0)
+config AS_HAS_THIN_ADD_SUB
+ def_bool $(cc-option,-Wa$(comma)-mthin-add-sub)
+
config AS_HAS_LSX_EXTENSION
def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)
diff --git a/arch/loongarch/Kconfig.debug b/arch/loongarch/Kconfig.debug
index 98d60630c3d4..8b2ce5b5d43e 100644
--- a/arch/loongarch/Kconfig.debug
+++ b/arch/loongarch/Kconfig.debug
@@ -28,6 +28,7 @@ config UNWINDER_PROLOGUE
config UNWINDER_ORC
bool "ORC unwinder"
+ depends on HAVE_OBJTOOL
select OBJTOOL
help
This option enables the ORC (Oops Rewind Capability) unwinder for
diff --git a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
index 8aefb0c12672..a34734a6c3ce 100644
--- a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
@@ -44,14 +44,14 @@
&gmac0 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
bus_id = <0x0>;
};
&gmac1 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
bus_id = <0x1>;
};
diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
index 8463fe035386..23cf26cc3e5f 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
@@ -43,7 +43,7 @@
&gmac0 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&phy0>;
mdio {
compatible = "snps,dwmac-mdio";
@@ -58,7 +58,7 @@
&gmac1 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&phy1>;
mdio {
compatible = "snps,dwmac-mdio";
diff --git a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
index 74b99bd234cc..ea9e6985d0e9 100644
--- a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
@@ -92,7 +92,7 @@
&gmac2 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&phy2>;
mdio {
compatible = "snps,dwmac-mdio";
diff --git a/arch/loongarch/include/asm/hw_breakpoint.h b/arch/loongarch/include/asm/hw_breakpoint.h
index 21447fb1efc7..d78330916bd1 100644
--- a/arch/loongarch/include/asm/hw_breakpoint.h
+++ b/arch/loongarch/include/asm/hw_breakpoint.h
@@ -75,6 +75,8 @@ do { \
#define CSR_MWPC_NUM 0x3f
#define CTRL_PLV_ENABLE 0x1e
+#define CTRL_PLV0_ENABLE 0x02
+#define CTRL_PLV3_ENABLE 0x10
#define MWPnCFG3_LoadEn 8
#define MWPnCFG3_StoreEn 9
@@ -101,7 +103,7 @@ struct perf_event;
struct perf_event_attr;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
- int *gen_len, int *gen_type, int *offset);
+ int *gen_len, int *gen_type);
extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_arch_parse(struct perf_event *bp,
const struct perf_event_attr *attr,
diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h
index 27f319b49862..b5f9de9f102e 100644
--- a/arch/loongarch/include/asm/numa.h
+++ b/arch/loongarch/include/asm/numa.h
@@ -56,6 +56,7 @@ extern int early_cpu_to_node(int cpu);
static inline void early_numa_add_cpu(int cpuid, s16 node) { }
static inline void numa_add_cpu(unsigned int cpu) { }
static inline void numa_remove_cpu(unsigned int cpu) { }
+static inline void set_cpuid_to_node(int cpuid, s16 node) { }
static inline int early_cpu_to_node(int cpu)
{
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
index 45b507a7b06f..d9eafd3ee3d1 100644
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -42,7 +42,7 @@
.macro JUMP_VIRT_ADDR temp1 temp2
li.d \temp1, CACHE_BASE
pcaddi \temp2, 0
- or \temp1, \temp1, \temp2
+ bstrins.d \temp1, \temp2, (DMW_PABITS - 1), 0
jirl zero, \temp1, 0xc
.endm
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index c4f7de2e2805..4677ea8fa8e9 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -22,7 +22,7 @@
_head:
.word MZ_MAGIC /* "MZ", MS-DOS header */
.org 0x8
- .dword kernel_entry /* Kernel entry point */
+ .dword _kernel_entry /* Kernel entry point (physical address) */
.dword _kernel_asize /* Kernel image effective size */
.quad PHYS_LINK_KADDR /* Kernel image load offset from start of RAM */
.org 0x38 /* 0x20 ~ 0x37 reserved */
diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
index fc55c4de2a11..621ad7634df7 100644
--- a/arch/loongarch/kernel/hw_breakpoint.c
+++ b/arch/loongarch/kernel/hw_breakpoint.c
@@ -174,11 +174,21 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
static int hw_breakpoint_control(struct perf_event *bp,
enum hw_breakpoint_ops ops)
{
- u32 ctrl;
+ u32 ctrl, privilege;
int i, max_slots, enable;
+ struct pt_regs *regs;
struct perf_event **slots;
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ if (arch_check_bp_in_kernelspace(info))
+ privilege = CTRL_PLV0_ENABLE;
+ else
+ privilege = CTRL_PLV3_ENABLE;
+
+ /* Whether bp belongs to a task. */
+ if (bp->hw.target)
+ regs = task_pt_regs(bp->hw.target);
+
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
/* Breakpoint */
slots = this_cpu_ptr(bp_on_reg);
@@ -197,31 +207,38 @@ static int hw_breakpoint_control(struct perf_event *bp,
switch (ops) {
case HW_BREAKPOINT_INSTALL:
/* Set the FWPnCFG/MWPnCFG 1~4 register. */
- write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
- write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
- write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
- write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
- write_wb_reg(CSR_CFG_ASID, i, 0, 0);
- write_wb_reg(CSR_CFG_ASID, i, 1, 0);
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
- write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
+ write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
+ write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
+ write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 0, privilege);
} else {
+ write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
+ write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
+ write_wb_reg(CSR_CFG_ASID, i, 1, 0);
ctrl = encode_ctrl_reg(info->ctrl);
- write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE);
+ write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | privilege);
}
enable = csr_read64(LOONGARCH_CSR_CRMD);
csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
+ if (bp->hw.target)
+ regs->csr_prmd |= CSR_PRMD_PWE;
break;
case HW_BREAKPOINT_UNINSTALL:
/* Reset the FWPnCFG/MWPnCFG 1~4 register. */
- write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
- write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
- write_wb_reg(CSR_CFG_MASK, i, 0, 0);
- write_wb_reg(CSR_CFG_MASK, i, 1, 0);
- write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
- write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
- write_wb_reg(CSR_CFG_ASID, i, 0, 0);
- write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
+ write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
+ write_wb_reg(CSR_CFG_MASK, i, 0, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
+ write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+ } else {
+ write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
+ write_wb_reg(CSR_CFG_MASK, i, 1, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
+ write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ }
+ if (bp->hw.target)
+ regs->csr_prmd &= ~CSR_PRMD_PWE;
break;
}
@@ -283,7 +300,7 @@ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
* to generic breakpoint descriptions.
*/
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
- int *gen_len, int *gen_type, int *offset)
+ int *gen_len, int *gen_type)
{
/* Type */
switch (ctrl.type) {
@@ -303,11 +320,6 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
return -EINVAL;
}
- if (!ctrl.len)
- return -EINVAL;
-
- *offset = __ffs(ctrl.len);
-
/* Len */
switch (ctrl.len) {
case LOONGARCH_BREAKPOINT_LEN_1:
@@ -386,21 +398,17 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
struct arch_hw_breakpoint *hw)
{
int ret;
- u64 alignment_mask, offset;
+ u64 alignment_mask;
/* Build the arch_hw_breakpoint. */
ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
- if (hw->ctrl.type != LOONGARCH_BREAKPOINT_EXECUTE)
- alignment_mask = 0x7;
- else
+ if (hw->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
alignment_mask = 0x3;
- offset = hw->address & alignment_mask;
-
- hw->address &= ~alignment_mask;
- hw->ctrl.len <<= offset;
+ hw->address &= ~alignment_mask;
+ }
return 0;
}
@@ -471,12 +479,15 @@ void breakpoint_handler(struct pt_regs *regs)
slots = this_cpu_ptr(bp_on_reg);
for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) {
- bp = slots[i];
- if (bp == NULL)
- continue;
- perf_bp_event(bp, regs);
+ if ((csr_read32(LOONGARCH_CSR_FWPS) & (0x1 << i))) {
+ bp = slots[i];
+ if (bp == NULL)
+ continue;
+ perf_bp_event(bp, regs);
+ csr_write32(0x1 << i, LOONGARCH_CSR_FWPS);
+ update_bp_registers(regs, 0, 0);
+ }
}
- update_bp_registers(regs, 0, 0);
}
NOKPROBE_SYMBOL(breakpoint_handler);
@@ -488,12 +499,15 @@ void watchpoint_handler(struct pt_regs *regs)
slots = this_cpu_ptr(wp_on_reg);
for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) {
- wp = slots[i];
- if (wp == NULL)
- continue;
- perf_bp_event(wp, regs);
+ if ((csr_read32(LOONGARCH_CSR_MWPS) & (0x1 << i))) {
+ wp = slots[i];
+ if (wp == NULL)
+ continue;
+ perf_bp_event(wp, regs);
+ csr_write32(0x1 << i, LOONGARCH_CSR_MWPS);
+ update_bp_registers(regs, 0, 1);
+ }
}
- update_bp_registers(regs, 0, 1);
}
NOKPROBE_SYMBOL(watchpoint_handler);
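A small sketch of the handler change above: dispatch only the slots whose status bit is set, and acknowledge just that bit, rather than blindly walking every slot. The status register is modelled with a plain variable; the real FWPS/MWPS registers are write-1-to-clear CSRs.

#include <stdio.h>

#define NR_SLOTS 8

static unsigned int status_reg = 0x05;          /* pretend slots 0 and 2 fired */

static void handle_slot(int i) { printf("slot %d fired\n", i); }

int main(void)
{
        for (int i = 0; i < NR_SLOTS; i++) {
                if (status_reg & (1u << i)) {
                        handle_slot(i);
                        status_reg &= ~(1u << i);   /* stands in for the W1C ack */
                }
        }
        return 0;
}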
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
index c114c5ef1332..200109de1971 100644
--- a/arch/loongarch/kernel/ptrace.c
+++ b/arch/loongarch/kernel/ptrace.c
@@ -494,28 +494,14 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
struct arch_hw_breakpoint_ctrl ctrl,
struct perf_event_attr *attr)
{
- int err, len, type, offset;
+ int err, len, type;
- err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
+ err = arch_bp_generic_fields(ctrl, &len, &type);
if (err)
return err;
- switch (note_type) {
- case NT_LOONGARCH_HW_BREAK:
- if ((type & HW_BREAKPOINT_X) != type)
- return -EINVAL;
- break;
- case NT_LOONGARCH_HW_WATCH:
- if ((type & HW_BREAKPOINT_RW) != type)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
attr->bp_len = len;
attr->bp_type = type;
- attr->bp_addr += offset;
return 0;
}
@@ -609,10 +595,27 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type,
return PTR_ERR(bp);
attr = bp->attr;
- decode_ctrl_reg(uctrl, &ctrl);
- err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
- if (err)
- return err;
+
+ switch (note_type) {
+ case NT_LOONGARCH_HW_BREAK:
+ ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
+ ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
+ break;
+ case NT_LOONGARCH_HW_WATCH:
+ decode_ctrl_reg(uctrl, &ctrl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (uctrl & CTRL_PLV_ENABLE) {
+ err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
+ if (err)
+ return err;
+ attr.disabled = 0;
+ } else {
+ attr.disabled = 1;
+ }
return modify_user_hw_breakpoint(bp, &attr);
}
@@ -643,6 +646,10 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
struct perf_event *bp;
struct perf_event_attr attr;
+ /* Kernel-space address cannot be monitored by user-space */
+ if ((unsigned long)addr >= XKPRANGE)
+ return -EINVAL;
+
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
return PTR_ERR(bp);
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 60e0fe97f61a..3d048f1be143 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -282,7 +282,7 @@ static void __init fdt_setup(void)
return;
/* Prefer to use built-in dtb, checking its legality first. */
- if (!fdt_check_header(__dtb_start))
+ if (IS_ENABLED(CONFIG_BUILTIN_DTB) && !fdt_check_header(__dtb_start))
fdt_pointer = __dtb_start;
else
fdt_pointer = efi_fdt_pointer(); /* Fallback to firmware dtb */
@@ -351,10 +351,8 @@ void __init platform_init(void)
arch_reserve_vmcore();
arch_reserve_crashkernel();
-#ifdef CONFIG_ACPI_TABLE_UPGRADE
- acpi_table_upgrade();
-#endif
#ifdef CONFIG_ACPI
+ acpi_table_upgrade();
acpi_gbl_use_default_register_widths = false;
acpi_boot_table_init();
#endif
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 0dfe2388ef41..1436d2465939 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -273,7 +273,6 @@ static void __init fdt_smp_setup(void)
if (cpuid == loongson_sysconf.boot_cpu_id) {
cpu = 0;
- numa_add_cpu(cpu);
} else {
cpu = cpumask_next_zero(-1, cpu_present_mask);
}
@@ -283,6 +282,9 @@ static void __init fdt_smp_setup(void)
set_cpu_present(cpu, true);
__cpu_number_map[cpuid] = cpu;
__cpu_logical_map[cpu] = cpuid;
+
+ early_numa_add_cpu(cpu, 0);
+ set_cpuid_to_node(cpuid, 0);
}
loongson_sysconf.nr_cpus = num_processors;
@@ -468,6 +470,7 @@ void smp_prepare_boot_cpu(void)
set_cpu_possible(0, true);
set_cpu_online(0, true);
set_my_cpu_offset(per_cpu_offset(0));
+ numa_add_cpu(0);
rr_node = first_node(node_online_map);
for_each_possible_cpu(cpu) {
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index b4c5acd7aa3b..8801611143ab 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -22,7 +22,7 @@
#define __SYSCALL(nr, call) [nr] = (call),
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
- prot, unsigned long, flags, unsigned long, fd, off_t, offset)
+ prot, unsigned long, flags, unsigned long, fd, unsigned long, offset)
{
if (offset & ~PAGE_MASK)
return -EINVAL;
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index e8e97dbf9ca4..3c7595342730 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
#define PAGE_SIZE _PAGE_SIZE
#define RO_EXCEPTION_TABLE_ALIGN 4
+#define PHYSADDR_MASK 0xffffffffffff /* 48-bit */
/*
* Put .bss..swapper_pg_dir as the first thing in .bss. This will
@@ -142,10 +143,11 @@ SECTIONS
#ifdef CONFIG_EFI_STUB
/* header symbols */
- _kernel_asize = _end - _text;
- _kernel_fsize = _edata - _text;
- _kernel_vsize = _end - __initdata_begin;
- _kernel_rsize = _edata - __initdata_begin;
+ _kernel_entry = ABSOLUTE(kernel_entry & PHYSADDR_MASK);
+ _kernel_asize = ABSOLUTE(_end - _text);
+ _kernel_fsize = ABSOLUTE(_edata - _text);
+ _kernel_vsize = ABSOLUTE(_end - __initdata_begin);
+ _kernel_rsize = ABSOLUTE(_edata - __initdata_begin);
#endif
.gptab.sdata : {
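
A note on the new PHYSADDR_MASK: kernel_entry is a direct-mapped virtual address, so masking it to 48 bits yields a physical entry point for the image header symbols. A worked example with an illustrative address (assuming the usual 0x9000... direct-map window):

/*
 * Illustrative only: if kernel_entry links at the direct-mapped virtual
 * address 0x9000000000200000, then
 *
 *	_kernel_entry = 0x9000000000200000 & 0xffffffffffff = 0x200000
 *
 * i.e. ABSOLUTE() plus the 48-bit mask strips the mapping-window prefix
 * and leaves the physical entry point among the EFI stub header symbols.
 */
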
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index c86e099af5ca..a68573e091c0 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -761,7 +761,7 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu)
default:
ret = KVM_HCALL_INVALID_CODE;
break;
- };
+ }
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index ed9f34da1a2a..0850b099f300 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -35,7 +35,7 @@
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long, fd,
- off_t, pgoff)
+ unsigned long, pgoff)
{
if (pgoff & ~PAGE_MASK)
return -EINVAL;
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index ec180ab92eaa..66a8ba19c287 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -110,7 +110,8 @@ static void bcm6358_quirks(void)
* RAC flush causes kernel panics on BCM6358 when booting from TP1
* because the bootloader is not initializing it properly.
*/
- bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
+ bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)) ||
+ !!BMIPS_GET_CBR();
}
static void bcm6368_quirks(void)
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index 30e86861c206..b1ee3c48e84b 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -322,7 +322,7 @@ static inline void ehb(void)
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
_ASM_SET_MFTC0 \
- " mftc0 $1, " #rt ", " #sel " \n" \
+ " mftc0 %0, " #rt ", " #sel " \n" \
_ASM_UNSET_MFTC0 \
" .set pop \n" \
: "=r" (__res)); \
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index cc869f5d5693..953f5b7dc723 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -354,7 +354,7 @@
412 n32 utimensat_time64 sys_utimensat
413 n32 pselect6_time64 compat_sys_pselect6_time64
414 n32 ppoll_time64 compat_sys_ppoll_time64
-416 n32 io_pgetevents_time64 sys_io_pgetevents
+416 n32 io_pgetevents_time64 compat_sys_io_pgetevents_time64
417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64
418 n32 mq_timedsend_time64 sys_mq_timedsend
419 n32 mq_timedreceive_time64 sys_mq_timedreceive
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 008ebe60263e..2439a2491cff 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -27,7 +27,7 @@
17 o32 break sys_ni_syscall
# 18 was sys_stat
18 o32 unused18 sys_ni_syscall
-19 o32 lseek sys_lseek
+19 o32 lseek sys_lseek compat_sys_lseek
20 o32 getpid sys_getpid
21 o32 mount sys_mount
22 o32 umount sys_oldumount
@@ -403,7 +403,7 @@
412 o32 utimensat_time64 sys_utimensat sys_utimensat
413 o32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+416 o32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
diff --git a/arch/mips/pci/ops-rc32434.c b/arch/mips/pci/ops-rc32434.c
index 874ed6df9768..34b9323bdabb 100644
--- a/arch/mips/pci/ops-rc32434.c
+++ b/arch/mips/pci/ops-rc32434.c
@@ -112,8 +112,8 @@ retry:
* gives them time to settle
*/
if (where == PCI_VENDOR_ID) {
- if (ret == 0xffffffff || ret == 0x00000000 ||
- ret == 0x0000ffff || ret == 0xffff0000) {
+ if (*val == 0xffffffff || *val == 0x00000000 ||
+ *val == 0x0000ffff || *val == 0xffff0000) {
if (delay > 4)
return 0;
delay *= 2;
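
The hunk above fixes a check that was applied to ret, the accessor's status code, instead of *val, the value actually read back. A hedged sketch of the intended retry shape (helper name and delay policy are illustrative):

/* Sketch only: retry while the value read from PCI_VENDOR_ID still looks
 * like an unsettled device; the accessor's return code is only used to
 * detect a failed access. */
static int read_vendor_id_settled(struct pci_bus *bus, unsigned int devfn, u32 *val)
{
	int delay = 1;

	for (;;) {
		int ret = read_config_dword(bus, devfn, PCI_VENDOR_ID, val);

		if (ret)
			return ret;
		if (*val != 0xffffffff && *val != 0x00000000 &&
		    *val != 0x0000ffff && *val != 0xffff0000)
			return 0;
		if (delay > 4)
			return 0;
		mdelay(delay);
		delay *= 2;
	}
}
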
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index daafeb20f993..dc9b902de8ea 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -16,6 +16,7 @@ config PARISC
select ARCH_HAS_UBSAN
select ARCH_HAS_PTE_SPECIAL
select ARCH_NO_SG_CHAIN
+ select ARCH_SPLIT_ARG64 if !64BIT
select ARCH_SUPPORTS_HUGETLBFS if PA20
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_STACKWALK
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index ba4c05bc24d6..8394718870e1 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -31,18 +31,17 @@ void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
-void flush_kernel_dcache_page_addr(const void *addr);
-
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
+/* The only way to flush a vmap range is to flush the whole cache */
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);
-#define flush_cache_vmap(start, end) flush_cache_all()
+void flush_cache_vmap(unsigned long start, unsigned long end);
#define flush_cache_vmap_early(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) flush_cache_all()
+void flush_cache_vunmap(unsigned long start, unsigned long end);
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio
@@ -77,17 +76,11 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
-/* defined in pacache.S exported in cache.c used by flush_anon_page */
-void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
-
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
#define ARCH_HAS_FLUSH_ON_KUNMAP
-static inline void kunmap_flush_on_unmap(const void *addr)
-{
- flush_kernel_dcache_page_addr(addr);
-}
+void kunmap_flush_on_unmap(const void *addr);
#endif /* _PARISC_CACHEFLUSH_H */
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 974accac05cd..babf65751e81 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -448,14 +448,17 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return pte;
}
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ return READ_ONCE(*ptep);
+}
+#define ptep_get ptep_get
+
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
pte_t pte;
- if (!pte_young(*ptep))
- return 0;
-
- pte = *ptep;
+ pte = ptep_get(ptep);
if (!pte_young(pte)) {
return 0;
}
@@ -463,17 +466,10 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
return 1;
}
-struct mm_struct;
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
- pte_t old_pte;
-
- old_pte = *ptep;
- set_pte(ptep, __pte(0));
-
- return old_pte;
-}
+int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
+struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
set_pte(ptep, pte_wrprotect(*ptep));
@@ -511,7 +507,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
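
The new ptep_get() matters because callers should take one READ_ONCE() snapshot of the PTE and test that copy, rather than dereferencing *ptep repeatedly while another CPU may be updating it. A minimal usage sketch (helper name is illustrative):

/* Illustrative helper, not from the patch. */
static inline bool pte_young_snapshot(pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);	/* single READ_ONCE() of the PTE */

	return pte_young(pte);		/* tests operate on the same snapshot */
}
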
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 422f3e1e6d9c..483bfafd930c 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
+#include <linux/vmalloc.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
@@ -31,20 +32,31 @@
#include <asm/mmu_context.h>
#include <asm/cachectl.h>
+#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
+
+/*
+ * When nonzero, use the _PAGE_ACCESSED bit to try to reduce the number
+ * of page flushes done by flush_cache_page_if_present. There are some
+ * pros and cons in using this option. It may increase the risk of
+ * random segmentation faults.
+ */
+#define CONFIG_FLUSH_PAGE_ACCESSED 0
+
int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);
+/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
-
-/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *); /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */
+static void flush_kernel_dcache_page_addr(const void *addr);
+
/* On some machines (i.e., ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
* by software. We need a spinlock around all TLB flushes to ensure
@@ -321,6 +333,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
{
if (!static_branch_likely(&parisc_has_cache))
return;
+
+ /*
+ * The TLB is the engine of coherence on parisc. The CPU is
+ * entitled to speculate any page with a TLB mapping, so here
+ * we kill the mapping then flush the page along a special flush
+ * only alias mapping. This guarantees that the page is no longer
+ * in the cache for any process, nor may it be speculatively
+ * read in (until the user or kernel specifically accesses it,
+ * of course).
+ */
+ flush_tlb_page(vma, vmaddr);
+
preempt_disable();
flush_dcache_page_asm(physaddr, vmaddr);
if (vma->vm_flags & VM_EXEC)
@@ -328,46 +352,44 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
preempt_enable();
}
-static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
+static void flush_kernel_dcache_page_addr(const void *addr)
{
- unsigned long flags, space, pgd, prot;
-#ifdef CONFIG_TLB_PTLOCK
- unsigned long pgd_lock;
-#endif
+ unsigned long vaddr = (unsigned long)addr;
+ unsigned long flags;
- vmaddr &= PAGE_MASK;
+ /* Purge TLB entry to remove translation on all CPUs */
+ purge_tlb_start(flags);
+ pdtlb(SR_KERNEL, addr);
+ purge_tlb_end(flags);
+ /* Use tmpalias flush to prevent data cache move-in */
preempt_disable();
+ flush_dcache_page_asm(__pa(vaddr), vaddr);
+ preempt_enable();
+}
- /* Set context for flush */
- local_irq_save(flags);
- prot = mfctl(8);
- space = mfsp(SR_USER);
- pgd = mfctl(25);
-#ifdef CONFIG_TLB_PTLOCK
- pgd_lock = mfctl(28);
-#endif
- switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
- local_irq_restore(flags);
-
- flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
- flush_tlb_page(vma, vmaddr);
+static void flush_kernel_icache_page_addr(const void *addr)
+{
+ unsigned long vaddr = (unsigned long)addr;
+ unsigned long flags;
- /* Restore previous context */
- local_irq_save(flags);
-#ifdef CONFIG_TLB_PTLOCK
- mtctl(pgd_lock, 28);
-#endif
- mtctl(pgd, 25);
- mtsp(space, SR_USER);
- mtctl(prot, 8);
- local_irq_restore(flags);
+ /* Purge TLB entry to remove translation on all CPUs */
+ purge_tlb_start(flags);
+ pdtlb(SR_KERNEL, addr);
+ purge_tlb_end(flags);
+ /* Use tmpalias flush to prevent instruction cache move-in */
+ preempt_disable();
+ flush_icache_page_asm(__pa(vaddr), vaddr);
preempt_enable();
}
+void kunmap_flush_on_unmap(const void *addr)
+{
+ flush_kernel_dcache_page_addr(addr);
+}
+EXPORT_SYMBOL(kunmap_flush_on_unmap);
+
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
unsigned int nr)
{
@@ -375,13 +397,16 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
for (;;) {
flush_kernel_dcache_page_addr(kaddr);
- flush_kernel_icache_page(kaddr);
+ flush_kernel_icache_page_addr(kaddr);
if (--nr == 0)
break;
kaddr += PAGE_SIZE;
}
}
+/*
+ * Walk page directory for MM to find PTEP pointer for address ADDR.
+ */
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
pte_t *ptep = NULL;
@@ -410,6 +435,41 @@ static inline bool pte_needs_flush(pte_t pte)
== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
+/*
+ * Return user physical address. Returns 0 if page is not present.
+ */
+static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
+{
+ unsigned long flags, space, pgd, prot, pa;
+#ifdef CONFIG_TLB_PTLOCK
+ unsigned long pgd_lock;
+#endif
+
+ /* Save context */
+ local_irq_save(flags);
+ prot = mfctl(8);
+ space = mfsp(SR_USER);
+ pgd = mfctl(25);
+#ifdef CONFIG_TLB_PTLOCK
+ pgd_lock = mfctl(28);
+#endif
+
+ /* Set context for lpa_user */
+ switch_mm_irqs_off(NULL, mm, NULL);
+ pa = lpa_user(addr);
+
+ /* Restore previous context */
+#ifdef CONFIG_TLB_PTLOCK
+ mtctl(pgd_lock, 28);
+#endif
+ mtctl(pgd, 25);
+ mtsp(space, SR_USER);
+ mtctl(prot, 8);
+ local_irq_restore(flags);
+
+ return pa;
+}
+
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping = folio_flush_mapping(folio);
@@ -458,50 +518,23 @@ void flush_dcache_folio(struct folio *folio)
if (addr + nr * PAGE_SIZE > vma->vm_end)
nr = (vma->vm_end - addr) / PAGE_SIZE;
- if (parisc_requires_coherency()) {
- for (i = 0; i < nr; i++) {
- pte_t *ptep = get_ptep(vma->vm_mm,
- addr + i * PAGE_SIZE);
- if (!ptep)
- continue;
- if (pte_needs_flush(*ptep))
- flush_user_cache_page(vma,
- addr + i * PAGE_SIZE);
- /* Optimise accesses to the same table? */
- pte_unmap(ptep);
- }
- } else {
+ if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+ != (addr & (SHM_COLOUR - 1))) {
+ for (i = 0; i < nr; i++)
+ __flush_cache_page(vma,
+ addr + i * PAGE_SIZE,
+ (pfn + i) * PAGE_SIZE);
/*
- * The TLB is the engine of coherence on parisc:
- * The CPU is entitled to speculate any page
- * with a TLB mapping, so here we kill the
- * mapping then flush the page along a special
- * flush only alias mapping. This guarantees that
- * the page is no-longer in the cache for any
- * process and nor may it be speculatively read
- * in (until the user or kernel specifically
- * accesses it, of course)
+ * Software is allowed to have any number
+ * of private mappings to a page.
*/
- for (i = 0; i < nr; i++)
- flush_tlb_page(vma, addr + i * PAGE_SIZE);
- if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
- != (addr & (SHM_COLOUR - 1))) {
- for (i = 0; i < nr; i++)
- __flush_cache_page(vma,
- addr + i * PAGE_SIZE,
- (pfn + i) * PAGE_SIZE);
- /*
- * Software is allowed to have any number
- * of private mappings to a page.
- */
- if (!(vma->vm_flags & VM_SHARED))
- continue;
- if (old_addr)
- pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
- old_addr, addr, vma->vm_file);
- if (nr == folio_nr_pages(folio))
- old_addr = addr;
- }
+ if (!(vma->vm_flags & VM_SHARED))
+ continue;
+ if (old_addr)
+ pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
+ old_addr, addr, vma->vm_file);
+ if (nr == folio_nr_pages(folio))
+ old_addr = addr;
}
WARN_ON(++count == 4096);
}
@@ -591,35 +624,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);
-void flush_kernel_dcache_page_addr(const void *addr)
-{
- unsigned long flags;
-
- flush_kernel_dcache_page_asm(addr);
- purge_tlb_start(flags);
- pdtlb(SR_KERNEL, addr);
- purge_tlb_end(flags);
-}
-EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
-
static void flush_cache_page_if_present(struct vm_area_struct *vma,
- unsigned long vmaddr, unsigned long pfn)
+ unsigned long vmaddr)
{
+#if CONFIG_FLUSH_PAGE_ACCESSED
bool needs_flush = false;
- pte_t *ptep;
+ pte_t *ptep, pte;
- /*
- * The pte check is racy and sometimes the flush will trigger
- * a non-access TLB miss. Hopefully, the page has already been
- * flushed.
- */
ptep = get_ptep(vma->vm_mm, vmaddr);
if (ptep) {
- needs_flush = pte_needs_flush(*ptep);
+ pte = ptep_get(ptep);
+ needs_flush = pte_needs_flush(pte);
pte_unmap(ptep);
}
if (needs_flush)
- flush_cache_page(vma, vmaddr, pfn);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
+#else
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long physaddr = get_upa(mm, vmaddr);
+
+ if (physaddr)
+ __flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
+#endif
}
void copy_user_highpage(struct page *to, struct page *from,
@@ -629,7 +655,7 @@ void copy_user_highpage(struct page *to, struct page *from,
kfrom = kmap_local_page(from);
kto = kmap_local_page(to);
- flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
+ __flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
copy_page_asm(kto, kfrom);
kunmap_local(kto);
kunmap_local(kfrom);
@@ -638,16 +664,17 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
- flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+ __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
- flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
+ flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
- flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+ __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
+ flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
}
/* __flush_tlb_range()
@@ -681,32 +708,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- unsigned long addr, pfn;
- pte_t *ptep;
-
- for (addr = start; addr < end; addr += PAGE_SIZE) {
- bool needs_flush = false;
- /*
- * The vma can contain pages that aren't present. Although
- * the pte search is expensive, we need the pte to find the
- * page pfn and to check whether the page should be flushed.
- */
- ptep = get_ptep(vma->vm_mm, addr);
- if (ptep) {
- needs_flush = pte_needs_flush(*ptep);
- pfn = pte_pfn(*ptep);
- pte_unmap(ptep);
- }
- if (needs_flush) {
- if (parisc_requires_coherency()) {
- flush_user_cache_page(vma, addr);
- } else {
- if (WARN_ON(!pfn_valid(pfn)))
- return;
- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
- }
- }
- }
+ unsigned long addr;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE)
+ flush_cache_page_if_present(vma, addr);
}
static inline unsigned long mm_total_size(struct mm_struct *mm)
@@ -757,21 +762,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
return;
flush_tlb_range(vma, start, end);
- flush_cache_all();
+ if (vma->vm_flags & VM_EXEC)
+ flush_cache_all();
+ else
+ flush_data_cache();
return;
}
- flush_cache_pages(vma, start, end);
+ flush_cache_pages(vma, start & PAGE_MASK, end);
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
- if (WARN_ON(!pfn_valid(pfn)))
- return;
- if (parisc_requires_coherency())
- flush_user_cache_page(vma, vmaddr);
- else
- __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
@@ -779,34 +782,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon
if (!PageAnon(page))
return;
- if (parisc_requires_coherency()) {
- if (vma->vm_flags & VM_SHARED)
- flush_data_cache();
- else
- flush_user_cache_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
+}
+
+int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t pte = ptep_get(ptep);
+
+ if (!pte_young(pte))
+ return 0;
+ set_pte(ptep, pte_mkold(pte));
+#if CONFIG_FLUSH_PAGE_ACCESSED
+ __flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
+#endif
+ return 1;
+}
+
+/*
+ * After a PTE is cleared, we have no way to flush the cache for
+ * the physical page. On PA8800 and PA8900 processors, stale lines
+ * for that page can cause random cache corruption. Thus, we must
+ * flush the cache as well as the TLB when clearing a valid PTE.
+ */
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ struct mm_struct *mm = (vma)->vm_mm;
+ pte_t pte = ptep_get_and_clear(mm, addr, ptep);
+ unsigned long pfn = pte_pfn(pte);
+
+ if (pfn_valid(pfn))
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ else if (pte_accessible(mm, pte))
+ flush_tlb_page(vma, addr);
+
+ return pte;
+}
+
+/*
+ * The physical address for pages in the ioremap case can be obtained
+ * from the vm_struct struct. I wasn't able to successfully handle the
+ * vmalloc and vmap cases. We have an array of struct page pointers in
+ * the uninitialized vmalloc case but the flush failed using page_to_pfn.
+ */
+void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ unsigned long addr, physaddr;
+ struct vm_struct *vm;
+
+ /* Prevent cache move-in */
+ flush_tlb_kernel_range(start, end);
+
+ if (end - start >= parisc_cache_flush_threshold) {
+ flush_cache_all();
return;
}
- flush_tlb_page(vma, vmaddr);
- preempt_disable();
- flush_dcache_page_asm(page_to_phys(page), vmaddr);
- preempt_enable();
+ if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
+ flush_cache_all();
+ return;
+ }
+
+ vm = find_vm_area((void *)start);
+ if (WARN_ON_ONCE(!vm)) {
+ flush_cache_all();
+ return;
+ }
+
+ /* The physical addresses of IOREMAP regions are contiguous */
+ if (vm->flags & VM_IOREMAP) {
+ physaddr = vm->phys_addr;
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ preempt_disable();
+ flush_dcache_page_asm(physaddr, start);
+ flush_icache_page_asm(physaddr, start);
+ preempt_enable();
+ physaddr += PAGE_SIZE;
+ }
+ return;
+ }
+
+ flush_cache_all();
}
+EXPORT_SYMBOL(flush_cache_vmap);
+/*
+ * The vm_struct has been retired and the page table is set up. The
+ * last page in the range is a guard page. Its physical address can't
+ * be determined using lpa, so there is no way to flush the range
+ * using flush_dcache_page_asm.
+ */
+void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+ /* Prevent cache move-in */
+ flush_tlb_kernel_range(start, end);
+ flush_data_cache();
+}
+EXPORT_SYMBOL(flush_cache_vunmap);
+
+/*
+ * On systems with PA8800/PA8900 processors, there is no way to flush
+ * a vmap range other than using the architected loop to flush the
+ * entire cache. The page directory is not set up, so we can't use
+ * fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
+ * L2 is physically indexed but FDCE/FICE instructions in virtual
+ * mode output their virtual address on the core bus, not their
+ * real address. As a result, the L2 cache index formed from the
+ * virtual address will most likely not be the same as the L2 index
+ * formed from the real address.
+ */
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
- if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
- (unsigned long)size >= parisc_cache_flush_threshold) {
- flush_tlb_kernel_range(start, end);
- flush_data_cache();
+ flush_tlb_kernel_range(start, end);
+
+ if (!static_branch_likely(&parisc_has_dcache))
+ return;
+
+ /* If interrupts are disabled, we can only do local flush */
+ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
+ flush_data_cache_local(NULL);
return;
}
- flush_kernel_dcache_range_asm(start, end);
- flush_tlb_kernel_range(start, end);
+ flush_data_cache();
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
@@ -818,15 +920,18 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
/* Ensure DMA is complete */
asm_syncdma();
- if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
- (unsigned long)size >= parisc_cache_flush_threshold) {
- flush_tlb_kernel_range(start, end);
- flush_data_cache();
+ flush_tlb_kernel_range(start, end);
+
+ if (!static_branch_likely(&parisc_has_dcache))
+ return;
+
+ /* If interrupts are disabled, we can only do local flush */
+ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
+ flush_data_cache_local(NULL);
return;
}
- purge_kernel_dcache_range_asm(start, end);
- flush_tlb_kernel_range(start, end);
+ flush_data_cache();
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
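
For context, the two exported helpers reworked at the end of this file are what drivers call around DMA to a vmalloc'ed buffer; a hedged usage sketch (the function and the DMA step are placeholders):

/* Usage sketch only; the DMA programming is elided. */
static void dma_to_vmalloc_buffer_sketch(void *buf, int len)
{
	flush_kernel_vmap_range(buf, len);	/* write back dirty lines before the device accesses memory */
	/* ... hand the underlying pages to the device, wait for completion ... */
	invalidate_kernel_vmap_range(buf, len);	/* drop stale lines before the CPU reads the DMA'd data */
}
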
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 2a12a547b447..826c8e51b585 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -23,12 +23,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
current->comm, current->pid, r20);
return -ENOSYS;
}
-
-asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
- compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
- const char __user * pathname)
-{
- return sys_fanotify_mark(fanotify_fd, flags,
- ((__u64)mask1 << 32) | mask0,
- dfd, pathname);
-}
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index b13c21373974..66dc406b12e4 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -108,7 +108,7 @@
95 common fchown sys_fchown
96 common getpriority sys_getpriority
97 common setpriority sys_setpriority
-98 common recv sys_recv
+98 common recv sys_recv compat_sys_recv
99 common statfs sys_statfs compat_sys_statfs
100 common fstatfs sys_fstatfs compat_sys_fstatfs
101 common stat64 sys_stat64
@@ -135,7 +135,7 @@
120 common clone sys_clone_wrapper
121 common setdomainname sys_setdomainname
122 common sendfile sys_sendfile compat_sys_sendfile
-123 common recvfrom sys_recvfrom
+123 common recvfrom sys_recvfrom compat_sys_recvfrom
124 32 adjtimex sys_adjtimex_time32
124 64 adjtimex sys_adjtimex
125 common mprotect sys_mprotect
@@ -364,7 +364,7 @@
320 common accept4 sys_accept4
321 common prlimit64 sys_prlimit64
322 common fanotify_init sys_fanotify_init
-323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark
+323 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
324 32 clock_adjtime sys_clock_adjtime32
324 64 clock_adjtime sys_clock_adjtime
325 common name_to_handle_at sys_name_to_handle_at
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3c968f2f4ac4..c88c6d46a5bc 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -137,7 +137,7 @@ config PPC
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_HUGEPD if HUGETLB_PAGE
select ARCH_HAS_KCOV
- select ARCH_HAS_KERNEL_FPU_SUPPORT if PPC_FPU
+ select ARCH_HAS_KERNEL_FPU_SUPPORT if PPC64 && PPC_FPU
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_MEMREMAP_COMPAT_ALIGN if PPC_64S_HASH_MMU
diff --git a/arch/powerpc/crypto/.gitignore b/arch/powerpc/crypto/.gitignore
index e1094f08f713..e9fe73aac8b6 100644
--- a/arch/powerpc/crypto/.gitignore
+++ b/arch/powerpc/crypto/.gitignore
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
aesp10-ppc.S
+aesp8-ppc.S
ghashp10-ppc.S
+ghashp8-ppc.S
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index de10437fd206..fd594bf6c6a9 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -92,9 +92,25 @@ __pu_failed: \
: label)
#endif
+#ifdef CONFIG_CC_IS_CLANG
+#define DS_FORM_CONSTRAINT "Z<>"
+#else
+#define DS_FORM_CONSTRAINT "YZ<>"
+#endif
+
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm2_goto(x, ptr, label) \
__put_user_asm_goto(x, ptr, label, "std")
+#else
+#define __put_user_asm2_goto(x, addr, label) \
+ asm goto ("1: std%U1%X1 %0,%1 # put_user\n" \
+ EX_TABLE(1b, %l2) \
+ : \
+ : "r" (x), DS_FORM_CONSTRAINT (*addr) \
+ : \
+ : label)
+#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label) \
asm goto( \
@@ -165,8 +181,19 @@ do { \
#endif
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm2_goto(x, addr, label) \
__get_user_asm_goto(x, addr, label, "ld")
+#else
+#define __get_user_asm2_goto(x, addr, label) \
+ asm_goto_output( \
+ "1: ld%U1%X1 %0, %1 # get_user\n" \
+ EX_TABLE(1b, %l2) \
+ : "=r" (x) \
+ : DS_FORM_CONSTRAINT (*addr) \
+ : \
+ : label)
+#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label) \
asm_goto_output( \
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 3656f1ca7a21..ebae8415dfbb 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -230,8 +230,10 @@
178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
179 32 pread64 sys_ppc_pread64 compat_sys_ppc_pread64
179 64 pread64 sys_pread64
+179 spu pread64 sys_pread64
180 32 pwrite64 sys_ppc_pwrite64 compat_sys_ppc_pwrite64
180 64 pwrite64 sys_pwrite64
+180 spu pwrite64 sys_pwrite64
181 common chown sys_chown
182 common getcwd sys_getcwd
183 common capget sys_capget
@@ -246,6 +248,7 @@
190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
191 32 readahead sys_ppc_readahead compat_sys_ppc_readahead
191 64 readahead sys_readahead
+191 spu readahead sys_readahead
192 32 mmap2 sys_mmap2 compat_sys_mmap2
193 32 truncate64 sys_ppc_truncate64 compat_sys_ppc_truncate64
194 32 ftruncate64 sys_ppc_ftruncate64 compat_sys_ppc_ftruncate64
@@ -293,6 +296,7 @@
232 nospu set_tid_address sys_set_tid_address
233 32 fadvise64 sys_ppc32_fadvise64 compat_sys_ppc32_fadvise64
233 64 fadvise64 sys_fadvise64
+233 spu fadvise64 sys_fadvise64
234 nospu exit_group sys_exit_group
235 nospu lookup_dcookie sys_ni_syscall
236 common epoll_create sys_epoll_create
@@ -502,7 +506,7 @@
412 32 utimensat_time64 sys_utimensat sys_utimensat
413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index b569ebaa590e..3ff3de9a52ac 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -130,14 +130,16 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
}
rcu_read_unlock();
- fdput(f);
-
- if (!found)
+ if (!found) {
+ fdput(f);
return -EINVAL;
+ }
table_group = iommu_group_get_iommudata(grp);
- if (WARN_ON(!table_group))
+ if (WARN_ON(!table_group)) {
+ fdput(f);
return -EFAULT;
+ }
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
struct iommu_table *tbltmp = table_group->tables[i];
@@ -158,8 +160,10 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
break;
}
}
- if (!tbl)
+ if (!tbl) {
+ fdput(f);
return -EINVAL;
+ }
rcu_read_lock();
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
@@ -170,6 +174,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
/* stit is being destroyed */
iommu_tce_table_put(tbl);
rcu_read_unlock();
+ fdput(f);
return -ENOTTY;
}
/*
@@ -177,6 +182,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
* its KVM reference counter and can return.
*/
rcu_read_unlock();
+ fdput(f);
return 0;
}
rcu_read_unlock();
@@ -184,6 +190,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
stit = kzalloc(sizeof(*stit), GFP_KERNEL);
if (!stit) {
iommu_tce_table_put(tbl);
+ fdput(f);
return -ENOMEM;
}
@@ -192,6 +199,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
list_add_rcu(&stit->next, &stt->iommu_tables);
+ fdput(f);
return 0;
}
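
The change above plugs a file-reference leak by pairing every early return with an fdput(f). An equivalent single-exit shape, shown only as a sketch with hypothetical helpers, would be:

/* Sketch with hypothetical helpers (lookup_group, do_attach); the patch
 * itself keeps the existing multiple-return style and simply adds the
 * missing fdput() calls. */
static long attach_group_sketch(struct kvm *kvm, int tablefd)
{
	struct fd f = fdget(tablefd);
	long ret = -EINVAL;

	if (!f.file)
		return -EBADF;

	if (!lookup_group(f.file))
		goto out;

	ret = do_attach(kvm);
out:
	fdput(f);		/* single place that drops the reference */
	return ret;
}
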
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 43b97032a91c..a0c4f1bde83e 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -900,6 +900,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
/* Get offset into TMP_REG */
EMIT(PPC_RAW_LI(tmp_reg, off));
+ /*
+ * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
+ * before and after the operation.
+ *
+ * This is a requirement in the Linux Kernel Memory Model.
+ * See __cmpxchg_u32() in asm/cmpxchg.h as an example.
+ */
+ if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
tmp_idx = ctx->idx * 4;
/* load value from memory into r0 */
EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
@@ -953,6 +962,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
/* For the BPF_FETCH variant, get old data into src_reg */
if (imm & BPF_FETCH) {
+ /* Emit 'sync' to enforce full ordering */
+ if (IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_MR(ret_reg, ax_reg));
if (!fp->aux->verifier_zext)
EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
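
Concretely, for a 32-bit atomic add with BPF_FETCH the JIT now emits roughly the sequence below (illustrative; register names are placeholders), with full barriers on both sides as the comment above describes:

/*
 *	sync			# full barrier before the atomic
 * 1:	lwarx	r0, 0, rADDR
 *	mr	rAX, r0		# keep the old value for BPF_FETCH
 *	add	r0, r0, rSRC
 *	stwcx.	r0, 0, rADDR
 *	bne-	1b
 *	sync			# full barrier before the old value is used
 *	mr	rRET, rAX
 */
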
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 8afc14a4a125..7703dcf48be8 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -846,6 +846,15 @@ emit_clear:
/* Get offset into TMP_REG_1 */
EMIT(PPC_RAW_LI(tmp1_reg, off));
+ /*
+ * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
+ * before and after the operation.
+ *
+ * This is a requirement in the Linux Kernel Memory Model.
+ * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
+ */
+ if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
if (size == BPF_DW)
@@ -908,6 +917,9 @@ emit_clear:
PPC_BCC_SHORT(COND_NE, tmp_idx);
if (imm & BPF_FETCH) {
+ /* Emit 'sync' to enforce full ordering */
+ if (IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_MR(ret_reg, _R0));
/*
* Skip unnecessary zero-extension for 32-bit cmpxchg.
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index 6e7029640c0c..62da20f9700a 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -371,8 +371,8 @@ static int read_dt_lpar_name(struct seq_file *m)
static void read_lpar_name(struct seq_file *m)
{
- if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
- pr_err_once("Error can't get the LPAR name");
+ if (read_rtas_lpar_name(m))
+ read_dt_lpar_name(m);
}
#define SPLPAR_MAXLENGTH 1026*(sizeof(char))
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index b94176e25be1..0525ee2d63c7 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -106,7 +106,7 @@ config RISCV
select HAS_IOPORT if MMU
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
- select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL
+ select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && 64BIT
diff --git a/arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts b/arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts
index cd013588adc0..375ff2661b6e 100644
--- a/arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts
+++ b/arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts
@@ -45,6 +45,7 @@
no-1-8-v;
no-mmc;
no-sdio;
+ disable-wp;
};
&uart0 {
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index ddb002ed89de..808b4c78462e 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -10,7 +10,7 @@
#include <asm/fence.h>
-#define __arch_xchg_masked(prepend, append, r, p, n) \
+#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \
({ \
u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
@@ -25,7 +25,7 @@
"0: lr.w %0, %2\n" \
" and %1, %0, %z4\n" \
" or %1, %1, %z3\n" \
- " sc.w %1, %1, %2\n" \
+ " sc.w" sc_sfx " %1, %1, %2\n" \
" bnez %1, 0b\n" \
append \
: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
@@ -46,7 +46,8 @@
: "memory"); \
})
-#define _arch_xchg(ptr, new, sfx, prepend, append) \
+#define _arch_xchg(ptr, new, sc_sfx, swap_sfx, prepend, \
+ sc_append, swap_append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(__ptr)) __new = (new); \
@@ -55,15 +56,15 @@
switch (sizeof(*__ptr)) { \
case 1: \
case 2: \
- __arch_xchg_masked(prepend, append, \
+ __arch_xchg_masked(sc_sfx, prepend, sc_append, \
__ret, __ptr, __new); \
break; \
case 4: \
- __arch_xchg(".w" sfx, prepend, append, \
+ __arch_xchg(".w" swap_sfx, prepend, swap_append, \
__ret, __ptr, __new); \
break; \
case 8: \
- __arch_xchg(".d" sfx, prepend, append, \
+ __arch_xchg(".d" swap_sfx, prepend, swap_append, \
__ret, __ptr, __new); \
break; \
default: \
@@ -73,16 +74,17 @@
})
#define arch_xchg_relaxed(ptr, x) \
- _arch_xchg(ptr, x, "", "", "")
+ _arch_xchg(ptr, x, "", "", "", "", "")
#define arch_xchg_acquire(ptr, x) \
- _arch_xchg(ptr, x, "", "", RISCV_ACQUIRE_BARRIER)
+ _arch_xchg(ptr, x, "", "", "", \
+ RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER)
#define arch_xchg_release(ptr, x) \
- _arch_xchg(ptr, x, "", RISCV_RELEASE_BARRIER, "")
+ _arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "")
#define arch_xchg(ptr, x) \
- _arch_xchg(ptr, x, ".aqrl", "", "")
+ _arch_xchg(ptr, x, ".rl", ".aqrl", "", RISCV_FULL_BARRIER, "")
#define xchg32(ptr, x) \
({ \
diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h
index 06e439eeef9a..09fde95a5e8f 100644
--- a/arch/riscv/include/asm/insn.h
+++ b/arch/riscv/include/asm/insn.h
@@ -145,7 +145,7 @@
/* parts of opcode for RVF, RVD and RVQ */
#define RVFDQ_FL_FS_WIDTH_OFF 12
-#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(3, 0)
+#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(2, 0)
#define RVFDQ_FL_FS_WIDTH_W 2
#define RVFDQ_FL_FS_WIDTH_D 3
#define RVFDQ_LS_FS_WIDTH_Q 4
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
index 1cc7df740edd..e6fbaaf54956 100644
--- a/arch/riscv/kernel/cpu_ops_sbi.c
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -72,7 +72,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
/* Make sure tidle is updated */
smp_mb();
bdata->task_ptr = tidle;
- bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
+ bdata->stack_ptr = task_pt_regs(tidle);
/* Make sure boot data is updated */
smp_mb();
hsm_data = __pa(bdata);
diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c
index 613872b0a21a..24869eb88908 100644
--- a/arch/riscv/kernel/cpu_ops_spinwait.c
+++ b/arch/riscv/kernel/cpu_ops_spinwait.c
@@ -34,8 +34,7 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
/* Make sure tidle is updated */
smp_mb();
- WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid],
- task_stack_page(tidle) + THREAD_SIZE);
+ WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid], task_pt_regs(tidle));
WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
}
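
Both call sites above switch the secondary hart's initial stack pointer from the very top of the idle task's stack to task_pt_regs(tidle). The point, sketched with an illustrative layout, is to keep the pt_regs area reserved at the top of the stack from being clobbered during early boot:

/*
 * Illustrative layout (highest address at the top):
 *
 *   task_stack_page(tidle) + THREAD_SIZE  -------------------------
 *                                          struct pt_regs (reserved)
 *   task_pt_regs(tidle)                   -------------------------
 *                                          usable stack, grows down
 *
 * Starting with sp == task_pt_regs(tidle) leaves the reserved pt_regs
 * slot intact; starting at the very top allowed it to be overwritten.
 */
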
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 87cbd86576b2..4b95c574fd04 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -120,9 +120,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
mutex_unlock(&text_mutex);
- if (!mod)
- local_flush_icache_range(rec->ip, rec->ip + MCOUNT_INSN_SIZE);
-
return out;
}
@@ -156,9 +153,9 @@ static int __ftrace_modify_code(void *data)
} else {
while (atomic_read(&param->cpu_count) <= num_online_cpus())
cpu_relax();
- }
- local_flush_icache_all();
+ local_flush_icache_all();
+ }
return 0;
}
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 4007563fb607..ab03732d06c4 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -89,6 +89,14 @@ static int __patch_insn_set(void *addr, u8 c, size_t len)
memset(waddr, c, len);
+ /*
+ * We could have just patched a function that is about to be
+ * called, so make sure we don't execute partially patched
+ * instructions by flushing the icache as soon as possible.
+ */
+ local_flush_icache_range((unsigned long)waddr,
+ (unsigned long)waddr + len);
+
patch_unmap(FIX_TEXT_POKE0);
if (across_pages)
@@ -135,6 +143,14 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
ret = copy_to_kernel_nofault(waddr, insn, len);
+ /*
+ * We could have just patched a function that is about to be
+ * called, so make sure we don't execute partially patched
+ * instructions by flushing the icache as soon as possible.
+ */
+ local_flush_icache_range((unsigned long)waddr,
+ (unsigned long)waddr + len);
+
patch_unmap(FIX_TEXT_POKE0);
if (across_pages)
@@ -189,9 +205,6 @@ int patch_text_set_nosync(void *addr, u8 c, size_t len)
ret = patch_insn_set(tp, c, len);
- if (!ret)
- flush_icache_range((uintptr_t)tp, (uintptr_t)tp + len);
-
return ret;
}
NOKPROBE_SYMBOL(patch_text_set_nosync);
@@ -224,9 +237,6 @@ int patch_text_nosync(void *addr, const void *insns, size_t len)
ret = patch_insn_write(tp, insns, len);
- if (!ret)
- flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);
-
return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);
@@ -253,9 +263,9 @@ static int patch_text_cb(void *data)
} else {
while (atomic_read(&patch->cpu_count) <= num_online_cpus())
cpu_relax();
- }
- local_flush_icache_all();
+ local_flush_icache_all();
+ }
return ret;
}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 528ec7cc9a62..0d3f00eb0bae 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -156,7 +156,7 @@ unsigned long __get_wchan(struct task_struct *task)
return pc;
}
-noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
walk_stackframe(task, regs, consume_entry, cookie);
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index 64155323cc92..d77afe05578f 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -23,7 +23,7 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
#ifdef CONFIG_64BIT
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
- unsigned long, fd, off_t, offset)
+ unsigned long, fd, unsigned long, offset)
{
return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
}
@@ -32,7 +32,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
- unsigned long, fd, off_t, offset)
+ unsigned long, fd, unsigned long, offset)
{
/*
* Note that the shift for mmap2 is constant (12),
diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
index 0eb689351b7d..5cd407c6a8e4 100644
--- a/arch/riscv/kvm/aia_device.c
+++ b/arch/riscv/kvm/aia_device.c
@@ -237,10 +237,11 @@ static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
{
- u32 hart, group = 0;
+ u32 hart = 0, group = 0;
- hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
- GENMASK_ULL(aia->nr_hart_bits - 1, 0);
+ if (aia->nr_hart_bits)
+ hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
+ GENMASK_ULL(aia->nr_hart_bits - 1, 0);
if (aia->nr_group_bits)
group = (addr >> aia->nr_group_shift) &
GENMASK_ULL(aia->nr_group_bits - 1, 0);
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index c676275ea0a0..62874fbca29f 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -724,9 +724,9 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
switch (reg_subtype) {
case KVM_REG_RISCV_ISA_SINGLE:
return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
- case KVM_REG_RISCV_SBI_MULTI_EN:
+ case KVM_REG_RISCV_ISA_MULTI_EN:
return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
- case KVM_REG_RISCV_SBI_MULTI_DIS:
+ case KVM_REG_RISCV_ISA_MULTI_DIS:
return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
default:
return -ENOENT;
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index b3fcf7d67efb..5224f3733802 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -293,8 +293,8 @@ void handle_page_fault(struct pt_regs *regs)
if (unlikely(access_error(cause, vma))) {
vma_end_read(vma);
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
- tsk->thread.bad_cause = SEGV_ACCERR;
- bad_area_nosemaphore(regs, code, addr);
+ tsk->thread.bad_cause = cause;
+ bad_area_nosemaphore(regs, SEGV_ACCERR, addr);
return;
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index e3218d65f21d..e3405e4b99af 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -250,18 +250,19 @@ static void __init setup_bootmem(void)
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
/*
- * memblock allocator is not aware of the fact that last 4K bytes of
- * the addressable memory can not be mapped because of IS_ERR_VALUE
- * macro. Make sure that last 4k bytes are not usable by memblock
- * if end of dram is equal to maximum addressable memory. For 64-bit
- * kernel, this problem can't happen here as the end of the virtual
- * address space is occupied by the kernel mapping then this check must
- * be done as soon as the kernel mapping base address is determined.
+ * Reserve physical address space that would be mapped to virtual
+ * addresses greater than (void *)(-PAGE_SIZE) because:
+ * - This memory would overlap with ERR_PTR
+ * - This memory belongs to high memory, which is not supported
+ *
+ * This is not applicable to the 64-bit kernel, because virtual addresses
+ * after (void *)(-PAGE_SIZE) are not linearly mapped: they are
+ * occupied by the kernel mapping. Also, it is unrealistic for high memory
+ * to exist on 64-bit platforms.
*/
if (!IS_ENABLED(CONFIG_64BIT)) {
- max_mapped_addr = __pa(~(ulong)0);
- if (max_mapped_addr == (phys_ram_end - 1))
- memblock_set_current_limit(max_mapped_addr - 4096);
+ max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
+ memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
}
min_low_pfn = PFN_UP(phys_ram_base);
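
A worked example of the 32-bit reservation above, with assumed layout values:

/*
 * Illustrative numbers only (assume PAGE_OFFSET == 0xc0000000 and RAM
 * starting at 0x80000000, so va_pa_offset == 0x40000000):
 *
 *   -PAGE_SIZE                      == 0xfffff000   (last virtual page)
 *   __va_to_pa_nodebug(-PAGE_SIZE)  == 0xfffff000 - 0x40000000
 *                                   == 0xbffff000
 *
 * memblock_reserve(0xbffff000, ...) then keeps every physical page whose
 * linear mapping would land in 0xfffff000..0xffffffff out of use, i.e.
 * the window that IS_ERR_VALUE()/ERR_PTR() uses to encode error codes.
 */
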
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 182aac6a0f77..5a36d5538dae 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -170,11 +170,14 @@ static void kaslr_adjust_got(unsigned long offset)
u64 *entry;
/*
- * Even without -fPIE, Clang still uses a global offset table for some
- * reason. Adjust the GOT entries.
+ * Adjust GOT entries, except for ones for undefined weak symbols
+ * that resolved to zero. This also skips the first three reserved
+ * entries on s390x that are zero.
*/
- for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++)
- *entry += offset - __START_KERNEL;
+ for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
+ if (*entry)
+ *entry += offset - __START_KERNEL;
+ }
}
/*
@@ -384,7 +387,7 @@ static void fixup_vmlinux_info(void)
void startup_kernel(void)
{
unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size;
- unsigned long nokaslr_offset_phys = mem_safe_offset();
+ unsigned long nokaslr_offset_phys, kaslr_large_page_offset;
unsigned long amode31_lma = 0;
unsigned long max_physmem_end;
unsigned long asce_limit;
@@ -393,6 +396,12 @@ void startup_kernel(void)
fixup_vmlinux_info();
setup_lpp();
+
+ /*
+ * Non-randomized kernel physical start address must be _SEGMENT_SIZE
+ * aligned (see below).
+ */
+ nokaslr_offset_phys = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size);
/*
@@ -425,10 +434,25 @@ void startup_kernel(void)
save_ipl_cert_comp_list();
rescue_initrd(safe_addr, ident_map_size);
- if (kaslr_enabled())
- __kaslr_offset_phys = randomize_within_range(kernel_size, THREAD_SIZE, 0, ident_map_size);
+ /*
+ * __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
+ * 20 bits (the offset within a large page) are zero. Copy the last
+ * 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
+ * __kaslr_offset_phys.
+ *
+ * With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
+ * are identical, which is required to allow for large mappings of the
+ * kernel image.
+ */
+ kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
+ if (kaslr_enabled()) {
+ unsigned long end = ident_map_size - kaslr_large_page_offset;
+
+ __kaslr_offset_phys = randomize_within_range(kernel_size, _SEGMENT_SIZE, 0, end);
+ }
if (!__kaslr_offset_phys)
__kaslr_offset_phys = nokaslr_offset_phys;
+ __kaslr_offset_phys |= kaslr_large_page_offset;
kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size);
deploy_kernel((void *)__kaslr_offset_phys);
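
A worked example of the segment-alignment handling above, with illustrative addresses:

/*
 * Illustrative values only:
 *
 *   __kaslr_offset          = 0x0000000345678000    (THREAD_SIZE aligned)
 *   kaslr_large_page_offset = 0x345678000 & ~_SEGMENT_MASK = 0x78000
 *
 * If randomize_within_range() returns 0x0000000080000000 (which is
 * _SEGMENT_SIZE aligned), then
 *
 *   __kaslr_offset_phys = 0x0000000080000000 | 0x78000 = 0x0000000080078000
 *
 * so the virtual and physical start addresses share their low 20 bits,
 * which is what allows the image to be mapped with 1 MB segments.
 */
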
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 96d48b7112d4..40cfce2687c4 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -261,21 +261,27 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
static bool large_allowed(enum populate_mode mode)
{
- return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY);
+ return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL);
}
static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
+ unsigned long size = end - addr;
+
return machine.has_edat2 && large_allowed(mode) &&
- IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
+ IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) &&
+ IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE);
}
static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
+ unsigned long size = end - addr;
+
return machine.has_edat1 && large_allowed(mode) &&
- IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
+ IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) &&
+ IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE);
}
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
diff --git a/arch/s390/boot/vmlinux.lds.S b/arch/s390/boot/vmlinux.lds.S
index 1fe5a1d3ff60..a750711d44c8 100644
--- a/arch/s390/boot/vmlinux.lds.S
+++ b/arch/s390/boot/vmlinux.lds.S
@@ -109,6 +109,7 @@ SECTIONS
#ifdef CONFIG_KERNEL_UNCOMPRESSED
. = ALIGN(PAGE_SIZE);
. += AMODE31_SIZE; /* .amode31 section */
+ . = ALIGN(1 << 20); /* _SEGMENT_SIZE */
#else
. = ALIGN(8);
#endif
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 145342e46ea8..8c4adece8911 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -43,7 +43,6 @@ CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
-CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
@@ -51,6 +50,7 @@ CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_CERT_STORE=y
CONFIG_EXPOLINE=y
+# CONFIG_EXPOLINE_EXTERN is not set
CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m
@@ -76,6 +76,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@@ -100,7 +101,6 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
@@ -119,6 +119,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_SMC_DIAG=m
+CONFIG_SMC_LO=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -133,7 +134,6 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -167,6 +167,7 @@ CONFIG_BRIDGE_NETFILTER=m
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
CONFIG_NF_CONNTRACK_PROCFS=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
@@ -183,17 +184,39 @@ CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
+CONFIG_NFT_FLOW_OFFLOAD=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
-CONFIG_NETFILTER_XTABLES_COMPAT=y
+CONFIG_NFT_XFRM=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NF_FLOW_TABLE_PROCFS=y
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -206,8 +229,10 @@ CONFIG_NETFILTER_XT_TARGET_HMARK=m
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
@@ -216,6 +241,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
@@ -230,6 +256,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
@@ -247,6 +274,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -302,7 +330,6 @@ CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_SECURITY=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_FIB_IPV6=m
@@ -373,7 +400,6 @@ CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
CONFIG_GACT_PROB=y
CONFIG_NET_ACT_MIRRED=m
-CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
@@ -462,6 +488,7 @@ CONFIG_DM_VERITY=m
CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
CONFIG_DM_SWITCH=m
CONFIG_DM_INTEGRITY=m
+CONFIG_DM_VDO=m
CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
@@ -574,7 +601,6 @@ CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
-# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
CONFIG_FB=y
# CONFIG_FB_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -645,7 +671,6 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_EXFAT_FS=m
CONFIG_NTFS_FS=m
-CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
@@ -663,6 +688,7 @@ CONFIG_SQUASHFS_XZ=y
CONFIG_SQUASHFS_ZSTD=y
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=m
+CONFIG_NFS_V2=m
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -879,6 +905,5 @@ CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_STRING_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index dc237896f99d..6dd11d3b6aaa 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -41,7 +41,6 @@ CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
-CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
@@ -49,6 +48,7 @@ CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_CERT_STORE=y
CONFIG_EXPOLINE=y
+# CONFIG_EXPOLINE_EXTERN is not set
CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m
@@ -71,6 +71,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@@ -110,6 +111,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_SMC_DIAG=m
+CONFIG_SMC_LO=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -124,7 +126,6 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -158,6 +159,7 @@ CONFIG_BRIDGE_NETFILTER=m
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
CONFIG_NF_CONNTRACK_PROCFS=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
@@ -174,17 +176,39 @@ CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
+CONFIG_NFT_FLOW_OFFLOAD=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
-CONFIG_NETFILTER_XTABLES_COMPAT=y
+CONFIG_NFT_XFRM=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NF_FLOW_TABLE_PROCFS=y
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -197,8 +221,10 @@ CONFIG_NETFILTER_XT_TARGET_HMARK=m
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
@@ -207,6 +233,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
@@ -221,6 +248,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
@@ -238,6 +266,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -293,7 +322,6 @@ CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_SECURITY=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_FIB_IPV6=m
@@ -363,7 +391,6 @@ CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
CONFIG_GACT_PROB=y
CONFIG_NET_ACT_MIRRED=m
-CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
@@ -452,6 +479,7 @@ CONFIG_DM_VERITY=m
CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
CONFIG_DM_SWITCH=m
CONFIG_DM_INTEGRITY=m
+CONFIG_DM_VDO=m
CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
@@ -630,7 +658,6 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_EXFAT_FS=m
CONFIG_NTFS_FS=m
-CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
@@ -649,6 +676,7 @@ CONFIG_SQUASHFS_XZ=y
CONFIG_SQUASHFS_ZSTD=y
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=m
+CONFIG_NFS_V2=m
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index c51f3ec4eb28..8c2b61363bab 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -9,25 +9,22 @@ CONFIG_BPF_SYSCALL=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KEXEC=y
-CONFIG_CRASH_DUMP=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=2
CONFIG_HZ_100=y
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
+# CONFIG_AP is not set
# CONFIG_PFAULT is not set
# CONFIG_S390_HYPFS is not set
# CONFIG_VIRTUALIZATION is not set
# CONFIG_S390_GUEST is not set
# CONFIG_SECCOMP is not set
-# CONFIG_GCC_PLUGINS is not set
# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_SWAP is not set
# CONFIG_COMPAT_BRK is not set
-# CONFIG_COMPACTION is not set
-# CONFIG_MIGRATION is not set
CONFIG_NET=y
# CONFIG_IUCV is not set
# CONFIG_PCPU_DEV_REFCNT is not set
diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
index 7f5004065e8a..35555c944630 100644
--- a/arch/s390/include/asm/entry-common.h
+++ b/arch/s390/include/asm/entry-common.h
@@ -54,7 +54,7 @@ static __always_inline void arch_exit_to_user_mode(void)
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
unsigned long ti_work)
{
- choose_random_kstack_offset(get_tod_clock_fast() & 0xff);
+ choose_random_kstack_offset(get_tod_clock_fast());
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 9863ebe75019..edae13416196 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -451,7 +451,7 @@ static void *nt_final(void *ptr)
/*
* Initialize ELF header (new kernel)
*/
-static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
+static void *ehdr_init(Elf64_Ehdr *ehdr, int phdr_count)
{
memset(ehdr, 0, sizeof(*ehdr));
memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
@@ -465,11 +465,8 @@ static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
ehdr->e_phoff = sizeof(Elf64_Ehdr);
ehdr->e_ehsize = sizeof(Elf64_Ehdr);
ehdr->e_phentsize = sizeof(Elf64_Phdr);
- /*
- * Number of memory chunk PT_LOAD program headers plus one kernel
- * image PT_LOAD program header plus one PT_NOTE program header.
- */
- ehdr->e_phnum = mem_chunk_cnt + 1 + 1;
+ /* Number of PT_LOAD program headers plus PT_NOTE program header */
+ ehdr->e_phnum = phdr_count + 1;
return ehdr + 1;
}
@@ -503,12 +500,14 @@ static int get_mem_chunk_cnt(void)
/*
* Initialize ELF loads (new kernel)
*/
-static void loads_init(Elf64_Phdr *phdr)
+static void loads_init(Elf64_Phdr *phdr, bool os_info_has_vm)
{
- unsigned long old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
+ unsigned long old_identity_base = 0;
phys_addr_t start, end;
u64 idx;
+ if (os_info_has_vm)
+ old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
for_each_physmem_range(idx, &oldmem_type, &start, &end) {
phdr->p_type = PT_LOAD;
phdr->p_vaddr = old_identity_base + start;
@@ -522,6 +521,11 @@ static void loads_init(Elf64_Phdr *phdr)
}
}
+static bool os_info_has_vm(void)
+{
+ return os_info_old_value(OS_INFO_KASLR_OFFSET);
+}
+
/*
* Prepare PT_LOAD type program header for kernel image region
*/
@@ -566,7 +570,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
return ptr;
}
-static size_t get_elfcorehdr_size(int mem_chunk_cnt)
+static size_t get_elfcorehdr_size(int phdr_count)
{
size_t size;
@@ -581,10 +585,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
size += nt_vmcoreinfo_size();
/* nt_final */
size += sizeof(Elf64_Nhdr);
- /* PT_LOAD type program header for kernel text region */
- size += sizeof(Elf64_Phdr);
/* PT_LOADS */
- size += mem_chunk_cnt * sizeof(Elf64_Phdr);
+ size += phdr_count * sizeof(Elf64_Phdr);
return size;
}
@@ -595,8 +597,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
Elf64_Phdr *phdr_notes, *phdr_loads, *phdr_text;
+ int mem_chunk_cnt, phdr_text_cnt;
size_t alloc_size;
- int mem_chunk_cnt;
void *ptr, *hdr;
u64 hdr_off;
@@ -615,12 +617,14 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
}
mem_chunk_cnt = get_mem_chunk_cnt();
+ phdr_text_cnt = os_info_has_vm() ? 1 : 0;
- alloc_size = get_elfcorehdr_size(mem_chunk_cnt);
+ alloc_size = get_elfcorehdr_size(mem_chunk_cnt + phdr_text_cnt);
hdr = kzalloc(alloc_size, GFP_KERNEL);
- /* Without elfcorehdr /proc/vmcore cannot be created. Thus creating
+ /*
+ * Without elfcorehdr /proc/vmcore cannot be created. Thus creating
* a dump with this crash kernel will fail. Panic now to allow other
* dump mechanisms to take over.
*/
@@ -628,21 +632,23 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
panic("s390 kdump allocating elfcorehdr failed");
/* Init elf header */
- ptr = ehdr_init(hdr, mem_chunk_cnt);
+ phdr_notes = ehdr_init(hdr, mem_chunk_cnt + phdr_text_cnt);
/* Init program headers */
- phdr_notes = ptr;
- ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
- phdr_text = ptr;
- ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
- phdr_loads = ptr;
- ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
+ if (phdr_text_cnt) {
+ phdr_text = phdr_notes + 1;
+ phdr_loads = phdr_text + 1;
+ } else {
+ phdr_loads = phdr_notes + 1;
+ }
+ ptr = PTR_ADD(phdr_loads, sizeof(Elf64_Phdr) * mem_chunk_cnt);
/* Init notes */
hdr_off = PTR_DIFF(ptr, hdr);
ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
/* Init kernel text program header */
- text_init(phdr_text);
+ if (phdr_text_cnt)
+ text_init(phdr_text);
/* Init loads */
- loads_init(phdr_loads);
+ loads_init(phdr_loads, phdr_text_cnt);
/* Finalize program headers */
hdr_off = PTR_DIFF(ptr, hdr);
*addr = (unsigned long long) hdr;
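
For readers following the header bookkeeping above: the count passed to ehdr_init() is now the number of PT_LOAD headers (memory chunks plus the optional kernel-text header), and PT_NOTE is added on top. A simplified user-space sketch of that layout, with mem_chunk_cnt and the os_info check replaced by plain parameters:

#include <elf.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Simplified model of the new elfcorehdr layout: one PT_NOTE program
 * header, an optional kernel-text PT_LOAD header (present only when the
 * old kernel advertised its virtual layout via os_info), and one PT_LOAD
 * header per physical memory chunk, all laid out back to back after the
 * ELF header.
 */
static Elf64_Ehdr *build_elfcorehdr(int mem_chunk_cnt, int phdr_text_cnt)
{
	int phdr_count = mem_chunk_cnt + phdr_text_cnt;
	size_t size = sizeof(Elf64_Ehdr) +
		      (phdr_count + 1) * sizeof(Elf64_Phdr); /* +1: PT_NOTE */
	Elf64_Ehdr *ehdr = calloc(1, size);
	Elf64_Phdr *phdr_notes, *phdr_text, *phdr_loads;
	int i;

	if (!ehdr)
		return NULL;

	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);
	ehdr->e_phnum = phdr_count + 1;

	/* Program headers follow the ELF header contiguously. */
	phdr_notes = (Elf64_Phdr *)(ehdr + 1);
	if (phdr_text_cnt) {
		phdr_text = phdr_notes + 1;
		phdr_loads = phdr_text + 1;
	} else {
		phdr_text = NULL;
		phdr_loads = phdr_notes + 1;
	}

	phdr_notes->p_type = PT_NOTE;
	if (phdr_text)
		phdr_text->p_type = PT_LOAD;	/* kernel image region */
	for (i = 0; i < mem_chunk_cnt; i++)
		phdr_loads[i].p_type = PT_LOAD;	/* one per memory chunk */

	return ehdr;
}

int main(void)
{
	Elf64_Ehdr *ehdr = build_elfcorehdr(3, 1);

	if (!ehdr)
		return 1;
	printf("e_phnum = %u\n", (unsigned int)ehdr->e_phnum);	/* prints 5 */
	free(ehdr);
	return 0;
}
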
diff --git a/arch/s390/kernel/syscall.c b/arch/s390/kernel/syscall.c
index dc2355c623d6..50cbcbbaa03d 100644
--- a/arch/s390/kernel/syscall.c
+++ b/arch/s390/kernel/syscall.c
@@ -38,33 +38,6 @@
#include "entry.h"
-/*
- * Perform the mmap() system call. Linux for S/390 isn't able to handle more
- * than 5 system call parameters, so this system call uses a memory block
- * for parameter passing.
- */
-
-struct s390_mmap_arg_struct {
- unsigned long addr;
- unsigned long len;
- unsigned long prot;
- unsigned long flags;
- unsigned long fd;
- unsigned long offset;
-};
-
-SYSCALL_DEFINE1(mmap2, struct s390_mmap_arg_struct __user *, arg)
-{
- struct s390_mmap_arg_struct a;
- int error = -EFAULT;
-
- if (copy_from_user(&a, arg, sizeof(a)))
- goto out;
- error = ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
-out:
- return error;
-}
-
#ifdef CONFIG_SYSVIPC
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls.
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index bd0fee24ad10..01071182763e 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -418,7 +418,7 @@
412 32 utimensat_time64 - sys_utimensat
413 32 pselect6_time64 - compat_sys_pselect6_time64
414 32 ppoll_time64 - compat_sys_ppoll_time64
-416 32 io_pgetevents_time64 - sys_io_pgetevents
+416 32 io_pgetevents_time64 - compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 - sys_mq_timedsend
419 32 mq_timedreceive_time64 - sys_mq_timedreceive
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index ff8f24854c64..0ef83b6ac0db 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -410,7 +410,7 @@ static void __init cpu_enable_directed_irq(void *unused)
union zpci_sic_iib iib = {{0}};
union zpci_sic_iib ziib = {{0}};
- iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;
+ iib.cdiib.dibv_addr = virt_to_phys(zpci_ibv[smp_processor_id()]->vector);
zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &ziib);
diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
index 9dca568509a5..d6f4afcb0e87 100644
--- a/arch/sh/kernel/sys_sh32.c
+++ b/arch/sh/kernel/sys_sh32.c
@@ -59,3 +59,14 @@ asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
(u64)len0 << 32 | len1, advice);
#endif
}
+
+/*
+ * swap the arguments the way that libc wants them instead of
+ * moving flags ahead of the 64-bit nbytes argument
+ */
+SYSCALL_DEFINE6(sh_sync_file_range6, int, fd, SC_ARG64(offset),
+ SC_ARG64(nbytes), unsigned int, flags)
+{
+ return ksys_sync_file_range(fd, SC_VAL64(loff_t, offset),
+ SC_VAL64(loff_t, nbytes), flags);
+}
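
The wrapper above exists because a 32-bit ABI passes each 64-bit value as two register-sized halves, and sh additionally places flags after both 64-bit arguments. A minimal sketch of the recombination idea; the real SC_ARG64/SC_VAL64 macros also handle endianness and register pairing, which is ignored here:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper: rebuild a 64-bit value from the two 32-bit register
 * halves a 32-bit ABI hands to the kernel.  Which half arrives first is
 * ABI- and endianness-dependent; that detail is what SC_ARG64/SC_VAL64
 * hide in the real wrapper.
 */
static int64_t merge_halves(uint32_t hi, uint32_t lo)
{
	return (int64_t)(((uint64_t)hi << 32) | lo);
}

int main(void)
{
	/* e.g. offset = 4 GiB, nbytes = 4096 */
	printf("offset = %lld\n", (long long)merge_halves(0x1, 0x0));
	printf("nbytes = %lld\n", (long long)merge_halves(0x0, 0x1000));
	return 0;
}
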
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index bbf83a2db986..c55fd7696d40 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -321,7 +321,7 @@
311 common set_robust_list sys_set_robust_list
312 common get_robust_list sys_get_robust_list
313 common splice sys_splice
-314 common sync_file_range sys_sync_file_range
+314 common sync_file_range sys_sh_sync_file_range6
315 common tee sys_tee
316 common vmsplice sys_vmsplice
317 common move_pages sys_move_pages
@@ -395,6 +395,7 @@
385 common pkey_alloc sys_pkey_alloc
386 common pkey_free sys_pkey_free
387 common rseq sys_rseq
+388 common sync_file_range2 sys_sync_file_range2
# room for arch specific syscalls
393 common semget sys_semget
394 common semctl sys_semctl
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index a45f0f31fe51..a3d308f2043e 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -18,224 +18,3 @@ sys32_mmap2:
sethi %hi(sys_mmap), %g1
jmpl %g1 + %lo(sys_mmap), %g0
sllx %o5, 12, %o5
-
- .align 32
- .globl sys32_socketcall
-sys32_socketcall: /* %o0=call, %o1=args */
- cmp %o0, 1
- bl,pn %xcc, do_einval
- cmp %o0, 18
- bg,pn %xcc, do_einval
- sub %o0, 1, %o0
- sllx %o0, 5, %o0
- sethi %hi(__socketcall_table_begin), %g2
- or %g2, %lo(__socketcall_table_begin), %g2
- jmpl %g2 + %o0, %g0
- nop
-do_einval:
- retl
- mov -EINVAL, %o0
-
- .align 32
-__socketcall_table_begin:
-
- /* Each entry is exactly 32 bytes. */
-do_sys_socket: /* sys_socket(int, int, int) */
-1: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_socket), %g1
-2: ldswa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_socket), %g0
-3: ldswa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
-4: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_bind), %g1
-5: ldswa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_bind), %g0
-6: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
-7: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_connect), %g1
-8: ldswa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_connect), %g0
-9: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_listen: /* sys_listen(int, int) */
-10: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_listen), %g1
- jmpl %g1 + %lo(sys_listen), %g0
-11: ldswa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
- nop
-do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
-12: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_accept), %g1
-13: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_accept), %g0
-14: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
-15: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_getsockname), %g1
-16: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_getsockname), %g0
-17: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
-18: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_getpeername), %g1
-19: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_getpeername), %g0
-20: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
-21: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_socketpair), %g1
-22: ldswa [%o1 + 0x8] %asi, %o2
-23: lduwa [%o1 + 0xc] %asi, %o3
- jmpl %g1 + %lo(sys_socketpair), %g0
-24: ldswa [%o1 + 0x4] %asi, %o1
- nop
- nop
-do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
-25: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_send), %g1
-26: lduwa [%o1 + 0x8] %asi, %o2
-27: lduwa [%o1 + 0xc] %asi, %o3
- jmpl %g1 + %lo(sys_send), %g0
-28: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
-do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
-29: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_recv), %g1
-30: lduwa [%o1 + 0x8] %asi, %o2
-31: lduwa [%o1 + 0xc] %asi, %o3
- jmpl %g1 + %lo(sys_recv), %g0
-32: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
-do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
-33: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_sendto), %g1
-34: lduwa [%o1 + 0x8] %asi, %o2
-35: lduwa [%o1 + 0xc] %asi, %o3
-36: lduwa [%o1 + 0x10] %asi, %o4
-37: ldswa [%o1 + 0x14] %asi, %o5
- jmpl %g1 + %lo(sys_sendto), %g0
-38: lduwa [%o1 + 0x4] %asi, %o1
-do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
-39: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_recvfrom), %g1
-40: lduwa [%o1 + 0x8] %asi, %o2
-41: lduwa [%o1 + 0xc] %asi, %o3
-42: lduwa [%o1 + 0x10] %asi, %o4
-43: lduwa [%o1 + 0x14] %asi, %o5
- jmpl %g1 + %lo(sys_recvfrom), %g0
-44: lduwa [%o1 + 0x4] %asi, %o1
-do_sys_shutdown: /* sys_shutdown(int, int) */
-45: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_shutdown), %g1
- jmpl %g1 + %lo(sys_shutdown), %g0
-46: ldswa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
- nop
-do_sys_setsockopt: /* sys_setsockopt(int, int, int, char *, int) */
-47: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_setsockopt), %g1
-48: ldswa [%o1 + 0x8] %asi, %o2
-49: lduwa [%o1 + 0xc] %asi, %o3
-50: ldswa [%o1 + 0x10] %asi, %o4
- jmpl %g1 + %lo(sys_setsockopt), %g0
-51: ldswa [%o1 + 0x4] %asi, %o1
- nop
-do_sys_getsockopt: /* sys_getsockopt(int, int, int, u32, u32) */
-52: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_getsockopt), %g1
-53: ldswa [%o1 + 0x8] %asi, %o2
-54: lduwa [%o1 + 0xc] %asi, %o3
-55: lduwa [%o1 + 0x10] %asi, %o4
- jmpl %g1 + %lo(sys_getsockopt), %g0
-56: ldswa [%o1 + 0x4] %asi, %o1
- nop
-do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
-57: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(compat_sys_sendmsg), %g1
-58: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(compat_sys_sendmsg), %g0
-59: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
-60: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(compat_sys_recvmsg), %g1
-61: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(compat_sys_recvmsg), %g0
-62: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
-63: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_accept4), %g1
-64: lduwa [%o1 + 0x8] %asi, %o2
-65: ldswa [%o1 + 0xc] %asi, %o3
- jmpl %g1 + %lo(sys_accept4), %g0
-66: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
-
- .section __ex_table,"a"
- .align 4
- .word 1b, __retl_efault, 2b, __retl_efault
- .word 3b, __retl_efault, 4b, __retl_efault
- .word 5b, __retl_efault, 6b, __retl_efault
- .word 7b, __retl_efault, 8b, __retl_efault
- .word 9b, __retl_efault, 10b, __retl_efault
- .word 11b, __retl_efault, 12b, __retl_efault
- .word 13b, __retl_efault, 14b, __retl_efault
- .word 15b, __retl_efault, 16b, __retl_efault
- .word 17b, __retl_efault, 18b, __retl_efault
- .word 19b, __retl_efault, 20b, __retl_efault
- .word 21b, __retl_efault, 22b, __retl_efault
- .word 23b, __retl_efault, 24b, __retl_efault
- .word 25b, __retl_efault, 26b, __retl_efault
- .word 27b, __retl_efault, 28b, __retl_efault
- .word 29b, __retl_efault, 30b, __retl_efault
- .word 31b, __retl_efault, 32b, __retl_efault
- .word 33b, __retl_efault, 34b, __retl_efault
- .word 35b, __retl_efault, 36b, __retl_efault
- .word 37b, __retl_efault, 38b, __retl_efault
- .word 39b, __retl_efault, 40b, __retl_efault
- .word 41b, __retl_efault, 42b, __retl_efault
- .word 43b, __retl_efault, 44b, __retl_efault
- .word 45b, __retl_efault, 46b, __retl_efault
- .word 47b, __retl_efault, 48b, __retl_efault
- .word 49b, __retl_efault, 50b, __retl_efault
- .word 51b, __retl_efault, 52b, __retl_efault
- .word 53b, __retl_efault, 54b, __retl_efault
- .word 55b, __retl_efault, 56b, __retl_efault
- .word 57b, __retl_efault, 58b, __retl_efault
- .word 59b, __retl_efault, 60b, __retl_efault
- .word 61b, __retl_efault, 62b, __retl_efault
- .word 63b, __retl_efault, 64b, __retl_efault
- .word 65b, __retl_efault, 66b, __retl_efault
- .previous
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index ac6c281ccfe0..cfdfb3707c16 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -117,7 +117,7 @@
90 common dup2 sys_dup2
91 32 setfsuid32 sys_setfsuid
92 common fcntl sys_fcntl compat_sys_fcntl
-93 common select sys_select
+93 common select sys_select compat_sys_select
94 32 setfsgid32 sys_setfsgid
95 common fsync sys_fsync
96 common setpriority sys_setpriority
@@ -155,7 +155,7 @@
123 32 fchown sys_fchown16
123 64 fchown sys_fchown
124 common fchmod sys_fchmod
-125 common recvfrom sys_recvfrom
+125 common recvfrom sys_recvfrom compat_sys_recvfrom
126 32 setreuid sys_setreuid16
126 64 setreuid sys_setreuid
127 32 setregid sys_setregid16
@@ -247,7 +247,7 @@
204 32 readdir sys_old_readdir compat_sys_old_readdir
204 64 readdir sys_nis_syscall
205 common readahead sys_readahead compat_sys_readahead
-206 common socketcall sys_socketcall sys32_socketcall
+206 common socketcall sys_socketcall compat_sys_socketcall
207 common syslog sys_syslog
208 common lookup_dcookie sys_ni_syscall
209 common fadvise64 sys_fadvise64 compat_sys_fadvise64
@@ -461,7 +461,7 @@
412 32 utimensat_time64 sys_utimensat sys_utimensat
413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 243ee86cb1b1..f2051644de94 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -105,9 +105,9 @@ vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o
vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
-vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
-$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
+$(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE
$(call if_changed,ld)
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 7fd1f57ad3d3..d6ebcab1d8b2 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -420,7 +420,7 @@
412 i386 utimensat_time64 sys_utimensat
413 i386 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 i386 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 i386 io_pgetevents_time64 sys_io_pgetevents
+416 i386 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 i386 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 i386 mq_timedsend_time64 sys_mq_timedsend
419 i386 mq_timedreceive_time64 sys_mq_timedreceive
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index e64eaa8dda5a..9d6e8f13d13a 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -114,6 +114,7 @@
#include "../perf_event.h"
#include "../probe.h"
+MODULE_DESCRIPTION("Support for Intel cstate performance events");
MODULE_LICENSE("GPL");
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 419c517b8594..c68f5b39952b 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -34,6 +34,7 @@ static struct event_constraint uncore_constraint_fixed =
struct event_constraint uncore_constraint_empty =
EVENT_CONSTRAINT(0, 0, 0);
+MODULE_DESCRIPTION("Support for Intel uncore performance events");
MODULE_LICENSE("GPL");
int uncore_pcibus_to_dieid(struct pci_bus *bus)
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 46e673585560..0c5e7a7c43ac 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -64,6 +64,7 @@
#include "perf_event.h"
#include "probe.h"
+MODULE_DESCRIPTION("Support Intel/AMD RAPL energy consumption counters");
MODULE_LICENSE("GPL");
/*
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index ed2797f132ce..62cef2113ca7 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -93,10 +93,9 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,
\
asm volatile(ALTERNATIVE(_lock_loc \
"call cmpxchg8b_emu", \
- _lock "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
- : [ptr] "+m" (*(_ptr)), \
- "+a" (o.low), "+d" (o.high) \
- : "b" (n.low), "c" (n.high), "S" (_ptr) \
+ _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
+ : "+a" (o.low), "+d" (o.high) \
+ : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr) \
: "memory"); \
\
o.full; \
@@ -122,12 +121,11 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64
\
asm volatile(ALTERNATIVE(_lock_loc \
"call cmpxchg8b_emu", \
- _lock "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
+ _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
CC_SET(e) \
: CC_OUT(e) (ret), \
- [ptr] "+m" (*(_ptr)), \
"+a" (o.low), "+d" (o.high) \
- : "b" (n.low), "c" (n.high), "S" (_ptr) \
+ : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr) \
: "memory"); \
\
if (unlikely(!ret)) \
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 1dc600fa3ba5..481096177500 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -401,7 +401,6 @@ extern int __init efi_memmap_alloc(unsigned int num_entries,
struct efi_memory_map_data *data);
extern void __efi_memmap_free(u64 phys, unsigned long size,
unsigned long flags);
-#define __efi_memmap_free __efi_memmap_free
extern int __init efi_memmap_install(struct efi_memory_map_data *data);
extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index 7e523bb3d2d3..fb2809b20b0a 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -73,19 +73,16 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
#endif
/*
- * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
- * but not enough for x86 stack utilization comfort. To keep
- * reasonable stack head room, reduce the maximum offset to 8 bits.
- *
- * The actual entropy will be further reduced by the compiler when
- * applying stack alignment constraints (see cc_stack_align4/8 in
+ * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
+ * bits. The actual entropy will be further reduced by the compiler
+ * when applying stack alignment constraints (see cc_stack_align4/8 in
* arch/x86/Makefile), which will remove the 3 (x86_64) or 2 (ia32)
* low bits from any entropy chosen here.
*
- * Therefore, final stack offset entropy will be 5 (x86_64) or
- * 6 (ia32) bits.
+ * Therefore, final stack offset entropy will be 7 (x86_64) or
+ * 8 (ia32) bits.
*/
- choose_random_kstack_offset(rdtsc() & 0xFF);
+ choose_random_kstack_offset(rdtsc());
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
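
A rough model of the entropy arithmetic described in the comment above, assuming the 10-bit KSTACK_OFFSET_MAX() mask and the 8-byte (x86_64) stack alignment it mentions:

#include <stdint.h>
#include <stdio.h>

#define KSTACK_OFFSET_MASK	0x3ffU	/* 10 bits, per the comment above */
#define X86_64_ALIGN_BITS	3	/* 8-byte stack alignment drops 3 bits */

int main(void)
{
	uint64_t tsc = 0x123456789abcdefULL;	/* stand-in for rdtsc() */
	uint32_t offset  = (uint32_t)(tsc & KSTACK_OFFSET_MASK);
	uint32_t aligned = offset & ~((1U << X86_64_ALIGN_BITS) - 1);

	printf("raw offset 0x%03x, after alignment 0x%03x\n", offset, aligned);
	printf("effective entropy: %d bits\n", 10 - X86_64_ALIGN_BITS);	/* 7 */
	return 0;
}
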
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ece45b3f6f20..f8ca74e7678f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2154,6 +2154,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len);
+void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
u64 addr, unsigned long roots);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 0f9bab92a43d..3a7755c1a441 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -78,10 +78,10 @@ extern int __get_user_bad(void);
int __ret_gu; \
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
__chk_user_ptr(ptr); \
- asm volatile("call __" #fn "_%c4" \
+ asm volatile("call __" #fn "_%c[size]" \
: "=a" (__ret_gu), "=r" (__val_gu), \
ASM_CALL_CONSTRAINT \
- : "0" (ptr), "i" (sizeof(*(ptr)))); \
+ : "0" (ptr), [size] "i" (sizeof(*(ptr)))); \
instrument_get_user(__val_gu); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
__builtin_expect(__ret_gu, 0); \
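
The change above only renames a positional asm operand ("%c4") to a named one ("%c[size]"); the %c output modifier still prints the "i"-constrained constant with no immediate prefix, which is what lets sizeof(*(ptr)) be spliced into the called helper's name. A standalone x86 GCC/Clang illustration of the same modifier (the symbol-pasting part is left out):

#include <stdio.h>

/*
 * x86 GCC/Clang only.  The "%c" output modifier prints an "i"-constrained
 * operand as a bare constant (no '$' prefix).  Here the constant is simply
 * folded back into an immediate by hand.
 */
static int demo(void)
{
	int ret;

	asm("mov $%c[size], %0"		/* e.g. "mov $8, %eax" on x86_64 */
	    : "=r" (ret)
	    : [size] "i" (sizeof(long)));
	return ret;
}

int main(void)
{
	printf("size = %d\n", demo());
	return 0;
}
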
diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h
index 266daf5b5b84..695f36664889 100644
--- a/arch/x86/include/asm/vmxfeatures.h
+++ b/arch/x86/include/asm/vmxfeatures.h
@@ -77,7 +77,7 @@
#define VMX_FEATURE_ENCLS_EXITING ( 2*32+ 15) /* "" VM-Exit on ENCLS (leaf dependent) */
#define VMX_FEATURE_RDSEED_EXITING ( 2*32+ 16) /* "" VM-Exit on RDSEED */
#define VMX_FEATURE_PAGE_MOD_LOGGING ( 2*32+ 17) /* "pml" Log dirty pages into buffer */
-#define VMX_FEATURE_EPT_VIOLATION_VE ( 2*32+ 18) /* "" Conditionally reflect EPT violations as #VE exceptions */
+#define VMX_FEATURE_EPT_VIOLATION_VE ( 2*32+ 18) /* Conditionally reflect EPT violations as #VE exceptions */
#define VMX_FEATURE_PT_CONCEAL_VMX ( 2*32+ 19) /* "" Suppress VMX indicators in Processor Trace */
#define VMX_FEATURE_XSAVES ( 2*32+ 20) /* "" Enable XSAVES and XRSTORS in guest */
#define VMX_FEATURE_MODE_BASED_EPT_EXEC ( 2*32+ 22) /* "ept_mode_based_exec" Enable separate EPT EXEC bits for supervisor vs. user */
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 3cf156f70859..027a8c7a2c9e 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -215,7 +215,14 @@ out:
int amd_smn_read(u16 node, u32 address, u32 *value)
{
- return __amd_smn_rw(node, address, value, false);
+ int err = __amd_smn_rw(node, address, value, false);
+
+ if (PCI_POSSIBLE_ERROR(*value)) {
+ err = -ENODEV;
+ *value = 0;
+ }
+
+ return err;
}
EXPORT_SYMBOL_GPL(amd_smn_read);
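
The added check leans on the usual PCI convention that a read returning all ones most likely never reached a working device, which is what PCI_POSSIBLE_ERROR() tests. A small user-space sketch of the same convention; the helper below is illustrative, not the kernel implementation:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: mirrors the idea behind PCI_POSSIBLE_ERROR().  A
 * config/SMN read that comes back as all ones is treated as having failed,
 * so it is turned into -ENODEV and the value is cleared.
 */
static int check_smn_read(uint32_t *value, int err)
{
	if (err)
		return err;
	if (*value == 0xffffffffU) {	/* PCI_ERROR_RESPONSE */
		*value = 0;
		return -ENODEV;
	}
	return 0;
}

int main(void)
{
	uint32_t good = 0x1234, bad = 0xffffffffU;
	int grc = check_smn_read(&good, 0);
	int brc = check_smn_read(&bad, 0);

	printf("good read: %d (value 0x%x)\n", grc, good);
	printf("bad read:  %d (value 0x%x)\n", brc, bad);
	return 0;
}
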
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index f9a8c7b7943f..b3fa61d45352 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -345,6 +345,7 @@ static DECLARE_WORK(disable_freq_invariance_work,
disable_freq_invariance_workfn);
DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
static void scale_freq_tick(u64 acnt, u64 mcnt)
{
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2b170da84f97..d4e539d4e158 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1075,6 +1075,10 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
c->x86_virt_bits = (eax >> 8) & 0xff;
c->x86_phys_bits = eax & 0xff;
+
+ /* Provide a sane default if not enumerated: */
+ if (!c->x86_clflush_size)
+ c->x86_clflush_size = 32;
}
c->x86_cache_bits = c->x86_phys_bits;
@@ -1585,6 +1589,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
if (have_cpuid_p()) {
cpu_detect(c);
get_cpu_vendor(c);
+ intel_unlock_cpuid_leafs(c);
get_cpu_cap(c);
setup_force_cpu_cap(X86_FEATURE_CPUID);
get_cpu_address_sizes(c);
@@ -1744,7 +1749,7 @@ static void generic_identify(struct cpuinfo_x86 *c)
cpu_detect(c);
get_cpu_vendor(c);
-
+ intel_unlock_cpuid_leafs(c);
get_cpu_cap(c);
get_cpu_address_sizes(c);
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index ea9e07d57c8d..1beccefbaff9 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -61,9 +61,11 @@ extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
extern void __init tsx_init(void);
void tsx_ap_init(void);
+void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c);
#else
static inline void tsx_init(void) { }
static inline void tsx_ap_init(void) { }
+static inline void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c) { }
#endif /* CONFIG_CPU_SUP_INTEL */
extern void init_spectral_chicken(struct cpuinfo_x86 *c);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3c3e7e5695ba..fdf3489d92a4 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -269,19 +269,26 @@ detect_keyid_bits:
c->x86_phys_bits -= keyid_bits;
}
+void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
+{
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return;
+
+ if (c->x86 < 6 || (c->x86 == 6 && c->x86_model < 0xd))
+ return;
+
+ /*
+ * The BIOS can have limited CPUID to leaf 2, which breaks feature
+ * enumeration. Unlock it and update the maximum leaf info.
+ */
+ if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
+ c->cpuid_level = cpuid_eax(0);
+}
+
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
- /* Unmask CPUID levels if masked: */
- if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
- if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
- MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
- c->cpuid_level = cpuid_eax(0);
- get_cpu_cap(c);
- }
- }
-
if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
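
The unlock helper relies on the msr_clear_bit() return convention: negative on error, 0 if the bit was already clear, positive if it was set and has just been cleared; only the positive case forces the CPUID level to be re-read. A toy model of that flow (bit number and leaf values are illustrative stand-ins):

#include <stdio.h>

/*
 * Toy model of the msr_clear_bit() convention used above: returns 0 if the
 * bit was already clear, and 1 if it was set and has just been cleared.
 * Only the latter means firmware clamped the CPUID level, so it must be
 * refreshed.
 */
static int toy_msr_clear_bit(unsigned long long *msr, int bit)
{
	unsigned long long mask = 1ULL << bit;

	if (!(*msr & mask))
		return 0;
	*msr &= ~mask;
	return 1;
}

int main(void)
{
	/* IA32_MISC_ENABLE with the LIMIT_CPUID_MAXVAL bit (bit 22) set */
	unsigned long long misc_enable = 1ULL << 22;
	unsigned int cpuid_level = 2;		/* clamped by firmware */

	if (toy_msr_clear_bit(&misc_enable, 22) > 0)
		cpuid_level = 0xd;		/* re-read cpuid_eax(0) in real code */

	printf("cpuid_level = 0x%x\n", cpuid_level);
	return 0;
}
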
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 2345e6836593..366f496ca3ce 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -519,7 +519,8 @@ void free_rmid(u32 closid, u32 rmid)
* allows architectures that ignore the closid parameter to avoid an
* unnecessary check.
*/
- if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
+ if (!resctrl_arch_mon_capable() ||
+ idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
RESCTRL_RESERVED_RMID))
return;
diff --git a/arch/x86/kernel/cpu/topology_amd.c b/arch/x86/kernel/cpu/topology_amd.c
index d419deed6a48..7d476fa697ca 100644
--- a/arch/x86/kernel/cpu/topology_amd.c
+++ b/arch/x86/kernel/cpu/topology_amd.c
@@ -84,9 +84,9 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_topoext)
/*
* If leaf 0xb is available, then the domain shifts are set
- * already and nothing to do here.
+ * already and nothing to do here. Only valid for family >= 0x17.
*/
- if (!has_topoext) {
+ if (!has_topoext && tscan->c->x86 >= 0x17) {
/*
* Leaf 0x80000008 set the CORE domain shift already.
* Update the SMT domain, but do not propagate it.
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index b180d8e497c3..cc0f7f70b17b 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -295,8 +295,15 @@ void machine_kexec_cleanup(struct kimage *image)
void machine_kexec(struct kimage *image)
{
unsigned long page_list[PAGES_NR];
- void *control_page;
+ unsigned int host_mem_enc_active;
int save_ftrace_enabled;
+ void *control_page;
+
+ /*
+ * This must be done before load_segments() since if call depth tracking
+ * is used then GS must be valid to make any function calls.
+ */
+ host_mem_enc_active = cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT);
#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)
@@ -358,7 +365,7 @@ void machine_kexec(struct kimage *image)
(unsigned long)page_list,
image->start,
image->preserve_context,
- cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT));
+ host_mem_enc_active);
#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index e42faa792c07..52e1f3f0b361 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -27,25 +27,7 @@
unsigned long profile_pc(struct pt_regs *regs)
{
- unsigned long pc = instruction_pointer(regs);
-
- if (!user_mode(regs) && in_lock_functions(pc)) {
-#ifdef CONFIG_FRAME_POINTER
- return *(unsigned long *)(regs->bp + sizeof(long));
-#else
- unsigned long *sp = (unsigned long *)regs->sp;
- /*
- * Return address is either directly at stack pointer
- * or above a saved flags. Eflags has bits 22-31 zero,
- * kernel addresses don't.
- */
- if (sp[0] >> 22)
- return sp[0];
- if (sp[1] >> 22)
- return sp[1];
-#endif
- }
- return pc;
+ return instruction_pointer(regs);
}
EXPORT_SYMBOL(profile_pc);
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index d64fb2b3eb69..fec95a770270 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -44,6 +44,7 @@ config KVM
select KVM_VFIO
select HAVE_KVM_PM_NOTIFIER if PM
select KVM_GENERIC_HARDWARE_ENABLING
+ select KVM_WERROR if WERROR
help
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
@@ -66,7 +67,7 @@ config KVM_WERROR
# FRAME_WARN, i.e. KVM_WERROR=y with KASAN=y requires special tuning.
# Building KVM with -Werror and KASAN is still doable via enabling
# the kernel-wide WERROR=y.
- depends on KVM && EXPERT && !KASAN
+ depends on KVM && ((EXPERT && !KASAN) || WERROR)
help
Add -Werror to the build flags for KVM.
@@ -97,15 +98,17 @@ config KVM_INTEL
config KVM_INTEL_PROVE_VE
bool "Check that guests do not receive #VE exceptions"
- default KVM_PROVE_MMU || DEBUG_KERNEL
- depends on KVM_INTEL
+ depends on KVM_INTEL && EXPERT
help
-
Checks that KVM's page table management code will not incorrectly
let guests receive a virtualization exception. Virtualization
exceptions will be trapped by the hypervisor rather than injected
in the guest.
+	  Note: some CPUs appear to generate spurious EPT Violation #VEs
+ that trigger KVM's WARN, in particular with eptad=0 and/or nested
+ virtualization.
+
If unsure, say N.
config X86_SGX_KVM
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index ebf41023be38..acd7d48100a1 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -59,7 +59,17 @@
#define MAX_APIC_VECTOR 256
#define APIC_VECTORS_PER_REG 32
-static bool lapic_timer_advance_dynamic __read_mostly;
+/*
+ * Enable local APIC timer advancement (tscdeadline mode only) with adaptive
+ * tuning. When enabled, KVM programs the host timer event to fire early, i.e.
+ * before the deadline expires, to account for the delay between taking the
+ * VM-Exit (to inject the guest event) and the subsequent VM-Enter to resume
+ * the guest, i.e. so that the interrupt arrives in the guest with minimal
+ * latency relative to the deadline programmed by the guest.
+ */
+static bool lapic_timer_advance __read_mostly = true;
+module_param(lapic_timer_advance, bool, 0444);
+
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100 /* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000 /* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT 1000
@@ -1854,16 +1864,14 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
- if (lapic_timer_advance_dynamic) {
- adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
- /*
- * If the timer fired early, reread the TSC to account for the
- * overhead of the above adjustment to avoid waiting longer
- * than is necessary.
- */
- if (guest_tsc < tsc_deadline)
- guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
- }
+ adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
+
+ /*
+ * If the timer fired early, reread the TSC to account for the overhead
+ * of the above adjustment to avoid waiting longer than is necessary.
+ */
+ if (guest_tsc < tsc_deadline)
+ guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
if (guest_tsc < tsc_deadline)
__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
@@ -2812,7 +2820,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
return HRTIMER_NORESTART;
}
-int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
+int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic;
@@ -2845,13 +2853,8 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS_HARD);
apic->lapic_timer.timer.function = apic_timer_fn;
- if (timer_advance_ns == -1) {
+ if (lapic_timer_advance)
apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
- lapic_timer_advance_dynamic = true;
- } else {
- apic->lapic_timer.timer_advance_ns = timer_advance_ns;
- lapic_timer_advance_dynamic = false;
- }
/*
* Stuff the APIC ENABLE bit in lieu of temporarily incrementing
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 0a0ea4b5dd8c..a69e706b9080 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -85,7 +85,7 @@ struct kvm_lapic {
struct dest_map;
-int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns);
+int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu);
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 662f62dfb2aa..8d74bdef68c1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -336,16 +336,19 @@ static int is_cpuid_PSE36(void)
#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
+ KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
WRITE_ONCE(*sptep, spte);
}
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
+ KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
WRITE_ONCE(*sptep, spte);
}
static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
+ KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
return xchg(sptep, spte);
}
@@ -4101,23 +4104,31 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
return leaf;
}
-/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
-static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
+static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+ int *root_level)
{
- u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
- struct rsvd_bits_validate *rsvd_check;
- int root, leaf, level;
- bool reserved = false;
+ int leaf;
walk_shadow_page_lockless_begin(vcpu);
if (is_tdp_mmu_active(vcpu))
- leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
+ leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root_level);
else
- leaf = get_walk(vcpu, addr, sptes, &root);
+ leaf = get_walk(vcpu, addr, sptes, root_level);
walk_shadow_page_lockless_end(vcpu);
+ return leaf;
+}
+
+/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
+static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
+{
+ u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
+ struct rsvd_bits_validate *rsvd_check;
+ int root, leaf, level;
+ bool reserved = false;
+ leaf = get_sptes_lockless(vcpu, addr, sptes, &root);
if (unlikely(leaf < 0)) {
*sptep = 0ull;
return reserved;
@@ -4400,9 +4411,6 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
return RET_PF_EMULATE;
}
- fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
- smp_rmb();
-
/*
* Check for a relevant mmu_notifier invalidation event before getting
* the pfn from the primary MMU, and before acquiring mmu_lock.
@@ -5921,6 +5929,22 @@ emulate:
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
+void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
+{
+ u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
+ int root_level, leaf, level;
+
+ leaf = get_sptes_lockless(vcpu, gpa, sptes, &root_level);
+ if (unlikely(leaf < 0))
+ return;
+
+ pr_err("%s %llx", msg, gpa);
+ for (level = root_level; level >= leaf; level--)
+ pr_cont(", spte[%d] = 0x%llx", level, sptes[level]);
+ pr_cont("\n");
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_print_sptes);
+
static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
u64 addr, hpa_t root_hpa)
{
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 5dd5405fa07a..52fa004a1fbc 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -3,6 +3,8 @@
#ifndef KVM_X86_MMU_SPTE_H
#define KVM_X86_MMU_SPTE_H
+#include <asm/vmx.h>
+
#include "mmu.h"
#include "mmu_internal.h"
@@ -276,6 +278,13 @@ static inline bool is_shadow_present_pte(u64 pte)
return !!(pte & SPTE_MMU_PRESENT_MASK);
}
+static inline bool is_ept_ve_possible(u64 spte)
+{
+ return (shadow_present_mask & VMX_EPT_SUPPRESS_VE_BIT) &&
+ !(spte & VMX_EPT_SUPPRESS_VE_BIT) &&
+ (spte & VMX_EPT_RWX_MASK) != VMX_EPT_MISCONFIG_WX_VALUE;
+}
+
/*
* Returns true if A/D bits are supported in hardware and are enabled by KVM.
* When enabled, KVM uses A/D bits for all non-nested MMUs. Because L1 can
diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
index fae559559a80..2880fd392e0c 100644
--- a/arch/x86/kvm/mmu/tdp_iter.h
+++ b/arch/x86/kvm/mmu/tdp_iter.h
@@ -21,11 +21,13 @@ static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
{
+ KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte));
return xchg(rcu_dereference(sptep), new_spte);
}
static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
{
+ KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte));
WRITE_ONCE(*rcu_dereference(sptep), new_spte);
}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 1259dd63defc..36539c1b36cd 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -626,7 +626,7 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
* SPTEs.
*/
handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
- 0, iter->level, true);
+ SHADOW_NONPRESENT_VALUE, iter->level, true);
return 0;
}
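
The switch from 0 to SHADOW_NONPRESENT_VALUE matters because, when EPT #VE is enabled, a zapped SPTE with the suppress-#VE bit cleared would deliver a #VE to the guest instead of exiting to KVM. A small sketch of that condition, mirroring is_ept_ve_possible() above with illustrative constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative constants: in the EPT format the suppress-#VE bit is bit 63
 * and the low three bits are the RWX permissions (a write+exec-only value
 * is an EPT misconfiguration rather than a #VE candidate).
 */
#define SUPPRESS_VE_BIT		(1ULL << 63)
#define EPT_RWX_MASK		0x7ULL
#define EPT_MISCONFIG_WX	0x6ULL

/* Mirrors the shape of is_ept_ve_possible() above. */
static bool ve_possible(uint64_t spte)
{
	return !(spte & SUPPRESS_VE_BIT) &&
	       (spte & EPT_RWX_MASK) != EPT_MISCONFIG_WX;
}

int main(void)
{
	uint64_t zapped_to_zero = 0;			/* old behaviour */
	uint64_t zapped_nonpresent = SUPPRESS_VE_BIT;	/* SHADOW_NONPRESENT_VALUE-like */

	printf("zero SPTE can deliver #VE:        %d\n", ve_possible(zapped_to_zero));
	printf("suppress-VE SPTE can deliver #VE: %d\n", ve_possible(zapped_nonpresent));
	return 0;
}
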
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0623cfaa7bb0..95095a233a45 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -779,6 +779,14 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
*/
fpstate_set_confidential(&vcpu->arch.guest_fpu);
vcpu->arch.guest_state_protected = true;
+
+ /*
+ * SEV-ES guest mandates LBR Virtualization to be _always_ ON. Enable it
+ * only after setting guest_state_protected because KVM_SET_MSRS allows
+	 * dynamic toggling of LBRV (for performance reasons) on write access to
+ * MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set.
+ */
+ svm_enable_lbrv(vcpu);
return 0;
}
@@ -2406,6 +2414,12 @@ void __init sev_hardware_setup(void)
if (!boot_cpu_has(X86_FEATURE_SEV_ES))
goto out;
+ if (!lbrv) {
+ WARN_ONCE(!boot_cpu_has(X86_FEATURE_LBRV),
+ "LBRV must be present for SEV-ES support");
+ goto out;
+ }
+
/* Has the system been allocated ASIDs for SEV-ES? */
if (min_sev_asid == 1)
goto out;
@@ -3216,7 +3230,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
struct kvm_vcpu *vcpu = &svm->vcpu;
svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
- svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
/*
	 * An SEV-ES guest requires a VMSA area that is separate from the
@@ -3268,10 +3281,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
/* Clear intercepts on selected MSRs */
set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}
void sev_init_vmcb(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c8dc25886c16..c95d3900fe56 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -99,6 +99,7 @@ static const struct svm_direct_access_msrs {
{ .index = MSR_IA32_SPEC_CTRL, .always = false },
{ .index = MSR_IA32_PRED_CMD, .always = false },
{ .index = MSR_IA32_FLUSH_CMD, .always = false },
+ { .index = MSR_IA32_DEBUGCTLMSR, .always = false },
{ .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
@@ -215,7 +216,7 @@ int vgif = true;
module_param(vgif, int, 0444);
/* enable/disable LBR virtualization */
-static int lbrv = true;
+int lbrv = true;
module_param(lbrv, int, 0444);
static int tsc_scaling = true;
@@ -990,7 +991,7 @@ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
vmcb_mark_dirty(to_vmcb, VMCB_LBR);
}
-static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
+void svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1000,6 +1001,9 @@ static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+ if (sev_es_guest(vcpu->kvm))
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR, 1, 1);
+
/* Move the LBR msrs to the vmcb02 so that the guest can see them. */
if (is_guest_mode(vcpu))
svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
@@ -1009,6 +1013,8 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
+
svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
@@ -2822,10 +2828,24 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
return 0;
}
+static bool
+sev_es_prevent_msr_access(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+ return sev_es_guest(vcpu->kvm) &&
+ vcpu->arch.guest_state_protected &&
+ svm_msrpm_offset(msr_info->index) != MSR_INVALID &&
+ !msr_write_intercepted(vcpu, msr_info->index);
+}
+
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (sev_es_prevent_msr_access(vcpu, msr_info)) {
+ msr_info->data = 0;
+ return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
+ }
+
switch (msr_info->index) {
case MSR_AMD64_TSC_RATIO:
if (!msr_info->host_initiated &&
@@ -2976,6 +2996,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
u32 ecx = msr->index;
u64 data = msr->data;
+
+ if (sev_es_prevent_msr_access(vcpu, msr))
+ return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
+
switch (ecx) {
case MSR_AMD64_TSC_RATIO:
@@ -3846,16 +3870,27 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
/*
- * KVM should never request an NMI window when vNMI is enabled, as KVM
- * allows at most one to-be-injected NMI and one pending NMI, i.e. if
- * two NMIs arrive simultaneously, KVM will inject one and set
- * V_NMI_PENDING for the other. WARN, but continue with the standard
- * single-step approach to try and salvage the pending NMI.
+ * If NMIs are outright masked, i.e. the vCPU is already handling an
+ * NMI, and KVM has not yet intercepted an IRET, then there is nothing
+ * more to do at this time as KVM has already enabled IRET intercepts.
+ * If KVM has already intercepted IRET, then single-step over the IRET,
+ * as NMIs aren't architecturally unmasked until the IRET completes.
+ *
+ * If vNMI is enabled, KVM should never request an NMI window if NMIs
+ * are masked, as KVM allows at most one to-be-injected NMI and one
+ * pending NMI. If two NMIs arrive simultaneously, KVM will inject one
+ * NMI and set V_NMI_PENDING for the other, but if and only if NMIs are
+ * unmasked. KVM _will_ request an NMI window in some situations, e.g.
+ * if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately
+ * inject the NMI. In those situations, KVM needs to single-step over
+ * the STI shadow or intercept STGI.
*/
- WARN_ON_ONCE(is_vnmi_enabled(svm));
+ if (svm_get_nmi_mask(vcpu)) {
+ WARN_ON_ONCE(is_vnmi_enabled(svm));
- if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
- return; /* IRET will cause a vm exit */
+ if (!svm->awaiting_iret_completion)
+ return; /* IRET will cause a vm exit */
+ }
/*
* SEV-ES guests are responsible for signaling when a vCPU is ready to
@@ -5265,6 +5300,12 @@ static __init int svm_hardware_setup(void)
nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
+ if (lbrv) {
+ if (!boot_cpu_has(X86_FEATURE_LBRV))
+ lbrv = false;
+ else
+ pr_info("LBR virtualization supported\n");
+ }
/*
* Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
* may be modified by svm_adjust_mmio_mask()), as well as nrips.
@@ -5318,14 +5359,6 @@ static __init int svm_hardware_setup(void)
svm_x86_ops.set_vnmi_pending = NULL;
}
-
- if (lbrv) {
- if (!boot_cpu_has(X86_FEATURE_LBRV))
- lbrv = false;
- else
- pr_info("LBR virtualization supported\n");
- }
-
if (!enable_pmu)
pr_info("PMU virtualization is disabled\n");
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index be57213cd295..0f1472690b59 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -30,7 +30,7 @@
#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2
-#define MAX_DIRECT_ACCESS_MSRS 47
+#define MAX_DIRECT_ACCESS_MSRS 48
#define MSRPM_OFFSETS 32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
@@ -39,6 +39,7 @@ extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;
extern bool vnmi;
+extern int lbrv;
/*
* Clean bits in VMCB.
@@ -552,6 +553,7 @@ u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
+void svm_enable_lbrv(struct kvm_vcpu *vcpu);
void svm_update_lbrv(struct kvm_vcpu *vcpu);
int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d5b832126e34..643935a0f70a 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2242,6 +2242,9 @@ static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
vmcs_write64(EPT_POINTER,
construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));
+ if (vmx->ve_info)
+ vmcs_write64(VE_INFORMATION_ADDRESS, __pa(vmx->ve_info));
+
/* All VMFUNCs are currently emulated through L0 vmexits. */
if (cpu_has_vmx_vmfunc())
vmcs_write64(VM_FUNCTION_CONTROL, 0);
@@ -6230,6 +6233,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
else if (is_alignment_check(intr_info) &&
!vmx_guest_inject_ac(vcpu))
return true;
+ else if (is_ve_fault(intr_info))
+ return true;
return false;
case EXIT_REASON_EXTERNAL_INTERRUPT:
return true;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6051fad5945f..b3c83c06f826 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5218,8 +5218,15 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
if (is_invalid_opcode(intr_info))
return handle_ud(vcpu);
- if (KVM_BUG_ON(is_ve_fault(intr_info), vcpu->kvm))
- return -EIO;
+ if (WARN_ON_ONCE(is_ve_fault(intr_info))) {
+ struct vmx_ve_information *ve_info = vmx->ve_info;
+
+ WARN_ONCE(ve_info->exit_reason != EXIT_REASON_EPT_VIOLATION,
+ "Unexpected #VE on VM-Exit reason 0x%x", ve_info->exit_reason);
+ dump_vmcs(vcpu);
+ kvm_mmu_print_sptes(vcpu, ve_info->guest_physical_address, "#VE");
+ return 1;
+ }
error_code = 0;
if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 082ac6d95a3a..0763a0f72a06 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -164,15 +164,6 @@ module_param(kvmclock_periodic_sync, bool, 0444);
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, 0644);
-/*
- * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
- * adaptive tuning starting from default advancement of 1000ns. '0' disables
- * advancement entirely. Any other value is used as-is and disables adaptive
- * tuning, i.e. allows privileged userspace to set an exact advancement time.
- */
-static int __read_mostly lapic_timer_advance_ns = -1;
-module_param(lapic_timer_advance_ns, int, 0644);
-
static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, 0444);
@@ -10727,13 +10718,12 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+
if (irqchip_split(vcpu->kvm))
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
- else {
- static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
- if (ioapic_in_kernel(vcpu->kvm))
- kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
- }
+ else if (ioapic_in_kernel(vcpu->kvm))
+ kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
if (is_guest_mode(vcpu))
vcpu->arch.load_eoi_exitmap_pending = true;
@@ -12169,7 +12159,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (r < 0)
return r;
- r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
+ r = kvm_create_lapic(vcpu);
if (r < 0)
goto fail_mmu_destroy;
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 10d5ed8b5990..a1cb3a4e6742 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -44,7 +44,11 @@
or %rdx, %rax
.else
cmp $TASK_SIZE_MAX-\size+1, %eax
+.if \size != 8
jae .Lbad_get_user
+.else
+ jae .Lbad_get_user_8
+.endif
sbb %edx, %edx /* array_index_mask_nospec() */
and %edx, %eax
.endif
@@ -154,7 +158,7 @@ SYM_CODE_END(__get_user_handle_exception)
#ifdef CONFIG_X86_32
SYM_CODE_START_LOCAL(__get_user_8_handle_exception)
ASM_CLAC
-bad_get_user_8:
+.Lbad_get_user_8:
xor %edx,%edx
xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index ce84ba86e69e..6ce10e3c6228 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -493,7 +493,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
for_each_reserved_mem_region(mb_region) {
int nid = memblock_get_region_node(mb_region);
- if (nid != MAX_NUMNODES)
+ if (nid != NUMA_NO_NODE)
node_set(nid, reserved_nodemask);
}
@@ -614,9 +614,9 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(node_online_map);
memset(&numa_meminfo, 0, sizeof(numa_meminfo));
WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
- MAX_NUMNODES));
+ NUMA_NO_NODE));
WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
- MAX_NUMNODES));
+ NUMA_NO_NODE));
/* In case that parsing SRAT failed. */
WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
numa_reset_distance();
diff --git a/arch/x86/platform/efi/memmap.c b/arch/x86/platform/efi/memmap.c
index 4ef20b49eb5e..6ed1935504b9 100644
--- a/arch/x86/platform/efi/memmap.c
+++ b/arch/x86/platform/efi/memmap.c
@@ -92,12 +92,22 @@ int __init efi_memmap_alloc(unsigned int num_entries,
*/
int __init efi_memmap_install(struct efi_memory_map_data *data)
{
+ unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map;
+ unsigned long flags = efi.memmap.flags;
+ u64 phys = efi.memmap.phys_map;
+ int ret;
+
efi_memmap_unmap();
if (efi_enabled(EFI_PARAVIRT))
return 0;
- return __efi_memmap_init(data);
+ ret = __efi_memmap_init(data);
+ if (ret)
+ return ret;
+
+ __efi_memmap_free(phys, size, flags);
+ return 0;
}
/**
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 2e3e8e04961e..8b528e12136f 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -144,10 +144,10 @@ void bio_integrity_free(struct bio *bio)
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_set *bs = bio->bi_pool;
+ if (bip->bip_flags & BIP_INTEGRITY_USER)
+ return;
if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
kfree(bvec_virt(bip->bip_vec));
- else if (bip->bip_flags & BIP_INTEGRITY_USER)
- bio_integrity_unmap_user(bip);
__bio_integrity_free(bs, bip);
bio->bi_integrity = NULL;
@@ -155,6 +155,28 @@ void bio_integrity_free(struct bio *bio)
}
/**
+ * bio_integrity_unmap_free_user - Unmap and free bio user integrity payload
+ * @bio: bio containing bip to be unmapped and freed
+ *
+ * Description: Used to unmap and free the user-mapped integrity portion of a
+ * bio. The submitter that attached the user integrity buffer is responsible
+ * for unmapping and freeing it during completion.
+ */
+void bio_integrity_unmap_free_user(struct bio *bio)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct bio_set *bs = bio->bi_pool;
+
+ if (WARN_ON_ONCE(!(bip->bip_flags & BIP_INTEGRITY_USER)))
+ return;
+ bio_integrity_unmap_user(bip);
+ __bio_integrity_free(bs, bip);
+ bio->bi_integrity = NULL;
+ bio->bi_opf &= ~REQ_INTEGRITY;
+}
+EXPORT_SYMBOL(bio_integrity_unmap_free_user);
+
+/**
* bio_integrity_add_page - Attach integrity metadata
* @bio: bio to update
* @page: page containing integrity metadata
diff --git a/block/blk-flush.c b/block/blk-flush.c
index c17cf8ed8113..cca4f9131f79 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -185,7 +185,7 @@ static void blk_flush_complete_seq(struct request *rq,
/* queue for flush */
if (list_empty(pending))
fq->flush_pending_since = jiffies;
- list_move_tail(&rq->queuelist, pending);
+ list_add_tail(&rq->queuelist, pending);
break;
case REQ_FSEQ_DATA:
@@ -263,6 +263,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
unsigned int seq = blk_flush_cur_seq(rq);
BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
+ list_del_init(&rq->queuelist);
blk_flush_complete_seq(rq, fq, seq, error);
}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index a7fe8e90240a..effeb9a639bb 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -104,6 +104,7 @@ static int blk_validate_zoned_limits(struct queue_limits *lim)
static int blk_validate_limits(struct queue_limits *lim)
{
unsigned int max_hw_sectors;
+ unsigned int logical_block_sectors;
/*
* Unless otherwise specified, default to 512 byte logical blocks and a
@@ -134,8 +135,11 @@ static int blk_validate_limits(struct queue_limits *lim)
lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
return -EINVAL;
+ logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
+ if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
+ return -EINVAL;
lim->max_hw_sectors = round_down(lim->max_hw_sectors,
- lim->logical_block_size >> SECTOR_SHIFT);
+ logical_block_sectors);
/*
* The actual max_sectors value is a complex beast and also takes the
@@ -153,7 +157,7 @@ static int blk_validate_limits(struct queue_limits *lim)
lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
}
lim->max_sectors = round_down(lim->max_sectors,
- lim->logical_block_size >> SECTOR_SHIFT);
+ logical_block_sectors);
/*
* Random default for the maximum number of segments. Driver should not
@@ -611,6 +615,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
unsigned int top, bottom, alignment, ret = 0;
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+ t->max_user_sectors = min_not_zero(t->max_user_sectors,
+ b->max_user_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
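
The blk_validate_limits() change above computes the logical block size in sectors once, rejects any configuration whose logical block exceeds the hardware transfer limit, and only then rounds max_hw_sectors and max_sectors down to a multiple of it. The arithmetic in isolation, as a stand-alone sketch (the ROUND_DOWN macro and variable names are invented here, not block-layer API):

#include <stdio.h>

#define SECTOR_SHIFT	9u
#define ROUND_DOWN(x, m)	((x) - ((x) % (m)))

int main(void)
{
	unsigned int logical_block_size = 4096;		/* bytes */
	unsigned int max_hw_sectors = 2047;		/* 512-byte sectors */
	unsigned int lb_sectors = logical_block_size >> SECTOR_SHIFT;	/* 8 */

	if (lb_sectors > max_hw_sectors) {
		fprintf(stderr, "logical block larger than max transfer\n");
		return 1;
	}

	/* 2047 is not a multiple of 8; round down so requests stay aligned. */
	max_hw_sectors = ROUND_DOWN(max_hw_sectors, lb_sectors);
	printf("max_hw_sectors rounded to %u\n", max_hw_sectors);	/* 2040 */
	return 0;
}
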
diff --git a/block/blk-stat.h b/block/blk-stat.h
index 17e1eb4ec7e2..5d7f18ba436d 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -64,7 +64,6 @@ struct blk_stat_callback {
struct blk_queue_stats *blk_alloc_queue_stats(void);
void blk_free_queue_stats(struct blk_queue_stats *);
-bool blk_stats_alloc_enable(struct request_queue *q);
void blk_stat_add(struct request *rq, u64 now);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 0be180f9a789..c1bf73f8c75d 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1399,32 +1399,32 @@ static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
bps_dft = U64_MAX;
iops_dft = UINT_MAX;
- if (tg->bps_conf[READ] == bps_dft &&
- tg->bps_conf[WRITE] == bps_dft &&
- tg->iops_conf[READ] == iops_dft &&
- tg->iops_conf[WRITE] == iops_dft)
+ if (tg->bps[READ] == bps_dft &&
+ tg->bps[WRITE] == bps_dft &&
+ tg->iops[READ] == iops_dft &&
+ tg->iops[WRITE] == iops_dft)
return 0;
seq_printf(sf, "%s", dname);
- if (tg->bps_conf[READ] == U64_MAX)
+ if (tg->bps[READ] == U64_MAX)
seq_printf(sf, " rbps=max");
else
- seq_printf(sf, " rbps=%llu", tg->bps_conf[READ]);
+ seq_printf(sf, " rbps=%llu", tg->bps[READ]);
- if (tg->bps_conf[WRITE] == U64_MAX)
+ if (tg->bps[WRITE] == U64_MAX)
seq_printf(sf, " wbps=max");
else
- seq_printf(sf, " wbps=%llu", tg->bps_conf[WRITE]);
+ seq_printf(sf, " wbps=%llu", tg->bps[WRITE]);
- if (tg->iops_conf[READ] == UINT_MAX)
+ if (tg->iops[READ] == UINT_MAX)
seq_printf(sf, " riops=max");
else
- seq_printf(sf, " riops=%u", tg->iops_conf[READ]);
+ seq_printf(sf, " riops=%u", tg->iops[READ]);
- if (tg->iops_conf[WRITE] == UINT_MAX)
+ if (tg->iops[WRITE] == UINT_MAX)
seq_printf(sf, " wiops=max");
else
- seq_printf(sf, " wiops=%u", tg->iops_conf[WRITE]);
+ seq_printf(sf, " wiops=%u", tg->iops[WRITE]);
seq_printf(sf, "\n");
return 0;
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index 393c3d134b96..4d9ef5abdf21 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -95,15 +95,11 @@ struct throtl_grp {
bool has_rules_bps[2];
bool has_rules_iops[2];
- /* internally used bytes per second rate limits */
+ /* bytes per second rate limits */
uint64_t bps[2];
- /* user configured bps limits */
- uint64_t bps_conf[2];
- /* internally used IOPS limits */
+ /* IOPS limits */
unsigned int iops[2];
- /* user configured IOPS limits */
- unsigned int iops_conf[2];
/* Number of bytes dispatched in current slice */
uint64_t bytes_disp[2];
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 03aa4eead39e..08d7dfe8bd93 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -450,6 +450,25 @@ static inline bool disk_zone_is_conv(struct gendisk *disk, sector_t sector)
return test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}
+static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)
+{
+ return zone->start + zone->len >= get_capacity(disk);
+}
+
+static bool disk_zone_is_full(struct gendisk *disk,
+ unsigned int zno, unsigned int offset_in_zone)
+{
+ if (zno < disk->nr_zones - 1)
+ return offset_in_zone >= disk->zone_capacity;
+ return offset_in_zone >= disk->last_zone_capacity;
+}
+
+static bool disk_zone_wplug_is_full(struct gendisk *disk,
+ struct blk_zone_wplug *zwplug)
+{
+ return disk_zone_is_full(disk, zwplug->zone_no, zwplug->wp_offset);
+}
+
static bool disk_insert_zone_wplug(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
@@ -543,7 +562,7 @@ static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
return false;
/* We can remove zone write plugs for zones that are empty or full. */
- return !zwplug->wp_offset || zwplug->wp_offset >= disk->zone_capacity;
+ return !zwplug->wp_offset || disk_zone_wplug_is_full(disk, zwplug);
}
static void disk_remove_zone_wplug(struct gendisk *disk,
@@ -664,13 +683,12 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
static void disk_zone_wplug_abort_unaligned(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
- unsigned int zone_capacity = disk->zone_capacity;
unsigned int wp_offset = zwplug->wp_offset;
struct bio_list bl = BIO_EMPTY_LIST;
struct bio *bio;
while ((bio = bio_list_pop(&zwplug->bio_list))) {
- if (wp_offset >= zone_capacity ||
+ if (disk_zone_is_full(disk, zwplug->zone_no, wp_offset) ||
(bio_op(bio) != REQ_OP_ZONE_APPEND &&
bio_offset_from_zone_start(bio) != wp_offset)) {
blk_zone_wplug_bio_io_error(zwplug, bio);
@@ -909,7 +927,6 @@ void blk_zone_write_plug_init_request(struct request *req)
sector_t req_back_sector = blk_rq_pos(req) + blk_rq_sectors(req);
struct request_queue *q = req->q;
struct gendisk *disk = q->disk;
- unsigned int zone_capacity = disk->zone_capacity;
struct blk_zone_wplug *zwplug =
disk_get_zone_wplug(disk, blk_rq_pos(req));
unsigned long flags;
@@ -933,7 +950,7 @@ void blk_zone_write_plug_init_request(struct request *req)
* into the back of the request.
*/
spin_lock_irqsave(&zwplug->lock, flags);
- while (zwplug->wp_offset < zone_capacity) {
+ while (!disk_zone_wplug_is_full(disk, zwplug)) {
bio = bio_list_peek(&zwplug->bio_list);
if (!bio)
break;
@@ -979,7 +996,7 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
* We know such BIO will fail, and that would potentially overflow our
* write pointer offset beyond the end of the zone.
*/
- if (zwplug->wp_offset >= disk->zone_capacity)
+ if (disk_zone_wplug_is_full(disk, zwplug))
goto err;
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
@@ -1535,6 +1552,9 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
void disk_free_zone_resources(struct gendisk *disk)
{
+ if (!disk->zone_wplugs_pool)
+ return;
+
cancel_work_sync(&disk->zone_wplugs_work);
if (disk->zone_wplugs_wq) {
@@ -1556,6 +1576,7 @@ void disk_free_zone_resources(struct gendisk *disk)
kfree(disk->conv_zones_bitmap);
disk->conv_zones_bitmap = NULL;
disk->zone_capacity = 0;
+ disk->last_zone_capacity = 0;
disk->nr_zones = 0;
}
@@ -1600,6 +1621,7 @@ struct blk_revalidate_zone_args {
unsigned long *conv_zones_bitmap;
unsigned int nr_zones;
unsigned int zone_capacity;
+ unsigned int last_zone_capacity;
sector_t sector;
};
@@ -1617,6 +1639,7 @@ static int disk_update_zone_resources(struct gendisk *disk,
disk->nr_zones = args->nr_zones;
disk->zone_capacity = args->zone_capacity;
+ disk->last_zone_capacity = args->last_zone_capacity;
swap(disk->conv_zones_bitmap, args->conv_zones_bitmap);
if (disk->conv_zones_bitmap)
nr_conv_zones = bitmap_weight(disk->conv_zones_bitmap,
@@ -1668,6 +1691,9 @@ static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
return -ENODEV;
}
+ if (disk_zone_is_last(disk, zone))
+ args->last_zone_capacity = zone->capacity;
+
if (!disk_need_zone_resources(disk))
return 0;
@@ -1693,11 +1719,14 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
/*
* Remember the capacity of the first sequential zone and check
- * if it is constant for all zones.
+ * if it is constant for all zones, ignoring the last zone as it can be
+ * smaller.
*/
if (!args->zone_capacity)
args->zone_capacity = zone->capacity;
- if (zone->capacity != args->zone_capacity) {
+ if (disk_zone_is_last(disk, zone)) {
+ args->last_zone_capacity = zone->capacity;
+ } else if (zone->capacity != args->zone_capacity) {
pr_warn("%s: Invalid variable zone capacity\n",
disk->disk_name);
return -ENODEV;
@@ -1732,7 +1761,6 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
{
struct blk_revalidate_zone_args *args = data;
struct gendisk *disk = args->disk;
- sector_t capacity = get_capacity(disk);
sector_t zone_sectors = disk->queue->limits.chunk_sectors;
int ret;
@@ -1743,7 +1771,7 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
return -ENODEV;
}
- if (zone->start >= capacity || !zone->len) {
+ if (zone->start >= get_capacity(disk) || !zone->len) {
pr_warn("%s: Invalid zone start %llu, length %llu\n",
disk->disk_name, zone->start, zone->len);
return -ENODEV;
@@ -1753,7 +1781,7 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
 * All zones must have the same size, with the exception of an eventual
* smaller last zone.
*/
- if (zone->start + zone->len < capacity) {
+ if (!disk_zone_is_last(disk, zone)) {
if (zone->len != zone_sectors) {
pr_warn("%s: Invalid zoned device with non constant zone size\n",
disk->disk_name);
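
All of the blk-zoned.c hunks above boil down to one rule: every sequential zone has the same capacity except possibly the last one, so a "zone full" test must pick the right capacity for the zone it is examining. A tiny stand-alone illustration of that selection (the struct and field names are invented for this sketch, not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct zoned_disk {
	unsigned int nr_zones;
	unsigned int zone_capacity;		/* all zones but the last */
	unsigned int last_zone_capacity;	/* the last zone may be smaller */
};

static bool zone_is_full(const struct zoned_disk *d, unsigned int zno,
			 unsigned int wp_offset)
{
	unsigned int cap = (zno < d->nr_zones - 1) ?
			   d->zone_capacity : d->last_zone_capacity;

	return wp_offset >= cap;
}

int main(void)
{
	struct zoned_disk d = {
		.nr_zones = 4,
		.zone_capacity = 1024,
		.last_zone_capacity = 256,	/* smaller runt zone at the end */
	};

	printf("zone 1 full at wp 1024? %d\n", zone_is_full(&d, 1, 1024));	/* 1 */
	printf("zone 3 full at wp 256?  %d\n", zone_is_full(&d, 3, 256));	/* 1 */
	printf("zone 3 full at wp 255?  %d\n", zone_is_full(&d, 3, 255));	/* 0 */
	return 0;
}
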
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 14fe0fef811c..598fd3e7fcc8 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -314,7 +314,7 @@ static int read_sed_opal_key(const char *key_name, u_char *buffer, int buflen)
&key_type_user, key_name, true);
if (IS_ERR(kref))
- ret = PTR_ERR(kref);
+ return PTR_ERR(kref);
key = key_ref_to_ptr(kref);
down_read(&key->sem);
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 2d4a35e6dd18..09a87fa222c7 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -145,7 +145,7 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
dev_name(&adev->dev), event,
(u32) ac->state);
acpi_notifier_call_chain(adev, event, (u32) ac->state);
- kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
+ power_supply_changed(ac->charger);
}
}
@@ -268,7 +268,7 @@ static int acpi_ac_resume(struct device *dev)
if (acpi_ac_get_state(ac))
return 0;
if (old_state != ac->state)
- kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
+ power_supply_changed(ac->charger);
return 0;
}
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index ddd072cbc738..2133085deda7 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -191,6 +191,10 @@ void
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
acpi_adr_space_type space_id, u32 function);
+void
+acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node,
+ acpi_adr_space_type space_id);
+
acpi_status
acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 18fdf2bc2d49..dc6004daf624 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -20,10 +20,6 @@ extern u8 acpi_gbl_default_address_spaces[];
/* Local prototypes */
-static void
-acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
- acpi_adr_space_type space_id);
-
static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
@@ -818,7 +814,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
*
******************************************************************************/
-static void
+void
acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
acpi_adr_space_type space_id)
{
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 3197e6303c5b..624361a5f34d 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -306,3 +306,57 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
}
ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_execute_orphan_reg_method
+ *
+ * PARAMETERS: device - Handle for the device
+ * space_id - The address space ID
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI
+ * device. This is a _REG method that has no corresponding region
+ * within the device's scope.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method);
+
+ /* Parameter validation */
+
+ if (!device) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_validate_handle(device);
+ if (node) {
+
+ /*
+ * If an "orphan" _REG method is present in the device's scope
+ * for the given address space ID, run it.
+ */
+
+ acpi_ev_execute_orphan_reg_method(node, space_id);
+ } else {
+ status = AE_BAD_PARAMETER;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method)
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 8907b8bf4267..c49b9f8de723 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -44,7 +44,6 @@ acpi_ex_system_memory_space_handler(u32 function,
struct acpi_mem_mapping *mm = mem_info->cur_mm;
u32 length;
acpi_size map_length;
- acpi_size page_boundary_map_length;
#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
u32 remainder;
#endif
@@ -138,26 +137,8 @@ acpi_ex_system_memory_space_handler(u32 function,
map_length = (acpi_size)
((mem_info->address + mem_info->length) - address);
- /*
- * If mapping the entire remaining portion of the region will cross
- * a page boundary, just map up to the page boundary, do not cross.
- * On some systems, crossing a page boundary while mapping regions
- * can cause warnings if the pages have different attributes
- * due to resource management.
- *
- * This has the added benefit of constraining a single mapping to
- * one page, which is similar to the original code that used a 4k
- * maximum window.
- */
- page_boundary_map_length = (acpi_size)
- (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address);
- if (page_boundary_map_length == 0) {
- page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
- }
-
- if (map_length > page_boundary_map_length) {
- map_length = page_boundary_map_length;
- }
+ if (map_length > ACPI_DEFAULT_PAGE_SIZE)
+ map_length = ACPI_DEFAULT_PAGE_SIZE;
/* Create a new mapping starting at the address given */
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 9515bcfe5e97..73903a497d73 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -909,7 +909,7 @@ static void __exit einj_exit(void)
if (einj_initialized)
platform_driver_unregister(&einj_driver);
- platform_device_del(einj_dev);
+ platform_device_unregister(einj_dev);
}
module_init(einj_init);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index e7793ee9e649..299ec653388c 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1333,10 +1333,13 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
if (ec->busy_polling || bits > 8)
acpi_ec_burst_enable(ec);
- for (i = 0; i < bytes; ++i, ++address, ++value)
+ for (i = 0; i < bytes; ++i, ++address, ++value) {
result = (function == ACPI_READ) ?
acpi_ec_read(ec, address, value) :
acpi_ec_write(ec, address, *value);
+ if (result < 0)
+ break;
+ }
if (ec->busy_polling || bits > 8)
acpi_ec_burst_disable(ec);
@@ -1348,8 +1351,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
return AE_NOT_FOUND;
case -ETIME:
return AE_TIME;
- default:
+ case 0:
return AE_OK;
+ default:
+ return AE_ERROR;
}
}
@@ -1502,6 +1507,9 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC);
+ if (scope_handle != ec->handle)
+ acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC);
+
set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
}
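
The EC space-handler hunk above changes two things: the per-byte loop stops at the first failed transfer, and the final result is mapped through an explicit switch so an unexpected errno becomes an error status instead of falling through as success. The shape of that control flow, reduced to a stand-alone sketch with an invented status enum and helper names:

#include <errno.h>
#include <stdio.h>

enum status { ST_OK, ST_NOT_FOUND, ST_TIME, ST_ERROR };

static int read_byte(unsigned int addr, unsigned char *val)
{
	/* pretend the third byte times out */
	if (addr == 2)
		return -ETIME;
	*val = (unsigned char)addr;
	return 0;
}

static enum status read_range(unsigned int addr, unsigned char *buf, int len)
{
	int i, result = 0;

	for (i = 0; i < len; i++, addr++) {
		result = read_byte(addr, &buf[i]);
		if (result < 0)
			break;			/* stop on the first failure */
	}

	switch (result) {
	case -EINVAL:
		return ST_NOT_FOUND;
	case -ETIME:
		return ST_TIME;
	case 0:
		return ST_OK;
	default:
		return ST_ERROR;		/* anything else is a hard error */
	}
}

int main(void)
{
	unsigned char buf[4];

	printf("status = %d\n", read_range(0, buf, 4));	/* prints ST_TIME (2) */
	return 0;
}
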
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 2a0e9fc7b74c..601b670356e5 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -302,6 +302,10 @@ void acpi_mipi_check_crs_csi2(acpi_handle handle);
void acpi_mipi_scan_crs_csi2(void);
void acpi_mipi_init_crs_csi2_swnodes(void);
void acpi_mipi_crs_csi2_cleanup(void);
+#ifdef CONFIG_X86
bool acpi_graph_ignore_port(acpi_handle handle);
+#else
+static inline bool acpi_graph_ignore_port(acpi_handle handle) { return false; }
+#endif
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c
index d05413a0672a..92b658f92dc0 100644
--- a/drivers/acpi/mipi-disco-img.c
+++ b/drivers/acpi/mipi-disco-img.c
@@ -725,14 +725,20 @@ void acpi_mipi_crs_csi2_cleanup(void)
acpi_mipi_del_crs_csi2(csi2);
}
-static const struct dmi_system_id dmi_ignore_port_nodes[] = {
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 9315"),
- },
- },
- { }
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+/* CPU matches for Dell generations with broken ACPI MIPI DISCO info */
+static const struct x86_cpu_id dell_broken_mipi_disco_cpu_gens[] = {
+ X86_MATCH_VFM(INTEL_TIGERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL),
+ {}
};
static const char *strnext(const char *s1, const char *s2)
@@ -761,7 +767,10 @@ bool acpi_graph_ignore_port(acpi_handle handle)
static bool dmi_tested, ignore_port;
if (!dmi_tested) {
- ignore_port = dmi_first_match(dmi_ignore_port_nodes);
+ if (dmi_name_in_vendors("Dell Inc.") &&
+ x86_match_cpu(dell_broken_mipi_disco_cpu_gens))
+ ignore_port = true;
+
dmi_tested = true;
}
@@ -794,3 +803,4 @@ out_free:
kfree(orig_path);
return false;
}
+#endif
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 94e3c000df2e..dc8164b182dc 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -610,7 +610,7 @@ static void acpi_sbs_callback(void *context)
if (sbs->charger_exists) {
acpi_ac_get_present(sbs);
if (sbs->charger_present != saved_charger_state)
- kobject_uevent(&sbs->charger->dev.kobj, KOBJ_CHANGE);
+ power_supply_changed(sbs->charger);
}
if (sbs->manager_present) {
@@ -622,7 +622,7 @@ static void acpi_sbs_callback(void *context)
acpi_battery_read(bat);
if (saved_battery_state == bat->present)
continue;
- kobject_uevent(&bat->bat->dev.kobj, KOBJ_CHANGE);
+ power_supply_changed(bat->bat);
}
}
}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index d67881b50bca..a0cfc857fb55 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -168,11 +168,17 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
static int acpi_thermal_temp(struct acpi_thermal *tz, int temp_deci_k)
{
+ int temp;
+
if (temp_deci_k == THERMAL_TEMP_INVALID)
return THERMAL_TEMP_INVALID;
- return deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
+ temp = deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
tz->kelvin_offset);
+ if (temp <= 0)
+ return THERMAL_TEMP_INVALID;
+
+ return temp;
}
static bool acpi_thermal_trip_valid(struct acpi_thermal_trip *acpi_trip)
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 7dca73417e2b..2fe0934dcd64 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -206,16 +206,16 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
}
/*
- * AMD systems from Renoir and Lucienne *require* that the NVME controller
+ * AMD systems from Renoir onwards *require* that the NVME controller
* is put into D3 over a Modern Standby / suspend-to-idle cycle.
*
* This is "typically" accomplished using the `StorageD3Enable`
* property in the _DSD that is checked via the `acpi_storage_d3` function
- * but this property was introduced after many of these systems launched
- * and most OEM systems don't have it in their BIOS.
+ * but some OEM systems still don't have it in their BIOS.
*
* The Microsoft documentation for StorageD3Enable mentioned that Windows has
- * a hardcoded allowlist for D3 support, which was used for these platforms.
+ * a hardcoded allowlist for D3 support as well as a registry key to override
+ * the BIOS, which has been used for these cases.
*
* This allows quirking on Linux in a similar fashion.
*
@@ -228,19 +228,15 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
* https://bugzilla.kernel.org/show_bug.cgi?id=216773
* https://bugzilla.kernel.org/show_bug.cgi?id=217003
* 2) On at least one HP system StorageD3Enable is missing on the second NVME
- disk in the system.
+ * disk in the system.
+ * 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only
+ * NVME device.
*/
-static const struct x86_cpu_id storage_d3_cpu_ids[] = {
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL), /* Picasso */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */
- {}
-};
-
bool force_storage_d3(void)
{
- return x86_match_cpu(storage_d3_cpu_ids);
+ if (!cpu_feature_enabled(X86_FEATURE_ZEN))
+ return false;
+ return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0;
}
/*
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index b21a7b246a0d..0c2161b1f057 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1045,6 +1045,66 @@ static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
return NULL;
}
+/* Find the smallest unused descriptor the "slow way" */
+static u32 slow_desc_lookup_olocked(struct binder_proc *proc)
+{
+ struct binder_ref *ref;
+ struct rb_node *n;
+ u32 desc;
+
+ desc = 1;
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ if (ref->data.desc > desc)
+ break;
+ desc = ref->data.desc + 1;
+ }
+
+ return desc;
+}
+
+/*
+ * Find an available reference descriptor ID. The proc->outer_lock might
+ * be released in the process, in which case -EAGAIN is returned and the
+ * @desc should be considered invalid.
+ */
+static int get_ref_desc_olocked(struct binder_proc *proc,
+ struct binder_node *node,
+ u32 *desc)
+{
+ struct dbitmap *dmap = &proc->dmap;
+ unsigned long *new, bit;
+ unsigned int nbits;
+
+ /* 0 is reserved for the context manager */
+ if (node == proc->context->binder_context_mgr_node) {
+ *desc = 0;
+ return 0;
+ }
+
+ if (!dbitmap_enabled(dmap)) {
+ *desc = slow_desc_lookup_olocked(proc);
+ return 0;
+ }
+
+ if (dbitmap_acquire_first_zero_bit(dmap, &bit) == 0) {
+ *desc = bit;
+ return 0;
+ }
+
+ /*
+ * The dbitmap is full and needs to grow. The proc->outer_lock
+ * is briefly released to allocate the new bitmap safely.
+ */
+ nbits = dbitmap_grow_nbits(dmap);
+ binder_proc_unlock(proc);
+ new = bitmap_zalloc(nbits, GFP_KERNEL);
+ binder_proc_lock(proc);
+ dbitmap_grow(dmap, new, nbits);
+
+ return -EAGAIN;
+}
+
/**
* binder_get_ref_for_node_olocked() - get the ref associated with given node
* @proc: binder_proc that owns the ref
@@ -1068,12 +1128,14 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
struct binder_node *node,
struct binder_ref *new_ref)
{
- struct binder_context *context = proc->context;
- struct rb_node **p = &proc->refs_by_node.rb_node;
- struct rb_node *parent = NULL;
struct binder_ref *ref;
- struct rb_node *n;
+ struct rb_node *parent;
+ struct rb_node **p;
+ u32 desc;
+retry:
+ p = &proc->refs_by_node.rb_node;
+ parent = NULL;
while (*p) {
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_node);
@@ -1088,6 +1150,10 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
if (!new_ref)
return NULL;
+ /* might release the proc->outer_lock */
+ if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
+ goto retry;
+
binder_stats_created(BINDER_STAT_REF);
new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
new_ref->proc = proc;
@@ -1095,14 +1161,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
- for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
- ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (ref->data.desc > new_ref->data.desc)
- break;
- new_ref->data.desc = ref->data.desc + 1;
- }
-
+ new_ref->data.desc = desc;
p = &proc->refs_by_desc.rb_node;
while (*p) {
parent = *p;
@@ -1131,6 +1190,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
+ struct dbitmap *dmap = &ref->proc->dmap;
bool delete_node = false;
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
@@ -1138,6 +1198,8 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref)
ref->proc->pid, ref->data.debug_id, ref->data.desc,
ref->node->debug_id);
+ if (dbitmap_enabled(dmap))
+ dbitmap_clear_bit(dmap, ref->data.desc);
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
@@ -1298,6 +1360,25 @@ static void binder_free_ref(struct binder_ref *ref)
kfree(ref);
}
+/* shrink descriptor bitmap if needed */
+static void try_shrink_dmap(struct binder_proc *proc)
+{
+ unsigned long *new;
+ int nbits;
+
+ binder_proc_lock(proc);
+ nbits = dbitmap_shrink_nbits(&proc->dmap);
+ binder_proc_unlock(proc);
+
+ if (!nbits)
+ return;
+
+ new = bitmap_zalloc(nbits, GFP_KERNEL);
+ binder_proc_lock(proc);
+ dbitmap_shrink(&proc->dmap, new, nbits);
+ binder_proc_unlock(proc);
+}
+
/**
* binder_update_ref_for_handle() - inc/dec the ref for given handle
* @proc: proc containing the ref
@@ -1334,8 +1415,10 @@ static int binder_update_ref_for_handle(struct binder_proc *proc,
*rdata = ref->data;
binder_proc_unlock(proc);
- if (delete_ref)
+ if (delete_ref) {
binder_free_ref(ref);
+ try_shrink_dmap(proc);
+ }
return ret;
err_no_ref:
@@ -4931,6 +5014,7 @@ static void binder_free_proc(struct binder_proc *proc)
put_task_struct(proc->tsk);
put_cred(proc->cred);
binder_stats_deleted(BINDER_STAT_PROC);
+ dbitmap_free(&proc->dmap);
kfree(proc);
}
@@ -5634,6 +5718,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
+
+ dbitmap_init(&proc->dmap);
spin_lock_init(&proc->inner_lock);
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
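
The descriptor-allocation rework above (get_ref_desc_olocked() and its -EAGAIN/retry caller) is an instance of a common pattern: an allocation is needed while holding a lock that must not be held across the allocation, so the code drops the lock, allocates, retakes the lock, re-validates, and retries the lookup because the state may have changed in between. The stand-alone pthread sketch below shows only that control-flow shape; the names (reserve_slot(), table, ...) are invented for the example and it is not binder code.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *table;
static size_t table_len;
static size_t used;

/*
 * Caller holds 'lock'. Returns 0 with *slot set, -EAGAIN if the lock was
 * dropped to grow the table (the caller must redo its lookup and call
 * again), or -ENOMEM if growing the table failed.
 */
static int reserve_slot_locked(size_t *slot)
{
	size_t new_len = table_len ? table_len * 2 : 8;
	int *new_table;

	if (used < table_len) {
		*slot = used++;
		return 0;
	}

	/* Table is full: allocate a bigger one with the lock dropped. */
	pthread_mutex_unlock(&lock);
	new_table = calloc(new_len, sizeof(*new_table));
	pthread_mutex_lock(&lock);

	if (!new_table)
		return -ENOMEM;

	/* Re-validate: another thread may have grown the table meanwhile. */
	if (new_len <= table_len) {
		free(new_table);
	} else {
		if (table_len)
			memcpy(new_table, table, table_len * sizeof(*new_table));
		free(table);
		table = new_table;
		table_len = new_len;
	}
	return -EAGAIN;		/* state may have changed while unlocked: retry */
}

static int reserve_slot(size_t *slot)
{
	int ret;

	pthread_mutex_lock(&lock);
	do {
		ret = reserve_slot_locked(slot);
	} while (ret == -EAGAIN);
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	size_t slot;

	for (int i = 0; i < 20; i++)
		if (!reserve_slot(&slot))
			printf("got slot %zu\n", slot);
	free(table);
	return 0;
}

Build with -pthread; the retry loop in reserve_slot() plays the same role as the "goto retry" in binder_get_ref_for_node_olocked().
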
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2e1f261ec5c8..b00961944ab1 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -836,9 +836,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
alloc->buffer = vma->vm_start;
- alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
- sizeof(alloc->pages[0]),
- GFP_KERNEL);
+ alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
+ sizeof(alloc->pages[0]),
+ GFP_KERNEL);
if (alloc->pages == NULL) {
ret = -ENOMEM;
failure_string = "alloc page array";
@@ -869,7 +869,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
return 0;
err_alloc_buf_struct_failed:
- kfree(alloc->pages);
+ kvfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
alloc->buffer = 0;
@@ -939,7 +939,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
__free_page(alloc->pages[i].page_ptr);
page_count++;
}
- kfree(alloc->pages);
+ kvfree(alloc->pages);
}
spin_unlock(&alloc->lock);
if (alloc->mm)
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 5b7c80b99ae8..7d4fc53f7a73 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -14,6 +14,7 @@
#include <linux/uidgid.h>
#include <uapi/linux/android/binderfs.h>
#include "binder_alloc.h"
+#include "dbitmap.h"
struct binder_context {
struct binder_node *binder_context_mgr_node;
@@ -368,6 +369,8 @@ struct binder_ref {
* @freeze_wait: waitqueue of processes waiting for all outstanding
* transactions to be processed
* (protected by @inner_lock)
+ * @dmap: dbitmap to manage available reference descriptors
+ * (protected by @outer_lock)
* @todo: list of work for this process
* (protected by @inner_lock)
* @stats: per-process binder statistics
@@ -417,7 +420,7 @@ struct binder_proc {
bool sync_recv;
bool async_recv;
wait_queue_head_t freeze_wait;
-
+ struct dbitmap dmap;
struct list_head todo;
struct binder_stats stats;
struct list_head delivered_death;
diff --git a/drivers/android/dbitmap.h b/drivers/android/dbitmap.h
new file mode 100644
index 000000000000..b8ac7b4764fd
--- /dev/null
+++ b/drivers/android/dbitmap.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2024 Google LLC
+ *
+ * dbitmap - dynamically sized bitmap library.
+ *
+ * Used by the binder driver to optimize the allocation of the smallest
+ * available descriptor ID. Each bit in the bitmap represents the state
+ * of an ID, with the exception of BIT(0) which is used exclusively to
+ * reference binder's context manager.
+ *
+ * A dbitmap can grow or shrink as needed. This part has been designed
+ * considering that users might need to briefly release their locks in
+ * order to allocate memory for the new bitmap. These operations are then
+ * verified to determine whether the grow or shrink is still valid.
+ *
+ * This library does not provide protection against concurrent access
+ * by itself. Binder uses the proc->outer_lock for this purpose.
+ */
+
+#ifndef _LINUX_DBITMAP_H
+#define _LINUX_DBITMAP_H
+#include <linux/bitmap.h>
+
+#define NBITS_MIN BITS_PER_TYPE(unsigned long)
+
+struct dbitmap {
+ unsigned int nbits;
+ unsigned long *map;
+};
+
+static inline int dbitmap_enabled(struct dbitmap *dmap)
+{
+ return !!dmap->nbits;
+}
+
+static inline void dbitmap_free(struct dbitmap *dmap)
+{
+ dmap->nbits = 0;
+ kfree(dmap->map);
+}
+
+/* Returns the nbits that a dbitmap can shrink to, 0 if not possible. */
+static inline unsigned int dbitmap_shrink_nbits(struct dbitmap *dmap)
+{
+ unsigned int bit;
+
+ if (dmap->nbits <= NBITS_MIN)
+ return 0;
+
+ /*
+ * Determine if the bitmap can shrink based on the position of
+ * its last set bit. If the bit is within the first quarter of
+ * the bitmap then shrinking is possible. In this case, the
+ * bitmap should shrink to half its current size.
+ */
+ bit = find_last_bit(dmap->map, dmap->nbits);
+ if (bit < (dmap->nbits >> 2))
+ return dmap->nbits >> 1;
+
+ /*
+ * Note that find_last_bit() returns dmap->nbits when no bits
+ * are set. While this is technically not possible here since
+ * BIT(0) is always set, this check is left for extra safety.
+ */
+ if (bit == dmap->nbits)
+ return NBITS_MIN;
+
+ return 0;
+}
+
+/* Replace the internal bitmap with a new one of different size */
+static inline void
+dbitmap_replace(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+ bitmap_copy(new, dmap->map, min(dmap->nbits, nbits));
+ kfree(dmap->map);
+ dmap->map = new;
+ dmap->nbits = nbits;
+}
+
+static inline void
+dbitmap_shrink(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+ if (!new)
+ return;
+
+ /*
+ * Verify that shrinking to @nbits is still possible. The @new
+ * bitmap might have been allocated without locks, so this call
+ * could now be outdated. In this case, free @new and move on.
+ */
+ if (!dbitmap_enabled(dmap) || dbitmap_shrink_nbits(dmap) != nbits) {
+ kfree(new);
+ return;
+ }
+
+ dbitmap_replace(dmap, new, nbits);
+}
+
+/* Returns the nbits that a dbitmap can grow to. */
+static inline unsigned int dbitmap_grow_nbits(struct dbitmap *dmap)
+{
+ return dmap->nbits << 1;
+}
+
+static inline void
+dbitmap_grow(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+ /*
+ * Verify that growing to @nbits is still possible. The @new
+ * bitmap might have been allocated without locks, so this call
+ * could now be outdated. In this case, free @new and move on.
+ */
+ if (!dbitmap_enabled(dmap) || nbits <= dmap->nbits) {
+ kfree(new);
+ return;
+ }
+
+ /*
+ * Check for ENOMEM after confirming the grow operation is still
+ * required. This ensures we only disable the dbitmap when it's
+ * necessary. Once the dbitmap is disabled, binder will fall back
+ * to slow_desc_lookup_olocked().
+ */
+ if (!new) {
+ dbitmap_free(dmap);
+ return;
+ }
+
+ dbitmap_replace(dmap, new, nbits);
+}
+
+/*
+ * Finds and sets the first zero bit in the bitmap. Upon success @bit
+ * is populated with the index and 0 is returned. Otherwise, -ENOSPC
+ * is returned to indicate that a dbitmap_grow() is needed.
+ */
+static inline int
+dbitmap_acquire_first_zero_bit(struct dbitmap *dmap, unsigned long *bit)
+{
+ unsigned long n;
+
+ n = find_first_zero_bit(dmap->map, dmap->nbits);
+ if (n == dmap->nbits)
+ return -ENOSPC;
+
+ *bit = n;
+ set_bit(n, dmap->map);
+
+ return 0;
+}
+
+static inline void
+dbitmap_clear_bit(struct dbitmap *dmap, unsigned long bit)
+{
+	/* BIT(0) should always be set for the context manager */
+ if (bit)
+ clear_bit(bit, dmap->map);
+}
+
+static inline int dbitmap_init(struct dbitmap *dmap)
+{
+ dmap->map = bitmap_zalloc(NBITS_MIN, GFP_KERNEL);
+ if (!dmap->map) {
+ dmap->nbits = 0;
+ return -ENOMEM;
+ }
+
+ dmap->nbits = NBITS_MIN;
+ /* BIT(0) is reserved for the context manager */
+ set_bit(0, dmap->map);
+
+ return 0;
+}
+#endif
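
As a complement to the header above, here is a minimal user-space model of the same smallest-free-ID idea: a bitmap in which bit 0 is reserved, the lowest clear bit is handed out, and the map doubles when it fills up. It is only an illustration of the data structure; the idmap_* names, plain calloc() and the linear bit scan are inventions of this sketch, not kernel interfaces.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define WORD_BITS (8 * sizeof(unsigned long))

struct idmap {
	unsigned long *map;
	unsigned int nbits;
};

static int idmap_init(struct idmap *m, unsigned int nbits)
{
	m->map = calloc(nbits / WORD_BITS, sizeof(unsigned long));
	if (!m->map)
		return -1;
	m->nbits = nbits;
	m->map[0] |= 1UL;	/* bit 0 reserved, like the context manager */
	return 0;
}

static int idmap_grow(struct idmap *m)
{
	unsigned int nbits = m->nbits * 2;
	unsigned long *new = calloc(nbits / WORD_BITS, sizeof(unsigned long));

	if (!new)
		return -1;
	memcpy(new, m->map, m->nbits / 8);
	free(m->map);
	m->map = new;
	m->nbits = nbits;
	return 0;
}

/* Return the smallest unused ID, growing the bitmap when it is full. */
static long idmap_alloc(struct idmap *m)
{
	for (;;) {
		for (unsigned int bit = 0; bit < m->nbits; bit++) {
			unsigned long mask = 1UL << (bit % WORD_BITS);

			if (!(m->map[bit / WORD_BITS] & mask)) {
				m->map[bit / WORD_BITS] |= mask;
				return bit;
			}
		}
		if (idmap_grow(m))
			return -1;
	}
}

int main(void)
{
	struct idmap m;

	if (idmap_init(&m, WORD_BITS))
		return 1;
	for (int i = 0; i < 70; i++)
		printf("allocated id %ld\n", idmap_alloc(&m));
	free(m.map);
	return 0;
}

The kernel version keeps the scan cheap via find_first_zero_bit() and, as the header comment notes, must re-validate a grow or shrink because the allocation happens with the proc lock dropped.
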
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 6548f10e61d9..fc6fd583faf8 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -429,7 +429,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_pcs_quirk }, /* Comet Lake PCH RAID */
/* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
{ PCI_VDEVICE(INTEL, 0x4b63), board_ahci_pcs_quirk }, /* Elkhart Lake AHCI */
- { PCI_VDEVICE(INTEL, 0x7ae2), board_ahci_pcs_quirk }, /* Alder Lake-P AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1736,6 +1735,14 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap)
if (ap->pflags & ATA_PFLAG_EXTERNAL)
return;
+ /* If no LPM states are supported by the HBA, do not bother with LPM */
+ if ((ap->host->flags & ATA_HOST_NO_PART) &&
+ (ap->host->flags & ATA_HOST_NO_SSC) &&
+ (ap->host->flags & ATA_HOST_NO_DEVSLP)) {
+ ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n");
+ return;
+ }
+
/* user modified policy via module param */
if (mobile_lpm_policy != -1) {
policy = mobile_lpm_policy;
@@ -1968,8 +1975,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
- if (!host)
- return -ENOMEM;
+ if (!host) {
+ rc = -ENOMEM;
+ goto err_rm_sysfs_file;
+ }
host->private_data = hpriv;
if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
@@ -2024,11 +2033,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* initialize adapter */
rc = ahci_configure_dma_masks(pdev, hpriv);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
rc = ahci_pci_reset_controller(host);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
ahci_pci_init_controller(host);
ahci_pci_print_info(host);
@@ -2037,10 +2046,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = ahci_host_activate(host, &ahci_sht);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
pm_runtime_put_noidle(&pdev->dev);
return 0;
+
+err_rm_sysfs_file:
+ sysfs_remove_file_from_group(&pdev->dev.kobj,
+ &dev_attr_remapped_nvme.attr, NULL);
+ return rc;
}
static void ahci_shutdown_one(struct pci_dev *pdev)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 4f35aab81a0a..74b59b78d278 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4136,8 +4136,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
{ "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
- /* Crucial BX100 SSD 500GB has broken LPM support */
- { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
+ /* Crucial devices with broken LPM support */
+ { "CT*0BX*00SSD1", NULL, ATA_HORKAGE_NOLPM },
/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
{ "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
@@ -4155,6 +4155,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM },
+ /* AMD Radeon devices with broken LPM support */
+ { "R3SL240G", NULL, ATA_HORKAGE_NOLPM },
+
+ /* Apacer models with LPM issues */
+ { "Apacer AS340*", NULL, ATA_HORKAGE_NOLPM },
+
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM },
@@ -5483,6 +5489,18 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
return ap;
}
+void ata_port_free(struct ata_port *ap)
+{
+ if (!ap)
+ return;
+
+ kfree(ap->pmp_link);
+ kfree(ap->slave_link);
+ kfree(ap->ncq_sense_buf);
+ kfree(ap);
+}
+EXPORT_SYMBOL_GPL(ata_port_free);
+
static void ata_devres_release(struct device *gendev, void *res)
{
struct ata_host *host = dev_get_drvdata(gendev);
@@ -5509,12 +5527,7 @@ static void ata_host_release(struct kref *kref)
int i;
for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
-
- kfree(ap->pmp_link);
- kfree(ap->slave_link);
- kfree(ap->ncq_sense_buf);
- kfree(ap);
+ ata_port_free(host->ports[i]);
host->ports[i] = NULL;
}
kfree(host);
@@ -5564,8 +5577,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
if (!host)
return NULL;
- if (!devres_open_group(dev, NULL, GFP_KERNEL))
- goto err_free;
+ if (!devres_open_group(dev, NULL, GFP_KERNEL)) {
+ kfree(host);
+ return NULL;
+ }
dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
if (!dr)
@@ -5597,8 +5612,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
err_out:
devres_release_group(dev, NULL);
- err_free:
- kfree(host);
return NULL;
}
EXPORT_SYMBOL_GPL(ata_host_alloc);
@@ -5897,7 +5910,7 @@ int ata_host_register(struct ata_host *host, const struct scsi_host_template *sh
* allocation time.
*/
for (i = host->n_ports; host->ports[i]; i++)
- kfree(host->ports[i]);
+ ata_port_free(host->ports[i]);
/* give ports names and add SCSI hosts */
for (i = 0; i < host->n_ports; i++) {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index cdf29b178ddc..bb4d30d377ae 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1831,11 +1831,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
2
};
- /* set scsi removable (RMB) bit per ata bit, or if the
- * AHCI port says it's external (Hotplug-capable, eSATA).
+ /*
+ * Set the SCSI Removable Media Bit (RMB) if the ATA removable media
+ * device bit (obsolete since ATA-8 ACS) is set.
*/
- if (ata_id_removable(args->id) ||
- (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
+ if (ata_id_removable(args->id))
hdr[1] |= (1 << 7);
if (args->dev->class == ATA_DEV_ZAC) {
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 817838e2f70e..3cb455a32d92 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -915,10 +915,13 @@ static const struct scsi_host_template pata_macio_sht = {
.sg_tablesize = MAX_DCMDS,
/* We may not need that strict one */
.dma_boundary = ATA_DMA_BOUNDARY,
- /* Not sure what the real max is but we know it's less than 64K, let's
- * use 64K minus 256
+ /*
+ * The SCSI core requires the segment size to cover at least a page, so
+ * for 64K page size kernels this must be at least 64K. However the
+ * hardware can't handle 64K, so pata_macio_qc_prep() will split large
+ * requests.
*/
- .max_segment_size = MAX_DBDMA_SEG,
+ .max_segment_size = SZ_64K,
.device_configure = pata_macio_device_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c
index 9a2cb9ca9d1d..93ebf566b54e 100644
--- a/drivers/ata/pata_parport/pata_parport.c
+++ b/drivers/ata/pata_parport/pata_parport.c
@@ -768,7 +768,6 @@ static struct parport_driver pata_parport_driver = {
.name = DRV_NAME,
.match_port = pata_parport_attach,
.detach = pata_parport_detach,
- .devmodel = true,
};
static __init int pata_parport_init(void)
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
index 234f9dbe6e30..51587f0fdaae 100644
--- a/drivers/auxdisplay/ks0108.c
+++ b/drivers/auxdisplay/ks0108.c
@@ -162,7 +162,6 @@ static struct parport_driver ks0108_parport_driver = {
.name = "ks0108",
.match_port = ks0108_parport_attach,
.detach = ks0108_parport_detach,
- .devmodel = true,
};
module_parport_driver(ks0108_parport_driver);
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 049ff443e790..a731f28455b4 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1706,7 +1706,6 @@ static struct parport_driver panel_driver = {
.name = "panel",
.match_port = panel_attach,
.detach = panel_detach,
- .devmodel = true,
};
module_parport_driver(panel_driver);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 131d96c6090b..2b4c0624b704 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2739,8 +2739,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
if (!env)
return -ENOMEM;
+ /* Synchronize with really_probe() */
+ device_lock(dev);
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
+ device_unlock(dev);
if (retval)
goto out;
@@ -2845,15 +2848,6 @@ static void devm_attr_group_remove(struct device *dev, void *res)
sysfs_remove_group(&dev->kobj, group);
}
-static void devm_attr_groups_remove(struct device *dev, void *res)
-{
- union device_attr_group_devres *devres = res;
- const struct attribute_group **groups = devres->groups;
-
- dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
- sysfs_remove_groups(&dev->kobj, groups);
-}
-
/**
* devm_device_add_group - given a device, create a managed attribute group
* @dev: The device to create the group for
@@ -2886,42 +2880,6 @@ int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
}
EXPORT_SYMBOL_GPL(devm_device_add_group);
-/**
- * devm_device_add_groups - create a bunch of managed attribute groups
- * @dev: The device to create the group for
- * @groups: The attribute groups to create, NULL terminated
- *
- * This function creates a bunch of managed attribute groups. If an error
- * occurs when creating a group, all previously created groups will be
- * removed, unwinding everything back to the original state when this
- * function was called. It will explicitly warn and error if any of the
- * attribute files being created already exist.
- *
- * Returns 0 on success or error code from sysfs_create_group on failure.
- */
-int devm_device_add_groups(struct device *dev,
- const struct attribute_group **groups)
-{
- union device_attr_group_devres *devres;
- int error;
-
- devres = devres_alloc(devm_attr_groups_remove,
- sizeof(*devres), GFP_KERNEL);
- if (!devres)
- return -ENOMEM;
-
- error = sysfs_create_groups(&dev->kobj, groups);
- if (error) {
- devres_free(devres);
- return error;
- }
-
- devres->groups = groups;
- devres_add(dev, devres);
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_device_add_groups);
-
static int device_add_attrs(struct device *dev)
{
const struct class *class = dev->class;
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 3ec611dc0c09..a905e955bbfc 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -350,7 +350,8 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
if (quirks->max_write_len &&
(bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
- max_write = quirks->max_write_len;
+ max_write = quirks->max_write_len -
+ (config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
if (max_read || max_write) {
ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 93780f41646b..1153721bc7c2 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -302,6 +302,21 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
return 0;
}
+static void loop_clear_limits(struct loop_device *lo, int mode)
+{
+ struct queue_limits lim = queue_limits_start_update(lo->lo_queue);
+
+ if (mode & FALLOC_FL_ZERO_RANGE)
+ lim.max_write_zeroes_sectors = 0;
+
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ lim.max_hw_discard_sectors = 0;
+ lim.discard_granularity = 0;
+ }
+
+ queue_limits_commit_update(lo->lo_queue, &lim);
+}
+
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
int mode)
{
@@ -320,6 +335,14 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
return -EIO;
+
+	 * We initially configure the limits in the hope that fallocate is
+ * We initially configure the limits in a hope that fallocate is
+ * supported and clear them here if that turns out not to be true.
+ */
+ if (unlikely(ret == -EOPNOTSUPP))
+ loop_clear_limits(lo, mode);
+
return ret;
}
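
The loop driver change above is deliberately optimistic: discard and write-zeroes limits are advertised up front and withdrawn the first time the backing file's fallocate() comes back with -EOPNOTSUPP. The same probe can be done from user space; the sketch below simply attempts FALLOC_FL_PUNCH_HOLE on a scratch file and reports whether the filesystem supports it (Linux-specific, error handling kept minimal, file name invented):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Returns true if the file's filesystem supports hole punching. */
static bool supports_punch_hole(int fd)
{
	int ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			    0, 4096);

	if (ret == 0)
		return true;
	if (errno == EOPNOTSUPP)
		return false;
	/* Other errors (e.g. EBADF) say nothing about support. */
	return false;
}

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "testfile",
		      O_RDWR | O_CREAT, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ftruncate(fd, 4096) < 0) {
		perror("ftruncate");
		return 1;
	}
	printf("punch hole supported: %d\n", supports_punch_hole(fd));
	close(fd);
	return 0;
}
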
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 22a79a62cc4e..b87aa80a46dd 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -589,10 +589,11 @@ static inline int was_interrupted(int result)
}
/*
- * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns
- * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed.
+ * Returns BLK_STS_RESOURCE if the caller should retry after a delay.
+ * Returns BLK_STS_IOERR if sending failed.
*/
-static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
+ int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
struct nbd_config *config = nbd->config;
@@ -614,13 +615,13 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
type = req_to_nbd_cmd_type(req);
if (type == U32_MAX)
- return -EIO;
+ return BLK_STS_IOERR;
if (rq_data_dir(req) == WRITE &&
(config->flags & NBD_FLAG_READ_ONLY)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Write on read-only\n");
- return -EIO;
+ return BLK_STS_IOERR;
}
if (req->cmd_flags & REQ_FUA)
@@ -674,11 +675,11 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
nsock->sent = sent;
}
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return (__force int)BLK_STS_RESOURCE;
+ return BLK_STS_RESOURCE;
}
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Send control failed (result %d)\n", result);
- return -EAGAIN;
+ goto requeue;
}
send_pages:
if (type != NBD_CMD_WRITE)
@@ -715,12 +716,12 @@ send_pages:
nsock->pending = req;
nsock->sent = sent;
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return (__force int)BLK_STS_RESOURCE;
+ return BLK_STS_RESOURCE;
}
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
result);
- return -EAGAIN;
+ goto requeue;
}
/*
* The completion might already have come in,
@@ -737,7 +738,16 @@ out:
trace_nbd_payload_sent(req, handle);
nsock->pending = NULL;
nsock->sent = 0;
- return 0;
+ __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
+ return BLK_STS_OK;
+
+requeue:
+ /* retry on a different socket */
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Request send failed, requeueing\n");
+ nbd_mark_nsock_dead(nbd, nsock, 1);
+ nbd_requeue_cmd(cmd);
+ return BLK_STS_OK;
}
static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
@@ -1018,7 +1028,7 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
struct nbd_device *nbd = cmd->nbd;
struct nbd_config *config;
struct nbd_sock *nsock;
- int ret;
+ blk_status_t ret;
lockdep_assert_held(&cmd->lock);
@@ -1072,28 +1082,11 @@ again:
ret = BLK_STS_OK;
goto out;
}
- /*
- * Some failures are related to the link going down, so anything that
- * returns EAGAIN can be retried on a different socket.
- */
ret = nbd_send_cmd(nbd, cmd, index);
- /*
- * Access to this flag is protected by cmd->lock, thus it's safe to set
- * the flag after nbd_send_cmd() succeed to send request to server.
- */
- if (!ret)
- __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
- else if (ret == -EAGAIN) {
- dev_err_ratelimited(disk_to_dev(nbd->disk),
- "Request send failed, requeueing\n");
- nbd_mark_nsock_dead(nbd, nsock, 1);
- nbd_requeue_cmd(cmd);
- ret = BLK_STS_OK;
- }
out:
mutex_unlock(&nsock->tx_lock);
nbd_config_put(nbd);
- return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret;
+ return ret;
}
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
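
For readers following the nbd hunks above: nbd_send_cmd() used to return a mix of negative errno values and force-cast blk_status_t codes, with nbd_handle_cmd() translating at the end; it now returns blk_status_t and performs the dead-link requeue itself. A toy standalone model of the resulting flow, using stand-in enum values rather than the kernel's blk_status_t:

#include <stdio.h>

/* Stand-in status values; the real blk_status_t codes live in blk_types.h. */
enum status { STS_OK = 0, STS_RESOURCE = 1, STS_IOERR = 2 };

/*
 * After the change the send path speaks block-layer status directly: a dead
 * link is handled by requeueing inside the sender, so the caller no longer
 * needs the old "ret < 0 ? BLK_STS_IOERR : ret" translation step.
 */
static enum status send_cmd(int link_ok)
{
	if (!link_ok) {
		printf("send failed, requeueing on another socket\n");
		return STS_OK;		/* requeued, not failed */
	}
	return STS_OK;
}

static enum status handle_cmd(int link_ok)
{
	return send_cmd(link_ok);	/* pass the status straight through */
}

int main(void)
{
	return handle_cmd(0) == STS_OK ? 0 : 1;
}
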
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index eb023d267369..75f189e42f88 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -494,6 +494,7 @@ static ssize_t nullb_device_power_store(struct config_item *item,
set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
dev->power = newp;
+ ret = count;
} else if (dev->power && !newp) {
if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
dev->power = newp;
@@ -1823,8 +1824,8 @@ static int null_validate_conf(struct nullb_device *dev)
dev->queue_mode = NULL_Q_MQ;
}
- dev->blocksize = round_down(dev->blocksize, 512);
- dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
+ if (blk_validate_block_size(dev->blocksize))
+ return -EINVAL;
if (dev->use_per_node_hctx) {
if (dev->submit_queues != nr_online_nodes)
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 5b5a63adacc1..f118d304f310 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -74,6 +74,17 @@ int null_init_zoned_dev(struct nullb_device *dev,
return -EINVAL;
}
+ /*
+ * If a smaller zone capacity was requested, do not allow a smaller last
+ * zone at the same time, as such a zone configuration does not correspond
+ * to any real zoned device.
+ */
+ if (dev->zone_capacity != dev->zone_size &&
+ dev->size & (dev->zone_size - 1)) {
+ pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
+ return -EINVAL;
+ }
+
zone_capacity_sects = mb_to_sects(dev->zone_capacity);
dev_capacity_sects = mb_to_sects(dev->size);
dev->zone_size_sects = mb_to_sects(dev->zone_size);
@@ -108,7 +119,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
dev->zone_max_open = dev->zone_max_active;
pr_info("changed the maximum number of open zones to %u\n",
- dev->nr_zones);
+ dev->zone_max_open);
} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
dev->zone_max_open = 0;
pr_info("zone_max_open limit disabled, limit >= zone count\n");
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index f8f674adf1d4..4acfac73ca9a 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -90,7 +90,7 @@ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct m
struct mhi_ring_element *event;
int ret;
- event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
if (!event)
return -ENOMEM;
@@ -109,7 +109,7 @@ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_stat
struct mhi_ring_element *event;
int ret;
- event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
if (!event)
return -ENOMEM;
@@ -127,7 +127,7 @@ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_e
struct mhi_ring_element *event;
int ret;
- event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
if (!event)
return -ENOMEM;
@@ -146,7 +146,7 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e
struct mhi_ring_element *event;
int ret;
- event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
if (!event)
return -ENOMEM;
@@ -438,7 +438,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
write_offset = len - buf_left;
- buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
+ buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
if (!buf_addr)
return -ENOMEM;
@@ -1481,14 +1481,14 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
sizeof(struct mhi_ring_element), 0,
- SLAB_CACHE_DMA, NULL);
+ 0, NULL);
if (!mhi_cntrl->ev_ring_el_cache) {
ret = -ENOMEM;
goto err_free_cmd;
}
mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
- SLAB_CACHE_DMA, NULL);
+ 0, NULL);
if (!mhi_cntrl->tre_buf_cache) {
ret = -ENOMEM;
goto err_destroy_ev_ring_el_cache;
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 08844ee79654..14a11880bcea 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -399,6 +399,8 @@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
@@ -419,8 +421,20 @@ static const struct mhi_controller_config modem_foxconn_sdx55_config = {
.event_cfg = mhi_foxconn_sdx55_events,
};
-static const struct mhi_pci_dev_info mhi_foxconn_sdx24_info = {
- .name = "foxconn-sdx24",
+static const struct mhi_controller_config modem_foxconn_sdx72_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .ready_timeout_ms = 50000,
+ .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
+ .ch_cfg = mhi_foxconn_sdx55_channels,
+ .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
+ .event_cfg = mhi_foxconn_sdx55_events,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
+ .name = "foxconn-sdx55",
+ .fw = "qcom/sdx55m/sbl1.mbn",
+ .edl = "qcom/sdx55m/edl.mbn",
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -428,8 +442,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx24_info = {
.sideband_wake = false,
};
-static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
- .name = "foxconn-sdx55",
+static const struct mhi_pci_dev_info mhi_foxconn_t99w175_info = {
+ .name = "foxconn-t99w175",
.fw = "qcom/sdx55m/sbl1.mbn",
.edl = "qcom/sdx55m/edl.mbn",
.config = &modem_foxconn_sdx55_config,
@@ -439,8 +453,19 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.sideband_wake = false,
};
-static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
- .name = "foxconn-sdx65",
+static const struct mhi_pci_dev_info mhi_foxconn_dw5930e_info = {
+ .name = "foxconn-dw5930e",
+ .fw = "qcom/sdx55m/sbl1.mbn",
+ .edl = "qcom/sdx55m/edl.mbn",
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_t99w368_info = {
+ .name = "foxconn-t99w368",
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -448,6 +473,55 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
.sideband_wake = false,
};
+static const struct mhi_pci_dev_info mhi_foxconn_t99w373_info = {
+ .name = "foxconn-t99w373",
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_t99w510_info = {
+ .name = "foxconn-t99w510",
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = {
+ .name = "foxconn-dw5932e",
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = {
+ .name = "foxconn-t99w515",
+ .edl = "fox/sdx72m/edl.mbn",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx72_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_dw5934e_info = {
+ .name = "foxconn-dw5934e",
+ .edl = "fox/sdx72m/edl.mbn",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx72_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
static const struct mhi_channel_config mhi_mv3x_channels[] = {
MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
@@ -646,40 +720,49 @@ static const struct pci_device_id mhi_pci_id_table[] = {
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
/* T99W175 (sdx55), Both for eSIM and Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
/* DW5930e (sdx55), With eSIM, It's also T99W175 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5930e_info },
/* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5930e_info },
/* T99W175 (sdx55), Based on Qualcomm new baseline */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
/* T99W175 (sdx55) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
/* T99W368 (sdx65) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w368_info },
/* T99W373 (sdx62) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w373_info },
/* T99W510 (sdx24), variant 1 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f0),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
/* T99W510 (sdx24), variant 2 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f1),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
/* T99W510 (sdx24), variant 3 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
/* DW5932e-eSIM (sdx62), With eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f5),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info },
/* DW5932e (sdx62), Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info },
+ /* T99W515 (sdx72) */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe118),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w515_info },
+ /* DW5934e(sdx72), With eSIM */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11d),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
+ /* DW5934e(sdx72), Non-eSIM */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11e),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
/* MV31-W (Cinterion) */
{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
.driver_data = (kernel_ulong_t) &mhi_mv31_info },
@@ -694,7 +777,7 @@ static const struct pci_device_id mhi_pci_id_table[] = {
.driver_data = (kernel_ulong_t) &mhi_mv32_info },
/* T99W175 (sdx55), HP variant */
{ PCI_DEVICE(0x03f0, 0x0a6c),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
{ }
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
@@ -1003,6 +1086,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->runtime_get = mhi_pci_runtime_get;
mhi_cntrl->runtime_put = mhi_pci_runtime_put;
mhi_cntrl->mru = info->mru_default;
+ mhi_cntrl->name = info->name;
if (info->edl_trigger)
mhi_cntrl->edl_trigger = mhi_pci_generic_edl_trigger;
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 70d31aed9011..837109ef6766 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -342,5 +342,6 @@ static void __exit bsr_exit(void)
module_init(bsr_init);
module_exit(bsr_exit);
+MODULE_DESCRIPTION("IBM POWER Barrier Synchronization Register Driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>");
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index bda27e595da1..1c2c8439797c 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -530,5 +530,6 @@ static void __exit dsp56k_cleanup_driver(void)
}
module_exit(dsp56k_cleanup_driver);
+MODULE_DESCRIPTION("Atari DSP56001 Device Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("dsp56k/bootstrap.bin");
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 6946c1cad9f6..5a1a73310e97 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -660,4 +660,5 @@ static char dtlk_write_tts(char ch)
return 0;
}
+MODULE_DESCRIPTION("RC Systems DoubleTalk PC speech card driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index f5c71a617a99..4084df65c9fa 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -64,19 +64,6 @@ static size_t rng_buffer_size(void)
return RNG_BUFFER_SIZE;
}
-static void add_early_randomness(struct hwrng *rng)
-{
- int bytes_read;
-
- mutex_lock(&reading_mutex);
- bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
- mutex_unlock(&reading_mutex);
- if (bytes_read > 0) {
- size_t entropy = bytes_read * 8 * rng->quality / 1024;
- add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false);
- }
-}
-
static inline void cleanup_rng(struct kref *kref)
{
struct hwrng *rng = container_of(kref, struct hwrng, ref);
@@ -340,13 +327,12 @@ static ssize_t rng_current_store(struct device *dev,
const char *buf, size_t len)
{
int err;
- struct hwrng *rng, *old_rng, *new_rng;
+ struct hwrng *rng, *new_rng;
err = mutex_lock_interruptible(&rng_mutex);
if (err)
return -ERESTARTSYS;
- old_rng = current_rng;
if (sysfs_streq(buf, "")) {
err = enable_best_rng();
} else {
@@ -362,11 +348,8 @@ static ssize_t rng_current_store(struct device *dev,
new_rng = get_current_rng_nolock();
mutex_unlock(&rng_mutex);
- if (new_rng) {
- if (new_rng != old_rng)
- add_early_randomness(new_rng);
+ if (new_rng)
put_rng(new_rng);
- }
return err ? : len;
}
@@ -544,7 +527,6 @@ int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
struct hwrng *tmp;
- bool is_new_current = false;
if (!rng->name || (!rng->data_read && !rng->read))
goto out;
@@ -573,25 +555,8 @@ int hwrng_register(struct hwrng *rng)
err = set_current_rng(rng);
if (err)
goto out_unlock;
- /* to use current_rng in add_early_randomness() we need
- * to take a ref
- */
- is_new_current = true;
- kref_get(&rng->ref);
}
mutex_unlock(&rng_mutex);
- if (is_new_current || !rng->init) {
- /*
- * Use a new device's input to add some randomness to
- * the system. If this rng device isn't going to be
- * used right away, its init function hasn't been
- * called yet by set_current_rng(); so only use the
- * randomness from devices that don't need an init callback
- */
- add_early_randomness(rng);
- }
- if (is_new_current)
- put_rng(rng);
return 0;
out_unlock:
mutex_unlock(&rng_mutex);
@@ -602,12 +567,11 @@ EXPORT_SYMBOL_GPL(hwrng_register);
void hwrng_unregister(struct hwrng *rng)
{
- struct hwrng *old_rng, *new_rng;
+ struct hwrng *new_rng;
int err;
mutex_lock(&rng_mutex);
- old_rng = current_rng;
list_del(&rng->list);
complete_all(&rng->dying);
if (current_rng == rng) {
@@ -626,11 +590,8 @@ void hwrng_unregister(struct hwrng *rng)
} else
mutex_unlock(&rng_mutex);
- if (new_rng) {
- if (old_rng != new_rng)
- add_early_randomness(new_rng);
+ if (new_rng)
put_rng(new_rng);
- }
wait_for_completion(&rng->cleanup_done);
}
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 2f171d14b9b5..24417a00dfe9 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -1016,7 +1016,6 @@ static struct parport_driver lp_driver = {
.name = "lp",
.match_port = lp_attach,
.detach = lp_detach,
- .devmodel = true,
};
static int __init lp_init(void)
@@ -1123,4 +1122,5 @@ module_init(lp_init_module);
module_exit(lp_cleanup_module);
MODULE_ALIAS_CHARDEV_MAJOR(LP_MAJOR);
+MODULE_DESCRIPTION("Generic parallel printer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index e9f694b36871..9eff426a9286 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -540,6 +540,7 @@ static void __exit nvram_module_exit(void)
module_init(nvram_module_init);
module_exit(nvram_module_exit);
+MODULE_DESCRIPTION("CMOS/NV-RAM driver for Linux");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(NVRAM_MINOR);
MODULE_ALIAS("devname:nvram");
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 58e9dcc2a308..eaff98dbaa8c 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -839,7 +839,6 @@ static struct parport_driver pp_driver = {
.probe = pp_probe,
.match_port = pp_attach,
.detach = pp_detach,
- .devmodel = true,
};
static int __init ppdev_init(void)
@@ -882,5 +881,6 @@ static void __exit ppdev_cleanup(void)
module_init(ppdev_init);
module_exit(ppdev_cleanup);
+MODULE_DESCRIPTION("Support for user-space parallel port device drivers");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR);
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 896a3550fba9..377bebf6c925 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -47,6 +47,7 @@
#include <linux/uaccess.h>
MODULE_AUTHOR("Sebastien Bouchard <sebastien.bouchard@ca.kontron.com>");
+MODULE_DESCRIPTION("Telecom Clock driver for Intel NetStructure(tm) MPCBL0010");
MODULE_LICENSE("GPL");
/*Hardware Reset of the PLL */
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index e63a6a17793c..cf0be8a7939d 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -29,7 +29,7 @@ if TCG_TPM
config TCG_TPM2_HMAC
bool "Use HMAC and encrypted transactions on the TPM bus"
- default y
+ default X86_64
select CRYPTO_ECDH
select CRYPTO_LIB_AESCFB
select CRYPTO_LIB_SHA256
diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c
index 647c6ca92ac3..cad0048bcc3c 100644
--- a/drivers/char/tpm/tpm-buf.c
+++ b/drivers/char/tpm/tpm-buf.c
@@ -223,30 +223,4 @@ u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset)
}
EXPORT_SYMBOL_GPL(tpm_buf_read_u32);
-static u16 tpm_buf_tag(struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- return be16_to_cpu(head->tag);
-}
-
-/**
- * tpm_buf_parameters - return the TPM response parameters area of the tpm_buf
- * @buf: tpm_buf to use
- *
- * Where the parameters are located depends on the tag of a TPM
- * command (it's immediately after the header for TPM_ST_NO_SESSIONS
- * or 4 bytes after for TPM_ST_SESSIONS). Evaluate this and return a
- * pointer to the first byte of the parameters area.
- *
- * @return: pointer to parameters area
- */
-u8 *tpm_buf_parameters(struct tpm_buf *buf)
-{
- int offset = TPM_HEADER_SIZE;
-
- if (tpm_buf_tag(buf) == TPM2_ST_SESSIONS)
- offset += 4;
- return &buf->data[offset];
-}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 6b8b9956ba69..7bb87fa5f7a1 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -28,7 +28,7 @@
#include <linux/tpm_eventlog.h>
#ifdef CONFIG_X86
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#endif
#define TPM_MINOR 224 /* officially assigned */
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 0cdf892ec2a7..1e856259219e 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -281,6 +281,7 @@ struct tpm2_get_random_out {
int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
{
struct tpm2_get_random_out *out;
+ struct tpm_header *head;
struct tpm_buf buf;
u32 recd;
u32 num_bytes = max;
@@ -288,6 +289,7 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
int total = 0;
int retries = 5;
u8 *dest_ptr = dest;
+ off_t offset;
if (!num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
@@ -320,7 +322,13 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
goto out;
}
- out = (struct tpm2_get_random_out *)tpm_buf_parameters(&buf);
+ head = (struct tpm_header *)buf.data;
+ offset = TPM_HEADER_SIZE;
+ /* Skip the parameter size field: */
+ if (be16_to_cpu(head->tag) == TPM2_ST_SESSIONS)
+ offset += 4;
+
+ out = (struct tpm2_get_random_out *)&buf.data[offset];
recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
if (tpm_buf_length(&buf) <
TPM_HEADER_SIZE +
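
The open-coded offset above replaces the removed tpm_buf_parameters() helper: in a TPM2_ST_SESSIONS response a 4-byte parameter size field sits between the header and the parameter area, so the parameters start four bytes later than in the TPM2_ST_NO_SESSIONS case. A minimal standalone sketch of that calculation (header size and tag values as defined by the TPM 2.0 spec; the helper name below is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TPM_HEADER_SIZE		10	/* tag (2) + length (4) + command/response code (4) */
#define TPM2_ST_NO_SESSIONS	0x8001
#define TPM2_ST_SESSIONS	0x8002

/* Return the offset of the response parameter area for a given tag. */
static size_t tpm2_parameters_offset(uint16_t tag)
{
	size_t offset = TPM_HEADER_SIZE;

	if (tag == TPM2_ST_SESSIONS)
		offset += 4;	/* skip the parameterSize field */
	return offset;
}

int main(void)
{
	printf("no sessions: %zu, sessions: %zu\n",
	       tpm2_parameters_offset(TPM2_ST_NO_SESSIONS),
	       tpm2_parameters_offset(TPM2_ST_SESSIONS));
	return 0;
}
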
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index ea8860661876..907ac9956a78 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -80,6 +80,9 @@
/* maximum number of names the TPM must remember for authorization */
#define AUTH_MAX_NAMES 3
+#define AES_KEY_BYTES AES_KEYSIZE_128
+#define AES_KEY_BITS (AES_KEY_BYTES*8)
+
static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
u32 *handle, u8 *name);
@@ -954,6 +957,20 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
}
EXPORT_SYMBOL(tpm2_start_auth_session);
+/*
+ * A mask containing the object attributes for the kernel-held null primary key
+ * used in HMAC encryption. For more information on the specific attributes, see
+ * section "8.3 TPMA_OBJECT (Object Attributes)".
+ */
+#define TPM2_OA_NULL_KEY ( \
+ TPM2_OA_NO_DA | \
+ TPM2_OA_FIXED_TPM | \
+ TPM2_OA_FIXED_PARENT | \
+ TPM2_OA_SENSITIVE_DATA_ORIGIN | \
+ TPM2_OA_USER_WITH_AUTH | \
+ TPM2_OA_DECRYPT | \
+ TPM2_OA_RESTRICTED)
+
/**
* tpm2_parse_create_primary() - parse the data returned from TPM_CC_CREATE_PRIMARY
*
@@ -1018,7 +1035,7 @@ static int tpm2_parse_create_primary(struct tpm_chip *chip, struct tpm_buf *buf,
val = tpm_buf_read_u32(buf, &offset_t);
/* object properties */
- if (val != TPM2_OA_TMPL)
+ if (val != TPM2_OA_NULL_KEY)
return -EINVAL;
/* auth policy (empty) */
@@ -1178,7 +1195,7 @@ static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
tpm_buf_append_u16(&template, TPM_ALG_SHA256);
/* object properties */
- tpm_buf_append_u32(&template, TPM2_OA_TMPL);
+ tpm_buf_append_u32(&template, TPM2_OA_NULL_KEY);
/* sauth policy (empty) */
tpm_buf_append_u16(&template, 0);
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 176cd8dbf1db..fdef214b9f6b 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -1020,7 +1020,8 @@ void tpm_tis_remove(struct tpm_chip *chip)
interrupt = 0;
tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
- flush_work(&priv->free_irq_work);
+ if (priv->free_irq_work.func)
+ flush_work(&priv->free_irq_work);
tpm_tis_clkrun_enable(chip, false);
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 13e99cf65efe..690ad8e9b731 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -210,7 +210,7 @@ static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len,
static inline bool is_bsw(void)
{
#ifdef CONFIG_X86
- return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
+ return (boot_cpu_data.x86_vfm == INTEL_ATOM_AIRMONT) ? 1 : 0;
#else
return false;
#endif
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index 3f9eaf27b41b..c9eca24bbad4 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -37,6 +37,7 @@
#include "tpm_tis_spi.h"
#define MAX_SPI_FRAMESIZE 64
+#define SPI_HDRSIZE 4
/*
* TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
@@ -247,7 +248,7 @@ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
int irq, const struct tpm_tis_phy_ops *phy_ops)
{
- phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ phy->iobuf = devm_kmalloc(&spi->dev, SPI_HDRSIZE + MAX_SPI_FRAMESIZE, GFP_KERNEL);
if (!phy->iobuf)
return -ENOMEM;
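
The allocation change above grows the single transfer buffer so it can carry the 4-byte SPI transaction header in front of a full 64-byte frame, instead of being sized for the frame alone. A trivial standalone sketch of that buffer layout, reusing only the two defines visible in the hunk:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SPI_HDRSIZE		 4	/* from the hunk above */
#define MAX_SPI_FRAMESIZE	64

int main(void)
{
	/* One buffer sized for header + payload, as devm_kmalloc() now does. */
	uint8_t iobuf[SPI_HDRSIZE + MAX_SPI_FRAMESIZE];
	const uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef };

	memset(iobuf, 0, SPI_HDRSIZE);			/* header filled in by the driver */
	memcpy(iobuf + SPI_HDRSIZE, payload, sizeof(payload));

	printf("buffer %zu bytes: %d header + %d payload capacity\n",
	       sizeof(iobuf), SPI_HDRSIZE, MAX_SPI_FRAMESIZE);
	return 0;
}
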
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index 4c806a189ee5..d7f841ab4323 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -228,4 +228,5 @@ static void __exit ttyprintk_exit(void)
device_initcall(ttyprintk_init);
module_exit(ttyprintk_exit);
+MODULE_DESCRIPTION("TTY driver to output user messages via printk");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 6a77d7e201a9..2f83fb97c6fb 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -204,8 +204,15 @@ fail:
pr_err("%pV:%s: %s ID is greater than %zu\n",
&vaf, con_id, failure, max_size);
va_end(ap_copy);
- kfree(cla);
- return NULL;
+
+ /*
+ * Don't fail in this case; since the entry won't ever match anyway,
+ * just fill it with something that also won't match.
+ */
+ strscpy(cla->con_id, "bad", sizeof(cla->con_id));
+ strscpy(cla->dev_id, "bad", sizeof(cla->dev_id));
+
+ return &cla->cl;
}
static struct clk_lookup *
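
The reworked error path above keeps the allocation and poisons it rather than returning NULL, so a caller that ignores the error still gets a lookup entry that can never match anything. A standalone sketch of that "return a harmless dummy instead of failing" idea; the struct and names below are illustrative, not the clkdev ones:

#include <stdio.h>
#include <string.h>

struct lookup {
	char con_id[16];
	char dev_id[16];
};

/* Fill the entry, or poison it so it can never match if an ID is too long. */
static struct lookup *fill_lookup(struct lookup *l, const char *con, const char *dev)
{
	if (strlen(con) >= sizeof(l->con_id) || strlen(dev) >= sizeof(l->dev_id)) {
		/* Don't fail: make the entry valid but unmatchable. */
		strcpy(l->con_id, "bad");
		strcpy(l->dev_id, "bad");
		return l;
	}
	strcpy(l->con_id, con);
	strcpy(l->dev_id, dev);
	return l;
}

int main(void)
{
	struct lookup l;

	fill_lookup(&l, "this-connection-id-is-way-too-long", "dev0");
	printf("%s/%s\n", l.dev_id, l.con_id);	/* prints bad/bad */
	return 0;
}
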
diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
index 25b8e1a80ddc..b32a59fe55e7 100644
--- a/drivers/clk/sifive/sifive-prci.c
+++ b/drivers/clk/sifive/sifive-prci.c
@@ -4,7 +4,6 @@
* Copyright (C) 2020 Zong Li
*/
-#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -537,13 +536,6 @@ static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
return r;
}
- r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
- if (r) {
- dev_warn(dev, "Failed to register clkdev for %s: %d\n",
- init.name, r);
- return r;
- }
-
pd->hw_clks.hws[i] = &pic->hw;
}
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index 497bc05dca4d..d30d22dfe577 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -138,7 +138,7 @@ config TI_ECAP_CAPTURE
config TI_EQEP
tristate "TI eQEP counter driver"
- depends on (SOC_AM33XX || COMPILE_TEST)
+ depends on SOC_AM33XX || ARCH_K3 || COMPILE_TEST
select REGMAP_MMIO
help
Select this option to enable the Texas Instruments Enhanced Quadrature
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
index aea6622a9b13..200876f3ec04 100644
--- a/drivers/counter/ftm-quaddec.c
+++ b/drivers/counter/ftm-quaddec.c
@@ -322,6 +322,7 @@ static struct platform_driver ftm_quaddec_driver = {
module_platform_driver(ftm_quaddec_driver);
+MODULE_DESCRIPTION("Flex Timer Module Quadrature decoder");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kjeld Flarup <kfa@deif.com>");
MODULE_AUTHOR("Patrick Havelange <patrick.havelange@essensium.com>");
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index 072b11fd6b32..313b91456f26 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -6,7 +6,9 @@
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/counter.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -67,6 +69,44 @@
#define QEPCTL_UTE BIT(1)
#define QEPCTL_WDE BIT(0)
+#define QEINT_UTO BIT(11)
+#define QEINT_IEL BIT(10)
+#define QEINT_SEL BIT(9)
+#define QEINT_PCM BIT(8)
+#define QEINT_PCR BIT(7)
+#define QEINT_PCO BIT(6)
+#define QEINT_PCU BIT(5)
+#define QEINT_WTO BIT(4)
+#define QEINT_QDC BIT(3)
+#define QEINT_PHE BIT(2)
+#define QEINT_PCE BIT(1)
+
+#define QFLG_UTO BIT(11)
+#define QFLG_IEL BIT(10)
+#define QFLG_SEL BIT(9)
+#define QFLG_PCM BIT(8)
+#define QFLG_PCR BIT(7)
+#define QFLG_PCO BIT(6)
+#define QFLG_PCU BIT(5)
+#define QFLG_WTO BIT(4)
+#define QFLG_QDC BIT(3)
+#define QFLG_PHE BIT(2)
+#define QFLG_PCE BIT(1)
+#define QFLG_INT BIT(0)
+
+#define QCLR_UTO BIT(11)
+#define QCLR_IEL BIT(10)
+#define QCLR_SEL BIT(9)
+#define QCLR_PCM BIT(8)
+#define QCLR_PCR BIT(7)
+#define QCLR_PCO BIT(6)
+#define QCLR_PCU BIT(5)
+#define QCLR_WTO BIT(4)
+#define QCLR_QDC BIT(3)
+#define QCLR_PHE BIT(2)
+#define QCLR_PCE BIT(1)
+#define QCLR_INT BIT(0)
+
/* EQEP Inputs */
enum {
TI_EQEP_SIGNAL_QEPA, /* QEPA/XCLK */
@@ -82,20 +122,14 @@ enum ti_eqep_count_func {
};
struct ti_eqep_cnt {
- struct counter_device counter;
struct regmap *regmap32;
struct regmap *regmap16;
};
-static struct ti_eqep_cnt *ti_eqep_count_from_counter(struct counter_device *counter)
-{
- return counter_priv(counter);
-}
-
static int ti_eqep_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
+ struct ti_eqep_cnt *priv = counter_priv(counter);
u32 cnt;
regmap_read(priv->regmap32, QPOSCNT, &cnt);
@@ -107,7 +141,7 @@ static int ti_eqep_count_read(struct counter_device *counter,
static int ti_eqep_count_write(struct counter_device *counter,
struct counter_count *count, u64 val)
{
- struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
+ struct ti_eqep_cnt *priv = counter_priv(counter);
u32 max;
regmap_read(priv->regmap32, QPOSMAX, &max);
@@ -121,7 +155,7 @@ static int ti_eqep_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
- struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
+ struct ti_eqep_cnt *priv = counter_priv(counter);
u32 qdecctl;
regmap_read(priv->regmap16, QDECCTL, &qdecctl);
@@ -148,7 +182,7 @@ static int ti_eqep_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
- struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
+ struct ti_eqep_cnt *priv = counter_priv(counter);
enum ti_eqep_count_func qsrc;
switch (function) {
@@ -178,7 +212,7 @@ static int ti_eqep_action_read(struct counter_device *counter,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
- struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
+ struct ti_eqep_cnt *priv = counter_priv(counter);
enum counter_function function;
u32 qdecctl;
int err;
@@ -238,19 +272,56 @@ static int ti_eqep_action_read(struct counter_device *counter,
}
}
+static int ti_eqep_events_configure(struct counter_device *counter)
+{
+ struct ti_eqep_cnt *priv = counter_priv(counter);
+ struct counter_event_node *event_node;
+ u32 qeint = 0;
+
+ list_for_each_entry(event_node, &counter->events_list, l) {
+ switch (event_node->event) {
+ case COUNTER_EVENT_OVERFLOW:
+ qeint |= QEINT_PCO;
+ break;
+ case COUNTER_EVENT_UNDERFLOW:
+ qeint |= QEINT_PCU;
+ break;
+ }
+ }
+
+ return regmap_write(priv->regmap16, QEINT, qeint);
+}
+
+static int ti_eqep_watch_validate(struct counter_device *counter,
+ const struct counter_watch *watch)
+{
+ switch (watch->event) {
+ case COUNTER_EVENT_OVERFLOW:
+ case COUNTER_EVENT_UNDERFLOW:
+ if (watch->channel != 0)
+ return -EINVAL;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct counter_ops ti_eqep_counter_ops = {
.count_read = ti_eqep_count_read,
.count_write = ti_eqep_count_write,
.function_read = ti_eqep_function_read,
.function_write = ti_eqep_function_write,
.action_read = ti_eqep_action_read,
+ .events_configure = ti_eqep_events_configure,
+ .watch_validate = ti_eqep_watch_validate,
};
static int ti_eqep_position_ceiling_read(struct counter_device *counter,
struct counter_count *count,
u64 *ceiling)
{
- struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
+ struct ti_eqep_cnt *priv = counter_priv(counter);
u32 qposmax;
regmap_read(priv->regmap32, QPOSMAX, &qposmax);
@@ -264,7 +335,7 @@ static int ti_eqep_position_ceiling_write(struct counter_device *counter,
struct counter_count *count,
u64 ceiling)
{
- struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
+ struct ti_eqep_cnt *priv = counter_priv(counter);
if (ceiling != (u32)ceiling)
return -ERANGE;
@@ -277,7 +348,7 @@ static int ti_eqep_position_ceiling_write(struct counter_device *counter,
static int ti_eqep_position_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
- struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
+ struct ti_eqep_cnt *priv = counter_priv(counter);
u32 qepctl;
regmap_read(priv->regmap16, QEPCTL, &qepctl);
@@ -290,7 +361,7 @@ static int ti_eqep_position_enable_read(struct counter_device *counter,
static int ti_eqep_position_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
- struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
+ struct ti_eqep_cnt *priv = counter_priv(counter);
regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN, enable ? -1 : 0);
@@ -354,6 +425,25 @@ static struct counter_count ti_eqep_counts[] = {
},
};
+static irqreturn_t ti_eqep_irq_handler(int irq, void *dev_id)
+{
+ struct counter_device *counter = dev_id;
+ struct ti_eqep_cnt *priv = counter_priv(counter);
+ u32 qflg;
+
+ regmap_read(priv->regmap16, QFLG, &qflg);
+
+ if (qflg & QFLG_PCO)
+ counter_push_event(counter, COUNTER_EVENT_OVERFLOW, 0);
+
+ if (qflg & QFLG_PCU)
+ counter_push_event(counter, COUNTER_EVENT_UNDERFLOW, 0);
+
+ regmap_write(priv->regmap16, QCLR, qflg);
+
+ return IRQ_HANDLED;
+}
+
static const struct regmap_config ti_eqep_regmap32_config = {
.name = "32-bit",
.reg_bits = 32,
@@ -376,7 +466,8 @@ static int ti_eqep_probe(struct platform_device *pdev)
struct counter_device *counter;
struct ti_eqep_cnt *priv;
void __iomem *base;
- int err;
+ struct clk *clk;
+ int err, irq;
counter = devm_counter_alloc(dev, sizeof(*priv));
if (!counter)
@@ -397,6 +488,15 @@ static int ti_eqep_probe(struct platform_device *pdev)
if (IS_ERR(priv->regmap16))
return PTR_ERR(priv->regmap16);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_threaded_irq(dev, irq, NULL, ti_eqep_irq_handler,
+ IRQF_ONESHOT, dev_name(dev), counter);
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to request IRQ\n");
+
counter->name = dev_name(dev);
counter->parent = dev;
counter->ops = &ti_eqep_counter_ops;
@@ -415,6 +515,10 @@ static int ti_eqep_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to enable clock\n");
+
err = counter_add(counter);
if (err < 0) {
pm_runtime_put_sync(dev);
@@ -437,6 +541,7 @@ static void ti_eqep_remove(struct platform_device *pdev)
static const struct of_device_id ti_eqep_of_match[] = {
{ .compatible = "ti,am3352-eqep", },
+ { .compatible = "ti,am62-eqep", },
{ },
};
MODULE_DEVICE_TABLE(of, ti_eqep_of_match);
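
With the events_configure/watch_validate callbacks and the IRQ handler added above, eQEP overflow and underflow are reported through the generic counter character device. A rough userspace sketch of consuming such events via the counter chrdev uAPI is below; it assumes the ioctls and structures from <linux/counter.h> and a /dev/counter0 node, so treat it as an illustration rather than a tested client for this driver.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/counter.h>

int main(void)
{
	/* The device node name is an assumption; it depends on probe order. */
	int fd = open("/dev/counter0", O_RDWR);
	struct counter_watch watch;
	struct counter_event event;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&watch, 0, sizeof(watch));
	watch.component.type = COUNTER_COMPONENT_COUNT;
	watch.component.scope = COUNTER_SCOPE_COUNT;
	watch.component.parent = 0;		/* Count 0, as validated above */
	watch.event = COUNTER_EVENT_OVERFLOW;
	watch.channel = 0;

	if (ioctl(fd, COUNTER_ADD_WATCH_IOCTL, &watch) < 0 ||
	    ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL) < 0) {
		perror("ioctl");
		return 1;
	}

	/* Each overflow pushed by the IRQ handler arrives as one event. */
	if (read(fd, &event, sizeof(event)) == (ssize_t)sizeof(event))
		printf("overflow event, count value %llu\n",
		       (unsigned long long)event.value);

	close(fd);
	return 0;
}
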
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index f04ae67dda37..fc275d41d51e 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -26,10 +26,11 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
-#include <linux/amd-pstate.h>
#include <acpi/cppc_acpi.h>
+#include "amd-pstate.h"
+
/*
* Abbreviations:
* amd_pstate_ut: used as a shortform for AMD P-State unit test.
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 1b7e82a0ad2e..9ad62dbe8bfb 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -36,7 +36,6 @@
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>
-#include <linux/amd-pstate.h>
#include <linux/topology.h>
#include <acpi/processor.h>
@@ -46,6 +45,8 @@
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
+
+#include "amd-pstate.h"
#include "amd-pstate-trace.h"
#define AMD_PSTATE_TRANSITION_LATENCY 20000
@@ -53,6 +54,37 @@
#define CPPC_HIGHEST_PERF_PERFORMANCE 196
#define CPPC_HIGHEST_PERF_DEFAULT 166
+#define AMD_CPPC_EPP_PERFORMANCE 0x00
+#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80
+#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF
+#define AMD_CPPC_EPP_POWERSAVE 0xFF
+
+/*
+ * enum amd_pstate_mode - driver working mode of amd pstate
+ */
+enum amd_pstate_mode {
+ AMD_PSTATE_UNDEFINED = 0,
+ AMD_PSTATE_DISABLE,
+ AMD_PSTATE_PASSIVE,
+ AMD_PSTATE_ACTIVE,
+ AMD_PSTATE_GUIDED,
+ AMD_PSTATE_MAX,
+};
+
+static const char * const amd_pstate_mode_string[] = {
+ [AMD_PSTATE_UNDEFINED] = "undefined",
+ [AMD_PSTATE_DISABLE] = "disable",
+ [AMD_PSTATE_PASSIVE] = "passive",
+ [AMD_PSTATE_ACTIVE] = "active",
+ [AMD_PSTATE_GUIDED] = "guided",
+ NULL,
+};
+
+struct quirk_entry {
+ u32 nominal_freq;
+ u32 lowest_freq;
+};
+
/*
* TODO: We need more time to fine tune processors with shared memory solution
* with community together.
@@ -669,7 +701,7 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
if (state)
policy->cpuinfo.max_freq = cpudata->max_freq;
else
- policy->cpuinfo.max_freq = cpudata->nominal_freq;
+ policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000;
policy->max = policy->cpuinfo.max_freq;
diff --git a/include/linux/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index d58fc022ec46..e6a28e7f4dbf 100644
--- a/include/linux/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * linux/include/linux/amd-pstate.h
- *
* Copyright (C) 2022 Advanced Micro Devices, Inc.
*
* Author: Meng Li <li.meng@amd.com>
@@ -12,11 +10,6 @@
#include <linux/pm_qos.h>
-#define AMD_CPPC_EPP_PERFORMANCE 0x00
-#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80
-#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF
-#define AMD_CPPC_EPP_POWERSAVE 0xFF
-
/*********************************************************************
* AMD P-state INTERFACE *
*********************************************************************/
@@ -108,30 +101,4 @@ struct amd_cpudata {
bool suspended;
};
-/*
- * enum amd_pstate_mode - driver working mode of amd pstate
- */
-enum amd_pstate_mode {
- AMD_PSTATE_UNDEFINED = 0,
- AMD_PSTATE_DISABLE,
- AMD_PSTATE_PASSIVE,
- AMD_PSTATE_ACTIVE,
- AMD_PSTATE_GUIDED,
- AMD_PSTATE_MAX,
-};
-
-static const char * const amd_pstate_mode_string[] = {
- [AMD_PSTATE_UNDEFINED] = "undefined",
- [AMD_PSTATE_DISABLE] = "disable",
- [AMD_PSTATE_PASSIVE] = "passive",
- [AMD_PSTATE_ACTIVE] = "active",
- [AMD_PSTATE_GUIDED] = "guided",
- NULL,
-};
-
-struct quirk_entry {
- u32 nominal_freq;
- u32 lowest_freq;
-};
-
#endif /* _LINUX_AMD_PSTATE_H */
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4b986c044741..c31914a9876f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -355,15 +355,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
int ret;
ret = cppc_get_perf_caps(cpu, &cppc_perf);
- if (ret)
- return;
-
/*
- * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
- * In this case we can't use CPPC.highest_perf to enable ITMT.
- * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
+ * If CPPC is not available, fall back to MSR_HWP_CAPABILITIES bits [8:0].
+ *
+ * Also, on some systems with overclocking enabled, CPPC.highest_perf is
+ * hardcoded to 0xff, so CPPC.highest_perf cannot be used to enable ITMT.
+ * Fall back to MSR_HWP_CAPABILITIES then too.
*/
- if (cppc_perf.highest_perf == CPPC_MAX_PERF)
+ if (ret || cppc_perf.highest_perf == CPPC_MAX_PERF)
cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
/*
@@ -1153,7 +1152,8 @@ static void intel_pstate_update_policies(void)
static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
struct cpufreq_policy *policy)
{
- intel_pstate_get_hwp_cap(cpudata);
+ if (hwp_active)
+ intel_pstate_get_hwp_cap(cpudata);
policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
@@ -1301,12 +1301,17 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
no_turbo = !!clamp_t(int, input, 0, 1);
- if (no_turbo == global.no_turbo)
- goto unlock_driver;
-
- if (global.turbo_disabled) {
- pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
+ WRITE_ONCE(global.turbo_disabled, turbo_is_disabled());
+ if (global.turbo_disabled && !no_turbo) {
+ pr_notice("Turbo disabled by BIOS or unavailable on processor\n");
count = -EPERM;
+ if (global.no_turbo)
+ goto unlock_driver;
+ else
+ no_turbo = 1;
+ }
+
+ if (no_turbo == global.no_turbo) {
goto unlock_driver;
}
@@ -1761,7 +1766,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
u32 vid;
val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
val |= (u64)1 << 32;
vid_fp = cpudata->vid.min + mul_fp(
@@ -1926,7 +1931,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
u64 val;
val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
val |= (u64)1 << 32;
return val;
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 6f9266edc9f1..eac73cbfdd38 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -39,7 +39,8 @@ intel_qat-objs := adf_cfg.o \
adf_sysfs_rl.o \
qat_uclo.o \
qat_hal.o \
- qat_bl.o
+ qat_bl.o \
+ qat_mig_dev.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
adf_fw_counters.o \
@@ -56,6 +57,6 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
- adf_gen2_pfvf.o adf_gen4_pfvf.o qat_mig_dev.o
+ adf_gen2_pfvf.o adf_gen4_pfvf.o
intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 00a9f0eef8dd..3c2b6144be23 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2352,15 +2352,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
struct device *dev;
int rc;
- switch (mode) {
- case CXL_DECODER_RAM:
- case CXL_DECODER_PMEM:
- break;
- default:
- dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
- return ERR_PTR(-EINVAL);
- }
-
cxlr = cxl_region_alloc(cxlrd, id);
if (IS_ERR(cxlr))
return cxlr;
@@ -2415,6 +2406,15 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
{
int rc;
+ switch (mode) {
+ case CXL_DECODER_RAM:
+ case CXL_DECODER_PMEM:
+ break;
+ default:
+ dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
+ return ERR_PTR(-EINVAL);
+ }
+
rc = memregion_alloc(GFP_KERNEL);
if (rc < 0)
return ERR_PTR(rc);
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index ed3dac546dd6..f5cedf816be1 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -17,6 +17,7 @@
#define DCA_VERSION "1.12.1"
MODULE_VERSION(DCA_VERSION);
+MODULE_DESCRIPTION("Intel Direct Cache Access (DCA) service module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index b7c6f7ea9e0c..6a1bfcd0cc21 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -540,6 +540,12 @@ static int race_signal_callback(void *arg)
t[i].before = pass;
t[i].task = kthread_run(thread_signal_callback, &t[i],
"dma-fence:%d", i);
+ if (IS_ERR(t[i].task)) {
+ ret = PTR_ERR(t[i].task);
+ while (--i >= 0)
+ kthread_stop_put(t[i].task);
+ return ret;
+ }
get_task_struct(t[i].task);
}
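
The added error handling above unwinds only the threads that were already started before kthread_run() failed for slot i, using the classic "while (--i >= 0)" partial-cleanup loop. The same pattern in plain pthreads, as a standalone illustration rather than kernel code:

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

static void *worker(void *arg)
{
	(void)arg;
	return NULL;
}

static int start_all(pthread_t *t)
{
	for (int i = 0; i < NTHREADS; i++) {
		int err = pthread_create(&t[i], NULL, worker, NULL);

		if (err) {
			/* Creation of slot i failed: reap the 0..i-1 already running. */
			while (--i >= 0)
				pthread_join(t[i], NULL);
			return err;
		}
	}
	return 0;
}

int main(void)
{
	pthread_t t[NTHREADS];

	if (start_all(t) == 0)
		for (int i = 0; i < NTHREADS; i++)
			pthread_join(t[i], NULL);
	return 0;
}
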
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 101394f16930..237bce21d1e7 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -110,12 +110,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
seq_printf(s, "%s: %d\n", obj->name, obj->value);
- spin_lock_irq(&obj->lock);
+ spin_lock(&obj->lock); /* Caller already disabled IRQ. */
list_for_each(pos, &obj->pt_list) {
struct sync_pt *pt = container_of(pos, struct sync_pt, link);
sync_print_fence(s, &pt->base, false);
}
- spin_unlock_irq(&obj->lock);
+ spin_unlock(&obj->lock);
}
static void sync_print_sync_file(struct seq_file *s,
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 002a5ec80620..9fc99cfbef08 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -394,7 +394,7 @@ config LS2X_APB_DMA
config MCF_EDMA
tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
- depends on M5441x || COMPILE_TEST
+ depends on M5441x || (COMPILE_TEST && FSL_EDMA=n)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index bdb752f11869..36943b0c6d60 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -620,6 +620,45 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
return sg;
}
+static struct dma_async_tx_descriptor *
+axi_dmac_prep_peripheral_dma_vec(struct dma_chan *c, const struct dma_vec *vecs,
+ size_t nb, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+ struct axi_dmac_desc *desc;
+ unsigned int num_sgs = 0;
+ struct axi_dmac_sg *dsg;
+ size_t i;
+
+ if (direction != chan->direction)
+ return NULL;
+
+ for (i = 0; i < nb; i++)
+ num_sgs += DIV_ROUND_UP(vecs[i].len, chan->max_length);
+
+ desc = axi_dmac_alloc_desc(chan, num_sgs);
+ if (!desc)
+ return NULL;
+
+ dsg = desc->sg;
+
+ for (i = 0; i < nb; i++) {
+ if (!axi_dmac_check_addr(chan, vecs[i].addr) ||
+ !axi_dmac_check_len(chan, vecs[i].len)) {
+ kfree(desc);
+ return NULL;
+ }
+
+ dsg = axi_dmac_fill_linear_sg(chan, direction, vecs[i].addr, 1,
+ vecs[i].len, dsg);
+ }
+
+ desc->cyclic = false;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
struct dma_chan *c, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -1061,6 +1100,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->device_tx_status = dma_cookie_status;
dma_dev->device_issue_pending = axi_dmac_issue_pending;
dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
+ dma_dev->device_prep_peripheral_dma_vec = axi_dmac_prep_peripheral_dma_vec;
dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
dma_dev->device_terminate_all = axi_dmac_terminate_all;
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 8dc029c86551..fc049c9c9892 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -611,11 +611,13 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
spin_unlock(&irq_entry->list_lock);
- list_for_each_entry(desc, &flist, list) {
+ list_for_each_entry_safe(desc, n, &flist, list) {
/*
* Check against the original status as ABORT is software defined
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
+ list_del(&desc->list);
+
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true);
continue;
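
The move to list_for_each_entry_safe() together with the list_del() above follows the usual rule that an entry may only be removed (and possibly freed by the completion) while iterating if the iterator has already cached the next element. A self-contained illustration of the same idea with a minimal hand-rolled list rather than the kernel's list.h:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *cur, *next;

	/* Build a small list: 2 -> 1 -> 0 */
	for (int i = 0; i < 3; i++) {
		cur = malloc(sizeof(*cur));
		cur->val = i;
		cur->next = head;
		head = cur;
	}

	/* "Safe" traversal: grab 'next' before the current node is freed. */
	for (cur = head; cur; cur = next) {
		next = cur->next;
		printf("completing %d\n", cur->val);
		free(cur);	/* analogous to deleting and completing the desc */
	}
	return 0;
}
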
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 9c364e92cb82..e8f45a7fded4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -534,18 +534,6 @@ err_out:
return err;
}
-static int ioat_register(struct ioatdma_device *ioat_dma)
-{
- int err = dma_async_device_register(&ioat_dma->dma_dev);
-
- if (err) {
- ioat_disable_interrupts(ioat_dma);
- dma_pool_destroy(ioat_dma->completion_pool);
- }
-
- return err;
-}
-
static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
struct dma_device *dma = &ioat_dma->dma_dev;
@@ -1181,9 +1169,9 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
}
- err = ioat_register(ioat_dma);
+ err = dma_async_device_register(&ioat_dma->dma_dev);
if (err)
- return err;
+ goto err_disable_interrupts;
ioat_kobject_add(ioat_dma, &ioat_ktype);
@@ -1192,20 +1180,29 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
/* disable relaxed ordering */
err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16);
- if (err)
- return pcibios_err_to_errno(err);
+ if (err) {
+ err = pcibios_err_to_errno(err);
+ goto err_disable_interrupts;
+ }
/* clear relaxed ordering enable */
val16 &= ~PCI_EXP_DEVCTL_RELAX_EN;
err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16);
- if (err)
- return pcibios_err_to_errno(err);
+ if (err) {
+ err = pcibios_err_to_errno(err);
+ goto err_disable_interrupts;
+ }
if (ioat_dma->cap & IOAT_CAP_DPS)
writeb(ioat_pending_level + 1,
ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
return 0;
+
+err_disable_interrupts:
+ ioat_disable_interrupts(ioat_dma);
+ dma_pool_destroy(ioat_dma->completion_pool);
+ return err;
}
static void ioat_shutdown(struct pci_dev *pdev)
@@ -1350,6 +1347,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
void __iomem * const *iomap;
struct device *dev = &pdev->dev;
struct ioatdma_device *device;
+ unsigned int i;
+ u8 version;
int err;
err = pcim_enable_device(pdev);
@@ -1363,6 +1362,10 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!iomap)
return -ENOMEM;
+ version = readb(iomap[IOAT_MMIO_BAR] + IOAT_VER_OFFSET);
+ if (version < IOAT_VER_3_0)
+ return -ENODEV;
+
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err)
return err;
@@ -1373,17 +1376,18 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
pci_set_drvdata(pdev, device);
- device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+ device->version = version;
if (device->version >= IOAT_VER_3_4)
ioat_dca_enabled = 0;
- if (device->version >= IOAT_VER_3_0) {
- if (is_skx_ioat(pdev))
- device->version = IOAT_VER_3_2;
- err = ioat3_dma_probe(device, ioat_dca_enabled);
- } else
- return -ENODEV;
+ if (is_skx_ioat(pdev))
+ device->version = IOAT_VER_3_2;
+
+ err = ioat3_dma_probe(device, ioat_dca_enabled);
if (err) {
+ for (i = 0; i < IOAT_MAX_CHANS; i++)
+ kfree(device->idx[i]);
+ kfree(device);
dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
return -ENODEV;
}
@@ -1445,6 +1449,7 @@ module_init(ioat_init_module);
static void __exit ioat_exit_module(void)
{
pci_unregister_driver(&ioat_pci_driver);
+ kmem_cache_destroy(ioat_sed_cache);
kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index c9b93055dc9d..f0a399cf45b2 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -200,12 +200,9 @@ of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glu
ret = of_k3_udma_glue_parse(udmax_np, common);
if (ret)
- goto out_put_spec;
+ return ret;
ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
-
-out_put_spec:
- of_node_put(udmax_np);
return ret;
}
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index e143a7330816..718842fdaf98 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -885,11 +885,11 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
u32 st;
bool repeat_tx;
+ spin_lock(&xchan->vchan.lock);
+
if (xchan->stop_requested)
complete(&xchan->last_interrupt);
- spin_lock(&xchan->vchan.lock);
-
/* get submitted request */
vd = vchan_next_desc(&xchan->vchan);
if (!vd)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 1f3520d76861..a17f3c0cdfa6 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -81,7 +81,7 @@ int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
amd64_warn("%s: error reading F%dx%03x.\n",
func, PCI_FUNC(pdev->devfn), offset);
- return err;
+ return pcibios_err_to_errno(err);
}
int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
@@ -94,7 +94,7 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
amd64_warn("%s: error writing to F%dx%03x.\n",
func, PCI_FUNC(pdev->devfn), offset);
- return err;
+ return pcibios_err_to_errno(err);
}
/*
@@ -1025,8 +1025,10 @@ static int gpu_get_node_map(struct amd64_pvt *pvt)
}
ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp);
- if (ret)
+ if (ret) {
+ ret = pcibios_err_to_errno(ret);
goto out;
+ }
gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp);
gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp);
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index cdd8480e7368..dbe9fe5f2ca6 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -800,7 +800,7 @@ static int errcmd_enable_error_reporting(bool enable)
rc = pci_read_config_word(imc->pdev, ERRCMD_OFFSET, &errcmd);
if (rc)
- return rc;
+ return pcibios_err_to_errno(rc);
if (enable)
errcmd |= ERRCMD_CE | ERRSTS_UE;
@@ -809,7 +809,7 @@ static int errcmd_enable_error_reporting(bool enable)
rc = pci_write_config_word(imc->pdev, ERRCMD_OFFSET, errcmd);
if (rc)
- return rc;
+ return pcibios_err_to_errno(rc);
return 0;
}
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 869598b20e3a..5268b3f0a25a 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -11,7 +11,7 @@ config FIREWIRE
This is the new-generation IEEE 1394 (FireWire) driver stack
a.k.a. Juju, a new implementation designed for robustness and
simplicity.
- See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
+ See http://ieee1394.docs.kernel.org/en/latest/migration.html
for information about migration from the older Linux 1394 stack
to the new driver stack.
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 127d87e3a153..f8b99dd6cd82 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -222,14 +222,14 @@ static int reset_bus(struct fw_card *card, bool short_reset)
int reg = short_reset ? 5 : 1;
int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
- trace_bus_reset_initiate(card->generation, short_reset);
+ trace_bus_reset_initiate(card->index, card->generation, short_reset);
return card->driver->update_phy_reg(card, reg, 0, bit);
}
void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
{
- trace_bus_reset_schedule(card->generation, short_reset);
+ trace_bus_reset_schedule(card->index, card->generation, short_reset);
/* We don't try hard to sort out requests of long vs. short resets. */
card->br_short = short_reset;
@@ -249,7 +249,7 @@ static void br_work(struct work_struct *work)
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
- trace_bus_reset_postpone(card->generation, card->br_short);
+ trace_bus_reset_postpone(card->index, card->generation, card->br_short);
if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
fw_card_put(card);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 55993c9e0b90..9a7dc90330a3 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1559,7 +1559,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
struct client *e_client = e->client;
u32 rcode;
- trace_async_phy_outbound_complete((uintptr_t)packet, status, packet->generation,
+ trace_async_phy_outbound_complete((uintptr_t)packet, card->index, status, packet->generation,
packet->timestamp);
switch (status) {
@@ -1659,8 +1659,8 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
memcpy(pp->data, a->data, sizeof(a->data));
}
- trace_async_phy_outbound_initiate((uintptr_t)&e->p, e->p.generation, e->p.header[1],
- e->p.header[2]);
+ trace_async_phy_outbound_initiate((uintptr_t)&e->p, card->index, e->p.generation,
+ e->p.header[1], e->p.header[2]);
card->driver->send_request(card, &e->p);
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 837cc44d8d9f..8107eebd4296 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -508,7 +508,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
struct fw_node *local_node;
unsigned long flags;
- trace_bus_reset_handle(generation, node_id, bm_abdicate, self_ids, self_id_count);
+ trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count);
spin_lock_irqsave(&card->lock, flags);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 571fdff65c2b..76ab6a209768 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -174,8 +174,8 @@ static void transmit_complete_callback(struct fw_packet *packet,
struct fw_transaction *t =
container_of(packet, struct fw_transaction, packet);
- trace_async_request_outbound_complete((uintptr_t)t, packet->generation, packet->speed,
- status, packet->timestamp);
+ trace_async_request_outbound_complete((uintptr_t)t, card->index, packet->generation,
+ packet->speed, status, packet->timestamp);
switch (status) {
case ACK_COMPLETE:
@@ -398,7 +398,8 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
spin_unlock_irqrestore(&card->lock, flags);
- trace_async_request_outbound_initiate((uintptr_t)t, generation, speed, t->packet.header, payload,
+ trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed,
+ t->packet.header, payload,
tcode_is_read_request(tcode) ? 0 : length / 4);
card->driver->send_request(card, &t->packet);
@@ -463,7 +464,7 @@ static DECLARE_COMPLETION(phy_config_done);
static void transmit_phy_packet_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
- trace_async_phy_outbound_complete((uintptr_t)packet, packet->generation, status,
+ trace_async_phy_outbound_complete((uintptr_t)packet, card->index, packet->generation, status,
packet->timestamp);
complete(&phy_config_done);
}
@@ -503,7 +504,7 @@ void fw_send_phy_config(struct fw_card *card,
phy_config_packet.generation = generation;
reinit_completion(&phy_config_done);
- trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet,
+ trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, card->index,
phy_config_packet.generation, phy_config_packet.header[1],
phy_config_packet.header[2]);
@@ -674,7 +675,7 @@ static void free_response_callback(struct fw_packet *packet,
{
struct fw_request *request = container_of(packet, struct fw_request, response);
- trace_async_response_outbound_complete((uintptr_t)request, packet->generation,
+ trace_async_response_outbound_complete((uintptr_t)request, card->index, packet->generation,
packet->speed, status, packet->timestamp);
// Decrease the reference count since the response is no longer in flight.
@@ -879,9 +880,10 @@ void fw_send_response(struct fw_card *card,
// Increase the reference count so that the object is kept while the response is in flight.
fw_request_get(request);
- trace_async_response_outbound_initiate((uintptr_t)request, request->response.generation,
- request->response.speed, request->response.header,
- data, data ? data_length / 4 : 0);
+ trace_async_response_outbound_initiate((uintptr_t)request, card->index,
+ request->response.generation, request->response.speed,
+ request->response.header, data,
+ data ? data_length / 4 : 0);
card->driver->send_response(card, &request->response);
}
@@ -995,7 +997,7 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
tcode = async_header_get_tcode(p->header);
if (tcode_is_link_internal(tcode)) {
- trace_async_phy_inbound((uintptr_t)p, p->generation, p->ack, p->timestamp,
+ trace_async_phy_inbound((uintptr_t)p, card->index, p->generation, p->ack, p->timestamp,
p->header[1], p->header[2]);
fw_cdev_handle_phy_packet(card, p);
return;
@@ -1007,8 +1009,8 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
return;
}
- trace_async_request_inbound((uintptr_t)request, p->generation, p->speed, p->ack,
- p->timestamp, p->header, request->data,
+ trace_async_request_inbound((uintptr_t)request, card->index, p->generation, p->speed,
+ p->ack, p->timestamp, p->header, request->data,
tcode_is_read_request(tcode) ? 0 : request->length / 4);
offset = async_header_get_offset(p->header);
@@ -1078,8 +1080,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
}
spin_unlock_irqrestore(&card->lock, flags);
- trace_async_response_inbound((uintptr_t)t, p->generation, p->speed, p->ack, p->timestamp,
- p->header, data, data_length / 4);
+ trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack,
+ p->timestamp, p->header, data, data_length / 4);
if (!t) {
timed_out:
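The firewire hunks above add card->index as an extra argument to the async and bus-reset tracepoints so that events from multiple 1394 controllers can be told apart. A hedged sketch of what such a tracepoint can look like (hypothetical event name and fields, not the actual include/trace/events/firewire.h definitions; the usual TRACE_SYSTEM/include boilerplate of a trace header is omitted):

#include <linux/tracepoint.h>

TRACE_EVENT(async_demo_with_card_index,
	TP_PROTO(unsigned int card_index, unsigned int generation),
	TP_ARGS(card_index, generation),
	TP_STRUCT__entry(
		__field(unsigned int, card_index)
		__field(unsigned int, generation)
	),
	TP_fast_assign(
		__entry->card_index = card_index;
		__entry->generation = generation;
	),
	TP_printk("card_index=%u generation=%u",
		  __entry->card_index, __entry->generation)
);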
diff --git a/drivers/firewire/packet-serdes-test.c b/drivers/firewire/packet-serdes-test.c
index f93c966e794d..e83b1fece780 100644
--- a/drivers/firewire/packet-serdes-test.c
+++ b/drivers/firewire/packet-serdes-test.c
@@ -579,4 +579,5 @@ static struct kunit_suite packet_serdes_test_suite = {
};
kunit_test_suite(packet_serdes_test_suite);
+MODULE_DESCRIPTION("FireWire packet serialization/deserialization unit test suite");
MODULE_LICENSE("GPL");
diff --git a/drivers/firewire/uapi-test.c b/drivers/firewire/uapi-test.c
index 2fcbede4fab1..bc3f10a2e516 100644
--- a/drivers/firewire/uapi-test.c
+++ b/drivers/firewire/uapi-test.c
@@ -86,4 +86,5 @@ static struct kunit_suite structure_layout_test_suite = {
};
kunit_test_suite(structure_layout_test_suite);
+MODULE_DESCRIPTION("FireWire UAPI unit test suite");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 5b9dc26e6bcb..552c78f5f059 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -136,7 +136,7 @@ static int efi_pstore_read_func(struct pstore_record *record,
&size, record->buf);
if (status != EFI_SUCCESS) {
kfree(record->buf);
- return -EIO;
+ return efi_status_to_err(status);
}
/*
@@ -189,7 +189,7 @@ static ssize_t efi_pstore_read(struct pstore_record *record)
return 0;
if (status != EFI_SUCCESS)
- return -EIO;
+ return efi_status_to_err(status);
/* skip variables that don't concern us */
if (efi_guidcmp(guid, LINUX_EFI_CRASH_GUID))
@@ -227,7 +227,7 @@ static int efi_pstore_write(struct pstore_record *record)
record->size, record->psi->buf,
true);
efivar_unlock();
- return status == EFI_SUCCESS ? 0 : -EIO;
+ return efi_status_to_err(status);
};
static int efi_pstore_erase(struct pstore_record *record)
@@ -238,7 +238,7 @@ static int efi_pstore_erase(struct pstore_record *record)
PSTORE_EFI_ATTRIBUTES, 0, NULL);
if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
- return -EIO;
+ return efi_status_to_err(status);
return 0;
}
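The efi-pstore hunks above stop collapsing every failure into -EIO and instead map the efi_status_t through efi_status_to_err(), so distinct conditions such as EFI_NOT_FOUND keep distinct error codes. A small sketch of the pattern under that assumption (efi_status_to_err() is declared in <linux/efi.h>; the wrapper name is illustrative):

#include <linux/efi.h>

/* Illustrative wrapper: convert an EFI variable-service status to an errno. */
static int demo_efi_status_to_errno(efi_status_t status)
{
	if (status == EFI_SUCCESS)
		return 0;

	/* e.g. EFI_NOT_FOUND no longer becomes a blanket -EIO */
	return efi_status_to_err(status);
}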
diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c
index 684c9354637c..d0ef93551c44 100644
--- a/drivers/firmware/efi/libstub/loongarch.c
+++ b/drivers/firmware/efi/libstub/loongarch.c
@@ -41,7 +41,7 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
unsigned long __weak kernel_entry_address(unsigned long kernel_addr,
efi_loaded_image_t *image)
{
- return *(unsigned long *)(kernel_addr + 8) - VMLINUX_LOAD_ADDRESS + kernel_addr;
+ return *(unsigned long *)(kernel_addr + 8) - PHYSADDR(VMLINUX_LOAD_ADDRESS) + kernel_addr;
}
efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
index ac8c0ef85158..af2c82f7bd90 100644
--- a/drivers/firmware/efi/libstub/zboot.lds
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -41,6 +41,7 @@ SECTIONS
}
/DISCARD/ : {
+ *(.discard .discard.*)
*(.modinfo .init.modinfo)
}
}
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 3365944f7965..34109fd86c55 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -15,10 +15,6 @@
#include <asm/early_ioremap.h>
#include <asm/efi.h>
-#ifndef __efi_memmap_free
-#define __efi_memmap_free(phys, size, flags) do { } while (0)
-#endif
-
/**
* __efi_memmap_init - Common code for mapping the EFI memory map
* @data: EFI memory map data
@@ -51,11 +47,6 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
return -ENOMEM;
}
- if (efi.memmap.flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB))
- __efi_memmap_free(efi.memmap.phys_map,
- efi.memmap.desc_size * efi.memmap.nr_map,
- efi.memmap.flags);
-
map.phys_map = data->phys_map;
map.nr_map = data->size / data->desc_size;
map.map_end = map.map + data->size;
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 5d56bc40a79d..708b777857d3 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -213,7 +213,7 @@ extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
* Calls the appropriate efi_runtime_service() with the appropriate
* arguments.
*/
-static void efi_call_rts(struct work_struct *work)
+static void __nocfi efi_call_rts(struct work_struct *work)
{
const union efi_rts_args *args = efi_rts_work.args;
efi_status_t status = EFI_NOT_FOUND;
@@ -435,7 +435,7 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
return status;
}
-static efi_status_t
+static efi_status_t __nocfi
virt_efi_set_variable_nb(efi_char16_t *name, efi_guid_t *vendor, u32 attr,
unsigned long data_size, void *data)
{
@@ -469,7 +469,7 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
return status;
}
-static efi_status_t
+static efi_status_t __nocfi
virt_efi_query_variable_info_nb(u32 attr, u64 *storage_space,
u64 *remaining_space, u64 *max_variable_size)
{
@@ -499,10 +499,9 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
return status;
}
-static void virt_efi_reset_system(int reset_type,
- efi_status_t status,
- unsigned long data_size,
- efi_char16_t *data)
+static void __nocfi
+virt_efi_reset_system(int reset_type, efi_status_t status,
+ unsigned long data_size, efi_char16_t *data)
{
if (down_trylock(&efi_runtime_lock)) {
pr_warn("failed to invoke the reset_system() runtime service:\n"
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index d9629ff87861..2328ca58bba6 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -497,10 +497,12 @@ int psci_cpu_suspend_enter(u32 state)
static int psci_system_suspend(unsigned long unused)
{
+ int err;
phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
- return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
+ err = invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
pa_cpu_resume, 0, 0);
+ return psci_to_linux_errno(err);
}
static int psci_system_suspend_enter(suspend_state_t state)
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
index 6b60ca004345..f4de3fea0b2d 100644
--- a/drivers/fpga/altera-fpga2sdram.c
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -75,12 +75,6 @@ static int alt_fpga2sdram_enable_set(struct fpga_bridge *bridge, bool enable)
return _alt_fpga2sdram_enable_set(bridge->priv, enable);
}
-struct prop_map {
- char *prop_name;
- u32 *prop_value;
- u32 prop_max;
-};
-
static const struct fpga_bridge_ops altera_fpga2sdram_br_ops = {
.enable_set = alt_fpga2sdram_enable_set,
.enable_show = alt_fpga2sdram_enable_show,
diff --git a/drivers/fpga/tests/Kconfig b/drivers/fpga/tests/Kconfig
index d4e55204c092..e4a64815f16d 100644
--- a/drivers/fpga/tests/Kconfig
+++ b/drivers/fpga/tests/Kconfig
@@ -1,6 +1,6 @@
config FPGA_KUNIT_TESTS
- bool "KUnit test for the FPGA subsystem" if !KUNIT_ALL_TESTS
- depends on FPGA=y && FPGA_REGION=y && FPGA_BRIDGE=y && KUNIT=y && MODULES=n
+ tristate "KUnit test for the FPGA subsystem" if !KUNIT_ALL_TESTS
+ depends on FPGA && FPGA_REGION && FPGA_BRIDGE && KUNIT=y
default KUNIT_ALL_TESTS
help
This builds unit tests for the FPGA subsystem
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3dbddec07028..1c28a48915bb 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1576,7 +1576,7 @@ config GPIO_TPS68470
are "output only" GPIOs.
config GPIO_TQMX86
- tristate "TQ-Systems QTMX86 GPIO"
+ tristate "TQ-Systems TQMx86 GPIO"
depends on MFD_TQMX86 || COMPILE_TEST
depends on HAS_IOPORT_MAP
select GPIOLIB_IRQCHIP
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index bb499e362912..1d0175d6350b 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -225,6 +225,11 @@ static int davinci_gpio_probe(struct platform_device *pdev)
else
nirq = DIV_ROUND_UP(ngpio, 16);
+ if (nirq > MAX_INT_PER_BANK) {
+ dev_err(dev, "Too many IRQs!\n");
+ return -EINVAL;
+ }
+
chips = devm_kzalloc(dev, sizeof(*chips), GFP_KERNEL);
if (!chips)
return -ENOMEM;
diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c
index c693fe05d50f..f2e911a3d2ca 100644
--- a/drivers/gpio/gpio-graniterapids.c
+++ b/drivers/gpio/gpio-graniterapids.c
@@ -296,6 +296,8 @@ static int gnr_gpio_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ raw_spin_lock_init(&priv->lock);
+
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c
index 899335da93c7..7e29a2d8de1a 100644
--- a/drivers/gpio/gpio-gw-pld.c
+++ b/drivers/gpio/gpio-gw-pld.c
@@ -130,5 +130,6 @@ static struct i2c_driver gw_pld_driver = {
};
module_i2c_driver(gw_pld_driver);
+MODULE_DESCRIPTION("Gateworks I2C PLD GPIO expander");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index cd9b16dbe1a9..94f6fefc011b 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -168,5 +168,6 @@ static void __exit mc33880_exit(void)
module_exit(mc33880_exit);
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
+MODULE_DESCRIPTION("MC33880 high-side/low-side switch GPIO driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 77a2812f2974..732a6964748c 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -758,6 +758,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
int level;
if (chip->driver_data & PCA_PCAL) {
+ guard(mutex)(&chip->i2c_lock);
+
/* Enable latch on interrupt-enabled inputs */
pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
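The pca953x hunk above uses the scope-based guard(mutex)() helper from <linux/cleanup.h>, which takes the lock at the point of declaration and releases it automatically when the enclosing scope is left. A minimal sketch of the pattern with a made-up structure (illustrative only):

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct demo_state {
	struct mutex lock;
	unsigned int value;
};

static void demo_update(struct demo_state *st, unsigned int value)
{
	guard(mutex)(&st->lock);	/* dropped automatically at any return */

	st->value = value;
}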
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 53b69abe6787..7c57eaeb0afe 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -438,5 +438,6 @@ static void __exit pcf857x_exit(void)
}
module_exit(pcf857x_exit);
+MODULE_DESCRIPTION("Driver for pcf857x, pca857x, and pca967x I2C GPIO expanders");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 9fc1f3dd4190..a211a02d4b4a 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -438,4 +438,5 @@ static struct amba_driver pl061_gpio_driver = {
};
module_amba_driver(pl061_gpio_driver);
+MODULE_DESCRIPTION("Driver for the ARM PrimeCell(tm) General Purpose Input/Output (PL061)");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 3a28c1f273c3..f2e7e8754d95 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -6,6 +6,7 @@
* Vadim V.Vlasov <vvlasov@dev.rtsoft.ru>
*/
+#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -28,16 +29,25 @@
#define TQMX86_GPIIC 3 /* GPI Interrupt Configuration Register */
#define TQMX86_GPIIS 4 /* GPI Interrupt Status Register */
+#define TQMX86_GPII_NONE 0
#define TQMX86_GPII_FALLING BIT(0)
#define TQMX86_GPII_RISING BIT(1)
+/* Stored in irq_type as a trigger type, but not actually valid as a register
+ * value, so the name doesn't use "GPII"
+ */
+#define TQMX86_INT_BOTH (BIT(0) | BIT(1))
#define TQMX86_GPII_MASK (BIT(0) | BIT(1))
#define TQMX86_GPII_BITS 2
+/* Stored in irq_type with GPII bits */
+#define TQMX86_INT_UNMASKED BIT(2)
struct tqmx86_gpio_data {
struct gpio_chip chip;
void __iomem *io_base;
int irq;
+ /* Lock must be held for accessing output and irq_type fields */
raw_spinlock_t spinlock;
+ DECLARE_BITMAP(output, TQMX86_NGPIO);
u8 irq_type[TQMX86_NGPI];
};
@@ -64,15 +74,10 @@ static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
unsigned long flags;
- u8 val;
raw_spin_lock_irqsave(&gpio->spinlock, flags);
- val = tqmx86_gpio_read(gpio, TQMX86_GPIOD);
- if (value)
- val |= BIT(offset);
- else
- val &= ~BIT(offset);
- tqmx86_gpio_write(gpio, val, TQMX86_GPIOD);
+ __assign_bit(offset, gpio->output, value);
+ tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
}
@@ -107,21 +112,38 @@ static int tqmx86_gpio_get_direction(struct gpio_chip *chip,
return GPIO_LINE_DIRECTION_OUT;
}
+static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset)
+ __must_hold(&gpio->spinlock)
+{
+ u8 type = TQMX86_GPII_NONE, gpiic;
+
+ if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) {
+ type = gpio->irq_type[offset] & TQMX86_GPII_MASK;
+
+ if (type == TQMX86_INT_BOTH)
+ type = tqmx86_gpio_get(&gpio->chip, offset + TQMX86_NGPO)
+ ? TQMX86_GPII_FALLING
+ : TQMX86_GPII_RISING;
+ }
+
+ gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
+ gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS));
+ gpiic |= type << (offset * TQMX86_GPII_BITS);
+ tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+}
+
static void tqmx86_gpio_irq_mask(struct irq_data *data)
{
unsigned int offset = (data->hwirq - TQMX86_NGPO);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
unsigned long flags;
- u8 gpiic, mask;
-
- mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
- gpiic &= ~mask;
- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+ gpio->irq_type[offset] &= ~TQMX86_INT_UNMASKED;
+ tqmx86_gpio_irq_config(gpio, offset);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+
gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data));
}
@@ -131,16 +153,12 @@ static void tqmx86_gpio_irq_unmask(struct irq_data *data)
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
unsigned long flags;
- u8 gpiic, mask;
-
- mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data));
+
raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
- gpiic &= ~mask;
- gpiic |= gpio->irq_type[offset] << (offset * TQMX86_GPII_BITS);
- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+ gpio->irq_type[offset] |= TQMX86_INT_UNMASKED;
+ tqmx86_gpio_irq_config(gpio, offset);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
}
@@ -151,7 +169,7 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
unsigned int offset = (data->hwirq - TQMX86_NGPO);
unsigned int edge_type = type & IRQF_TRIGGER_MASK;
unsigned long flags;
- u8 new_type, gpiic;
+ u8 new_type;
switch (edge_type) {
case IRQ_TYPE_EDGE_RISING:
@@ -161,19 +179,16 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
new_type = TQMX86_GPII_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
- new_type = TQMX86_GPII_FALLING | TQMX86_GPII_RISING;
+ new_type = TQMX86_INT_BOTH;
break;
default:
return -EINVAL; /* not supported */
}
- gpio->irq_type[offset] = new_type;
-
raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
- gpiic &= ~((TQMX86_GPII_MASK) << (offset * TQMX86_GPII_BITS));
- gpiic |= new_type << (offset * TQMX86_GPII_BITS);
- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+ gpio->irq_type[offset] &= ~TQMX86_GPII_MASK;
+ gpio->irq_type[offset] |= new_type;
+ tqmx86_gpio_irq_config(gpio, offset);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
return 0;
@@ -184,8 +199,8 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
- unsigned long irq_bits;
- int i = 0;
+ unsigned long irq_bits, flags;
+ int i;
u8 irq_status;
chained_irq_enter(irq_chip, desc);
@@ -194,6 +209,34 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
irq_bits = irq_status;
+
+ raw_spin_lock_irqsave(&gpio->spinlock, flags);
+ for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
+ /*
+ * Edge-both triggers are implemented by flipping the edge
+ * trigger after each interrupt, as the controller only supports
+ * either rising or falling edge triggers, but not both.
+ *
+ * Internally, the TQMx86 GPIO controller has separate status
+ * registers for rising and falling edge interrupts. GPIIC
+ * configures which bits from which register are visible in the
+ * interrupt status register GPIIS and defines what triggers the
+ * parent IRQ line. Writing to GPIIS always clears both rising
+ * and falling interrupt flags internally, regardless of the
+ * currently configured trigger.
+ *
+ * In consequence, we can cleanly implement the edge-both
+ * trigger in software by first clearing the interrupt and then
+ * setting the new trigger based on the current GPIO input in
+ * tqmx86_gpio_irq_config() - even if an edge arrives between
+ * reading the input and setting the trigger, we will have a new
+ * interrupt pending.
+ */
+ if ((gpio->irq_type[i] & TQMX86_GPII_MASK) == TQMX86_INT_BOTH)
+ tqmx86_gpio_irq_config(gpio, i);
+ }
+ raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+
for_each_set_bit(i, &irq_bits, TQMX86_NGPI)
generic_handle_domain_irq(gpio->chip.irq.domain,
i + TQMX86_NGPO);
@@ -277,6 +320,13 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
tqmx86_gpio_write(gpio, (u8)~TQMX86_DIR_INPUT_MASK, TQMX86_GPIODD);
+ /*
+ * Reading the previous output state is not possible with TQMx86 hardware.
+ * Initialize all outputs to 0 to have a defined state that matches the
+ * shadow register.
+ */
+ tqmx86_gpio_write(gpio, 0, TQMX86_GPIOD);
+
chip = &gpio->chip;
chip->label = "gpio-tqmx86";
chip->owner = THIS_MODULE;
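In the gpio-tqmx86 changes above, irq_type[] now packs the requested GPII trigger bits together with a software TQMX86_INT_UNMASKED flag, and edge-both triggers are emulated by re-arming on the edge opposite to the current input level. A small sketch of how the next 2-bit GPII field can be derived from that packed state (hypothetical helper mirroring tqmx86_gpio_irq_config() above):

/* Hypothetical helper: compute the GPII trigger bits for one line. */
static u8 demo_next_gpii(u8 irq_type, bool input_is_high)
{
	u8 type = TQMX86_GPII_NONE;

	if (irq_type & TQMX86_INT_UNMASKED) {
		type = irq_type & TQMX86_GPII_MASK;

		/* Edge-both: arm for the edge that can actually happen next. */
		if (type == TQMX86_INT_BOTH)
			type = input_is_high ? TQMX86_GPII_FALLING
					     : TQMX86_GPII_RISING;
	}

	return type;
}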
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 9dad67ea2597..5639abce6ec5 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -89,6 +89,10 @@ struct linehandle_state {
GPIOHANDLE_REQUEST_OPEN_DRAIN | \
GPIOHANDLE_REQUEST_OPEN_SOURCE)
+#define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
+ (GPIOHANDLE_REQUEST_INPUT | \
+ GPIOHANDLE_REQUEST_OUTPUT)
+
static int linehandle_validate_flags(u32 flags)
{
/* Return an error if an unknown flag is set */
@@ -169,21 +173,21 @@ static long linehandle_set_config(struct linehandle_state *lh,
if (ret)
return ret;
+ /* Lines must be reconfigured explicitly as input or output. */
+ if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
+ return -EINVAL;
+
for (i = 0; i < lh->num_descs; i++) {
desc = lh->descs[i];
- linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);
+ linehandle_flags_to_desc_flags(lflags, &desc->flags);
- /*
- * Lines have to be requested explicitly for input
- * or output, else the line will be treated "as is".
- */
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
int val = !!gcnf.default_values[i];
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
- } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
+ } else {
ret = gpiod_direction_input(desc);
if (ret)
return ret;
@@ -1530,12 +1534,14 @@ static long linereq_set_config(struct linereq *lr, void __user *ip)
line = &lr->lines[i];
desc = lr->lines[i].desc;
flags = gpio_v2_line_config_flags(&lc, i);
- gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
- edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
/*
- * Lines have to be requested explicitly for input
- * or output, else the line will be treated "as is".
+ * Lines not explicitly reconfigured as input or output
+ * are left unchanged.
*/
+ if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
+ continue;
+ gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
+ edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
int val = gpio_v2_line_config_output_value(&lc, i);
@@ -1543,7 +1549,7 @@ static long linereq_set_config(struct linereq *lr, void __user *ip)
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
- } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
+ } else {
ret = gpiod_direction_input(desc);
if (ret)
return ret;
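For userspace, the gpiolib-cdev hunks mean that a SET_CONFIG call now has to name a direction explicitly: the v1 handle ioctl rejects a config without INPUT or OUTPUT with -EINVAL, and the v2 ioctl leaves lines without a direction flag unchanged. A minimal v2 usage sketch under those assumptions (error handling omitted; the function name is illustrative):

#include <linux/gpio.h>
#include <string.h>
#include <sys/ioctl.h>

/* Reconfigure an already-requested line (via its line fd) as an input. */
static int reconfigure_as_input(int line_fd)
{
	struct gpio_v2_line_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.flags = GPIO_V2_LINE_FLAG_INPUT;	/* direction must be explicit */

	return ioctl(line_fd, GPIO_V2_LINE_SET_CONFIG_IOCTL, &cfg);
}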
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 026444eeb5c6..d0aa277fc3bf 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -450,6 +450,7 @@ config DRM_PRIVACY_SCREEN
config DRM_WERROR
bool "Compile the drm subsystem with warnings as errors"
depends on DRM && EXPERT
+ depends on !WERROR
default n
help
A kernel build should not cause any compiler warnings, and this
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 8975cf41a91a..48ad0c04aa72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
return -EINVAL;
vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
- if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
system_mem_needed = size;
ttm_mem_needed = size;
}
@@ -233,7 +233,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
if (adev && xcp_id >= 0) {
adev->kfd.vram_used[xcp_id] += vram_needed;
adev->kfd.vram_used_aligned[xcp_id] +=
- (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+ (adev->flags & AMD_IS_APU) ?
vram_needed :
ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
}
@@ -261,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
if (adev) {
adev->kfd.vram_used[xcp_id] -= size;
- if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
adev->kfd.vram_used_aligned[xcp_id] -= size;
kfd_mem_limit.system_mem_used -= size;
kfd_mem_limit.ttm_mem_used -= size;
@@ -890,7 +890,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
* if peer device has large BAR. In contrast, access over xGMI is
* allowed for both small and large BAR configurations of peer device
*/
- if ((adev != bo_adev && !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)) &&
+ if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) &&
((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
@@ -1658,7 +1658,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
- atomic64_read(&adev->vram_pin_size)
- reserved_for_pt;
- if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
system_mem_available = no_system_mem_limit ?
kfd_mem_limit.max_system_mem_limit :
kfd_mem_limit.max_system_mem_limit -
@@ -1706,7 +1706,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
@@ -1953,7 +1953,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
if (size) {
if (!is_imported &&
(mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
- ((adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) &&
+ ((adev->flags & AMD_IS_APU) &&
mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
*size = bo_size;
else
@@ -2376,7 +2376,7 @@ static int import_obj_create(struct amdgpu_device *adev,
(*mem)->bo = bo;
(*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
- !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+ !(adev->flags & AMD_IS_APU) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
(*mem)->mapped_to_gpu_memory = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 108003bdf1e9..2e13c7c4b2b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -400,7 +400,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = vram_info->v30.channel_num;
mem_channel_width = vram_info->v30.channel_width;
if (vram_width)
- *vram_width = mem_channel_number * (1 << mem_channel_width);
+ *vram_width = mem_channel_number * 16;
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 861ccff78af9..33f791d92ddf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5220,11 +5220,14 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
dev_info(adev->dev, "GPU mode1 reset\n");
+ /* Cache the state before bus master disable. The saved config space
+ * values are used in other cases like restore after mode-2 reset.
+ */
+ amdgpu_device_cache_pci_state(adev->pdev);
+
/* disable BM */
pci_clear_master(adev->pdev);
- amdgpu_device_cache_pci_state(adev->pdev);
-
if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
dev_info(adev->dev, "GPU smu mode1 reset\n");
ret = amdgpu_dpm_mode1_reset(adev);
@@ -5944,13 +5947,18 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
*speed = PCI_SPEED_UNKNOWN;
*width = PCIE_LNK_WIDTH_UNKNOWN;
- while ((parent = pci_upstream_bridge(parent))) {
- /* skip upstream/downstream switches internal to dGPU*/
- if (parent->vendor == PCI_VENDOR_ID_ATI)
- continue;
- *speed = pcie_get_speed_cap(parent);
- *width = pcie_get_width_cap(parent);
- break;
+ if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
+ while ((parent = pci_upstream_bridge(parent))) {
+ /* skip upstream/downstream switches internal to dGPU */
+ if (parent->vendor == PCI_VENDOR_ID_ATI)
+ continue;
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ break;
+ }
+ } else {
+ /* use the current speeds rather than max if switching is not supported */
+ pcie_bandwidth_available(adev->pdev, NULL, speed, width);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 055ba2ea4c12..662d0f28f358 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -41,8 +41,6 @@
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
-#include <linux/pm_runtime.h>
-#include "amdgpu_trace.h"
/**
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
@@ -58,42 +56,11 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- int r;
if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(1, __func__);
- if (r < 0)
- goto out;
-
return 0;
-
-out:
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(0, __func__);
- return r;
-}
-
-/**
- * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
- *
- * @dmabuf: DMA-buf where we remove the attachment from
- * @attach: the attachment to remove
- *
- * Called when an attachment is removed from the DMA-buf.
- */
-static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attach)
-{
- struct drm_gem_object *obj = dmabuf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(0, __func__);
}
/**
@@ -267,7 +234,6 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_dma_buf_attach,
- .detach = amdgpu_dma_buf_detach,
.pin = amdgpu_dma_buf_pin,
.unpin = amdgpu_dma_buf_unpin,
.map_dma_buf = amdgpu_dma_buf_map,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 10832b470448..bc3ac73b6b8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -181,7 +181,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(1, __func__);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
struct dma_fence *old;
@@ -309,7 +308,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(0, __func__);
} while (last_seq != seq);
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 67c234bcf89f..3adaa4670103 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -108,6 +108,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
memset(&bp, 0, sizeof(bp));
*obj = NULL;
+ flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
bp.size = size;
bp.byte_align = alignment;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index be4629cdac04..08b9dfb65335 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -684,12 +684,17 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
unsigned int ndw;
- signed long r;
+ int r;
uint32_t seq;
- if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready ||
- !down_read_trylock(&adev->reset_domain->sem)) {
+ /*
+ * A GPU reset should flush all TLBs anyway, so no need to do
+ * this while one is ongoing.
+ */
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return 0;
+ if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
if (adev->gmc.flush_tlb_needs_extra_type_2)
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
2, all_hub,
@@ -703,43 +708,40 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
flush_type, all_hub,
inst);
- return 0;
- }
+ r = 0;
+ } else {
+ /* 2 dwords flush + 8 dwords fence */
+ ndw = kiq->pmf->invalidate_tlbs_size + 8;
- /* 2 dwords flush + 8 dwords fence */
- ndw = kiq->pmf->invalidate_tlbs_size + 8;
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ ndw += kiq->pmf->invalidate_tlbs_size;
- if (adev->gmc.flush_tlb_needs_extra_type_2)
- ndw += kiq->pmf->invalidate_tlbs_size;
+ if (adev->gmc.flush_tlb_needs_extra_type_0)
+ ndw += kiq->pmf->invalidate_tlbs_size;
- if (adev->gmc.flush_tlb_needs_extra_type_0)
- ndw += kiq->pmf->invalidate_tlbs_size;
+ spin_lock(&adev->gfx.kiq[inst].ring_lock);
+ amdgpu_ring_alloc(ring, ndw);
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
- spin_lock(&adev->gfx.kiq[inst].ring_lock);
- amdgpu_ring_alloc(ring, ndw);
- if (adev->gmc.flush_tlb_needs_extra_type_2)
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
+ if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
- if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r) {
+ amdgpu_ring_undo(ring);
+ spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+ goto error_unlock_reset;
+ }
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
- r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
- if (r) {
- amdgpu_ring_undo(ring);
+ amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- goto error_unlock_reset;
- }
-
- amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
- if (r < 1) {
- dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
- r = -ETIME;
- goto error_unlock_reset;
+ if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
+ dev_err(adev->dev, "timeout waiting for kiq fence\n");
+ r = -ETIME;
+ }
}
- r = 0;
error_unlock_reset:
up_read(&adev->reset_domain->sem);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8d8c39be6129..c556c8b653fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -604,8 +604,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (!amdgpu_bo_support_uswc(bo->flags))
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
-
bo->tbo.bdev = &adev->mman.bdev;
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
AMDGPU_GEM_DOMAIN_GDS))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4bd4602d11b1..cef9dd0a012b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -640,6 +640,20 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
}
}
+static bool psp_err_warn(struct psp_context *psp)
+{
+ struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
+
+ /* This response indicates reg list is already loaded */
+ if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
+ cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
+ cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
+ cmd->resp.status == TEE_ERROR_CANCEL)
+ return false;
+
+ return true;
+}
+
static int
psp_cmd_submit_buf(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
@@ -699,10 +713,13 @@ psp_cmd_submit_buf(struct psp_context *psp,
dev_warn(psp->adev->dev,
"failed to load ucode %s(0x%X) ",
amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
- dev_warn(psp->adev->dev,
- "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
- psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
- psp->cmd_buf_mem->resp.status);
+ if (psp_err_warn(psp))
+ dev_warn(
+ psp->adev->dev,
+ "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
+ psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
+ psp->cmd_buf_mem->cmd_id,
+ psp->cmd_buf_mem->resp.status);
/* If any firmware (including CAP) load fails under SRIOV, it should
* return failure to stop the VF from initializing.
* Also return failure in case of timeout
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index c8980d5f6540..7021c4a66fb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -46,7 +46,7 @@ struct amdgpu_iv_entry;
#define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x) AMDGPU_GET_REG_FIELD(x, 7, 7)
#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8)
#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
-#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 13, 13)
+#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
#define AMDGPU_RAS_GPU_ERR_BOOT_STATUS(x) AMDGPU_GET_REG_FIELD(x, 31, 31)
#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 1000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 7aafeb763e5d..383fce40d4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -554,21 +554,6 @@ TRACE_EVENT(amdgpu_reset_reg_dumps,
__entry->value)
);
-TRACE_EVENT(amdgpu_runpm_reference_dumps,
- TP_PROTO(uint32_t index, const char *func),
- TP_ARGS(index, func),
- TP_STRUCT__entry(
- __field(uint32_t, index)
- __string(func, func)
- ),
- TP_fast_assign(
- __entry->index = index;
- __assign_str(func);
- ),
- TP_printk("amdgpu runpm reference dump 0x%x: 0x%s\n",
- __entry->index,
- __get_str(func))
-);
#undef AMDGPU_JOB_GET_TIMELINE_NAME
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index e30eecd02ae1..fde66225c481 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -3,6 +3,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
#include "amdgpu.h"
@@ -314,7 +315,13 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
return 0;
}
afb = to_amdgpu_framebuffer(new_state->fb);
- obj = new_state->fb->obj[0];
+
+ obj = drm_gem_fb_get_obj(new_state->fb, 0);
+ if (!obj) {
+ DRM_ERROR("Failed to get obj from framebuffer\n");
+ return -EINVAL;
+ }
+
rbo = gem_to_amdgpu_bo(obj);
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
@@ -368,12 +375,19 @@ static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct amdgpu_bo *rbo;
+ struct drm_gem_object *obj;
int r;
if (!old_state->fb)
return;
- rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
+ obj = drm_gem_fb_get_obj(old_state->fb, 0);
+ if (!obj) {
+ DRM_ERROR("Failed to get obj from framebuffer\n");
+ return;
+ }
+
+ rbo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
DRM_ERROR("failed to reserve rbo before unpin\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 7fdd306a48a0..f07647a9a9d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -706,11 +706,15 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
struct amdgpu_vm_bo_base *entry)
{
struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
- struct amdgpu_bo *bo = parent->bo, *pbo;
+ struct amdgpu_bo *bo, *pbo;
struct amdgpu_vm *vm = params->vm;
uint64_t pde, pt, flags;
unsigned int level;
+ if (WARN_ON(!parent))
+ return -EINVAL;
+
+ bo = parent->bo;
for (level = 0, pbo = bo->parent; pbo; ++level)
pbo = pbo->parent;
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 414ea3f560a7..d4e2aed2efa3 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -422,7 +422,7 @@ __aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
if (adev->gmc.num_mem_partitions == num_xcc / 2)
return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
- AMDGPU_QPX_PARTITION_MODE;
+ AMDGPU_CPX_PARTITION_MODE;
if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
return AMDGPU_DPX_PARTITION_MODE;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 7b16e8cca86a..f5b9f443cfdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -4195,9 +4195,10 @@ static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_i
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info)
{
- int i, j, k, counter, xcc_id, active_cu_number = 0;
- u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+ int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
+ u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
unsigned disable_masks[4 * 4];
+ bool is_symmetric_cus;
if (!adev || !cu_info)
return -EINVAL;
@@ -4215,6 +4216,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
+ is_symmetric_cus = true;
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
@@ -4242,6 +4244,15 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
+ if (i && is_symmetric_cus && prev_counter != counter)
+ is_symmetric_cus = false;
+ prev_counter = counter;
+ }
+ if (is_symmetric_cus) {
+ tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
+ tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
}
gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
xcc_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 0d1407f25005..32d4519541c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -154,18 +154,18 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
void *pkt, int size,
int api_status_off)
{
- int ndw = size / 4;
- signed long r;
- union MESAPI__MISC *x_pkt = pkt;
- struct MES_API_STATUS *api_status;
+ union MESAPI__QUERY_MES_STATUS mes_status_pkt;
+ signed long timeout = 3000000; /* 3000 ms */
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring;
- unsigned long flags;
- signed long timeout = 3000000; /* 3000 ms */
+ struct MES_API_STATUS *api_status;
+ union MESAPI__MISC *x_pkt = pkt;
const char *op_str, *misc_op_str;
- u32 fence_offset;
- u64 fence_gpu_addr;
- u64 *fence_ptr;
+ unsigned long flags;
+ u64 status_gpu_addr;
+ u32 status_offset;
+ u64 *status_ptr;
+ signed long r;
int ret;
if (x_pkt->header.opcode >= MES_SCH_API_MAX)
@@ -177,28 +177,38 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
/* Worst case in sriov where all other 15 VFs time out: each VF needs about 600ms */
timeout = 15 * 600 * 1000;
}
- BUG_ON(size % 4 != 0);
- ret = amdgpu_device_wb_get(adev, &fence_offset);
+ ret = amdgpu_device_wb_get(adev, &status_offset);
if (ret)
return ret;
- fence_gpu_addr =
- adev->wb.gpu_addr + (fence_offset * 4);
- fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
- *fence_ptr = 0;
+
+ status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4);
+ status_ptr = (u64 *)&adev->wb.wb[status_offset];
+ *status_ptr = 0;
spin_lock_irqsave(&mes->ring_lock, flags);
- if (amdgpu_ring_alloc(ring, ndw)) {
- spin_unlock_irqrestore(&mes->ring_lock, flags);
- amdgpu_device_wb_free(adev, fence_offset);
- return -ENOMEM;
- }
+ r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
+ if (r)
+ goto error_unlock_free;
api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
- api_status->api_completion_fence_addr = fence_gpu_addr;
+ api_status->api_completion_fence_addr = status_gpu_addr;
api_status->api_completion_fence_value = 1;
- amdgpu_ring_write_multiple(ring, pkt, ndw);
+ amdgpu_ring_write_multiple(ring, pkt, size / 4);
+
+ memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));
+ mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
+ mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+ mes_status_pkt.api_status.api_completion_fence_addr =
+ ring->fence_drv.gpu_addr;
+ mes_status_pkt.api_status.api_completion_fence_value =
+ ++ring->fence_drv.sync_seq;
+
+ amdgpu_ring_write_multiple(ring, &mes_status_pkt,
+ sizeof(mes_status_pkt) / 4);
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&mes->ring_lock, flags);
@@ -206,15 +216,16 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
misc_op_str = mes_v11_0_get_misc_op_string(x_pkt);
if (misc_op_str)
- dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str, misc_op_str);
+ dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str,
+ misc_op_str);
else if (op_str)
dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
else
- dev_dbg(adev->dev, "MES msg=%d was emitted\n", x_pkt->header.opcode);
+ dev_dbg(adev->dev, "MES msg=%d was emitted\n",
+ x_pkt->header.opcode);
- r = amdgpu_mes_fence_wait_polling(fence_ptr, (u64)1, timeout);
- amdgpu_device_wb_free(adev, fence_offset);
- if (r < 1) {
+ r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout);
+ if (r < 1 || !*status_ptr) {
if (misc_op_str)
dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
@@ -229,10 +240,19 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
while (halt_if_hws_hang)
schedule();
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto error_wb_free;
}
+ amdgpu_device_wb_free(adev, status_offset);
return 0;
+
+error_unlock_free:
+ spin_unlock_irqrestore(&mes->ring_lock, flags);
+
+error_wb_free:
+ amdgpu_device_wb_free(adev, status_offset);
+ return r;
}
static int convert_to_mes_queue_type(int queue_type)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 7566973ed8f5..37b5ddd6f13b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -464,8 +464,9 @@ struct psp_gfx_rb_frame
#define PSP_ERR_UNKNOWN_COMMAND 0x00000100
enum tee_error_code {
- TEE_SUCCESS = 0x00000000,
- TEE_ERROR_NOT_SUPPORTED = 0xFFFF000A,
+ TEE_SUCCESS = 0x00000000,
+ TEE_ERROR_CANCEL = 0xFFFF0002,
+ TEE_ERROR_NOT_SUPPORTED = 0xFFFF000A,
};
#endif /* _PSP_TEE_GFX_IF_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index f08a32c18694..40b28298af30 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -32,7 +32,9 @@
#include "mp/mp_14_0_2_sh_mask.h"
MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -66,6 +68,9 @@ static int psp_v14_0_init_microcode(struct psp_context *psp)
err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ if (err)
+ return err;
break;
default:
BUG();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 9596bca57212..afc57df421cd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -408,15 +408,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 0, 3):
- if ((adev->pdev->device == 0x7460 &&
- adev->pdev->revision == 0x00) ||
- (adev->pdev->device == 0x7461 &&
- adev->pdev->revision == 0x00))
- /* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
- gfx_target_version = 110005;
- else
- /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
- gfx_target_version = 110001;
+ /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
+ gfx_target_version = 110001;
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 5, 0):
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 4816fcb9803a..8ee3d07ffbdf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -1023,7 +1023,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
return -EINVAL;
- if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)
+ if (adev->flags & AMD_IS_APU)
return 0;
pgmap = &kfddev->pgmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 069b81eeea03..31e500859ab0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2619,8 +2619,7 @@ svm_range_best_restore_location(struct svm_range *prange,
return -1;
}
- if (node->adev->gmc.is_app_apu ||
- node->adev->flags & AMD_IS_APU)
+ if (node->adev->flags & AMD_IS_APU)
return 0;
if (prange->preferred_loc == gpuid ||
@@ -3338,8 +3337,7 @@ svm_range_best_prefetch_location(struct svm_range *prange)
goto out;
}
- if (bo_node->adev->gmc.is_app_apu ||
- bo_node->adev->flags & AMD_IS_APU) {
+ if (bo_node->adev->flags & AMD_IS_APU) {
best_loc = 0;
goto out;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 9c37bd0567ef..70c1776611c4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -201,7 +201,6 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
* is initialized to a non-zero value when page migration registers device memory.
*/
#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
- (adev)->gmc.is_app_apu ||\
((adev)->flags & AMD_IS_APU))
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 5fcd4f778dc3..47b8b49da8a7 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -8,7 +8,7 @@ config DRM_AMD_DC
depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64
select SND_HDA_COMPONENT if SND_HDA_CORE
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
- select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && (!ARM64 || !CC_IS_CLANG)
+ select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || RISCV))
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f1d67c6f4b98..e9ac20bed0f2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9169,9 +9169,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
trace_amdgpu_dm_atomic_commit_tail_begin(state);
- if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
- dc_allow_idle_optimizations(dm->dc, false);
-
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_dp_mst_atomic_wait_for_dependencies(state);
@@ -11440,6 +11437,12 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
mutex_unlock(&adev->dm.dc_lock);
}
+static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc)
+{
+ if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter)
+ dc_exit_ips_for_hw_access(dc);
+}
+
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
u32 value, const char *func_name)
{
@@ -11450,6 +11453,8 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
return;
}
#endif
+
+ amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
cgs_write_register(ctx->cgs_device, address, value);
trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
@@ -11473,6 +11478,8 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
return 0;
}
+ amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
+
value = cgs_read_register(ctx->cgs_device, address);
trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 35733d5c5193..a5e1a93ddaea 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -613,6 +613,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
&connector->base,
dev->mode_config.tile_property,
0);
+ connector->colorspace_property = master->base.colorspace_property;
+ if (connector->colorspace_property)
+ drm_connector_attach_colorspace_property(connector);
drm_connector_set_path_property(connector, pathprop);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 60f251cf973b..beed7adbbd43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -177,7 +177,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
- .dram_clock_change_latency_us = 11.72,
+ .dram_clock_change_latency_us = 34.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index e4f333d4fb54..a201dbb743d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -215,7 +215,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
- .dram_clock_change_latency_us = 11.72,
+ .dram_clock_change_latency_us = 34,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 5295f52e4fc8..dcced89c07b3 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -1439,3 +1439,75 @@ void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
}
}
}
+
+static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
+{
+ /* Calculate average pixel count per TU, return false if under ~2.00 to
+ * avoid empty TUs. This is only required for DPIA tunneling as empty TUs
+ * are legal to generate for native DP links. Assume TU size 64 as there
+ * is currently no scenario where it's reprogrammed from HW default.
+ * MTPs have no such limitation, so this does not affect MST use cases.
+ */
+ unsigned int pix_clk_mhz;
+ unsigned int symclk_mhz;
+ unsigned int avg_pix_per_tu_x1000;
+ unsigned int tu_size_bytes = 64;
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
+ const struct dc *dc = pipe_ctx->stream->link->dc;
+
+ if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ return false;
+
+ // Not necessary for MST configurations
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ return false;
+
+ pix_clk_mhz = timing->pix_clk_100hz / 10000;
+
+ // If this is true, can't block due to dynamic ODM
+ if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz)
+ return false;
+
+ switch (link_settings->link_rate) {
+ case LINK_RATE_LOW:
+ symclk_mhz = 162;
+ break;
+ case LINK_RATE_HIGH:
+ symclk_mhz = 270;
+ break;
+ case LINK_RATE_HIGH2:
+ symclk_mhz = 540;
+ break;
+ case LINK_RATE_HIGH3:
+ symclk_mhz = 810;
+ break;
+ default:
+ // We shouldn't be tunneling any other rates, something is wrong
+ ASSERT(0);
+ return false;
+ }
+
+ avg_pix_per_tu_x1000 = (1000 * pix_clk_mhz * tu_size_bytes)
+ / (symclk_mhz * link_settings->lane_count);
+
+ // Add small empirically-decided margin to account for potential jitter
+ return (avg_pix_per_tu_x1000 < 2020);
+}
+
+bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
+ return false;
+
+ if (should_avoid_empty_tu(pipe_ctx))
+ return false;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
+ dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ return true;
+
+ return false;
+}
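The new should_avoid_empty_tu() check reduces to one fixed-point division; the following standalone sketch (not driver code) runs the same arithmetic with made-up numbers — a small 25 MHz mode tunneled over an HBR3 x4 DPIA link — to show when the x1000 average drops below the 2.020 margin used above.

/*
 * Standalone illustration of the avg-pixels-per-TU calculation above.
 * The pixel clock, link rate and lane count are illustrative only;
 * the TU size of 64 and the 2020 (x1000) margin mirror the hunk above.
 */
#include <stdio.h>

int main(void)
{
	unsigned int pix_clk_mhz = 25;     /* e.g. a 640x480@60 mode, truncated as in the driver */
	unsigned int symclk_mhz = 810;     /* LINK_RATE_HIGH3 */
	unsigned int lane_count = 4;
	unsigned int tu_size_bytes = 64;

	unsigned int avg_pix_per_tu_x1000 =
		(1000 * pix_clk_mhz * tu_size_bytes) / (symclk_mhz * lane_count);

	/* 1,600,000 / 3,240 = 493 -> well under 2020, so empty TUs must be avoided */
	printf("avg pixels per TU (x1000): %u -> %s\n", avg_pix_per_tu_x1000,
	       avg_pix_per_tu_x1000 < 2020 ? "avoid empty TUs" : "TU occupancy OK");
	return 0;
}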
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index a731c8880d60..f0ea7d1511ae 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -95,4 +95,6 @@ void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
+bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index df3bf77f3fb4..199781233fd5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -158,7 +158,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
- .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index a01d0842bf8e..d487dfcd219b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1590,9 +1590,17 @@ static bool retrieve_link_cap(struct dc_link *link)
return false;
}
- if (dp_is_lttpr_present(link))
+ if (dp_is_lttpr_present(link)) {
configure_lttpr_mode_transparent(link);
+ // Echo TOTAL_LTTPR_CNT back downstream
+ core_link_write_dpcd(
+ link,
+ DP_TOTAL_LTTPR_CNT,
+ &link->dpcd_caps.lttpr_caps.phy_repeater_cnt,
+ sizeof(link->dpcd_caps.lttpr_caps.phy_repeater_cnt));
+ }
+
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index 914f28e9f224..aee5170f5fb2 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -177,4 +177,9 @@ enum dpcd_psr_sink_states {
#define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379
#define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A
+/* Remove once drm_dp_helper.h is updated upstream */
+#ifndef DP_TOTAL_LTTPR_CNT
+#define DP_TOTAL_LTTPR_CNT 0xF000A /* 2.1 */
+#endif
+
#endif /* __DAL_DPCD_DEFS_H__ */
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 1acb2d2c5597..571691837200 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -3583,7 +3583,7 @@ struct atom_gpio_voltage_object_v4
uint8_t phase_delay_us; // phase delay in unit of micro second
uint8_t reserved;
uint32_t gpio_mask_val; // GPIO Mask value
- struct atom_voltage_gpio_map_lut voltage_gpio_lut[1];
+ struct atom_voltage_gpio_map_lut voltage_gpio_lut[] __counted_by(gpio_entry_num);
};
struct atom_svid2_voltage_object_v4
diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
index 2e8e6c9875f6..f83ace2d7ec3 100644
--- a/drivers/gpu/drm/amd/include/pptable.h
+++ b/drivers/gpu/drm/amd/include/pptable.h
@@ -477,31 +477,30 @@ typedef struct _ATOM_PPLIB_STATE_V2
} ATOM_PPLIB_STATE_V2;
typedef struct _StateArray{
- //how many states we have
- UCHAR ucNumEntries;
-
- ATOM_PPLIB_STATE_V2 states[1];
+ //how many states we have
+ UCHAR ucNumEntries;
+
+ ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */;
}StateArray;
typedef struct _ClockInfoArray{
- //how many clock levels we have
- UCHAR ucNumEntries;
-
- //sizeof(ATOM_PPLIB_CLOCK_INFO)
- UCHAR ucEntrySize;
-
- UCHAR clockInfo[];
+ //how many clock levels we have
+ UCHAR ucNumEntries;
+
+ //sizeof(ATOM_PPLIB_CLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ UCHAR clockInfo[];
}ClockInfoArray;
typedef struct _NonClockInfoArray{
+ //how many non-clock levels we have. normally should be same as number of states
+ UCHAR ucNumEntries;
+ //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+ UCHAR ucEntrySize;
- //how many non-clock levels we have. normally should be same as number of states
- UCHAR ucNumEntries;
- //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
- UCHAR ucEntrySize;
-
- ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[];
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
}NonClockInfoArray;
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
@@ -513,8 +512,10 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
+ // Number of entries.
+ UCHAR ucNumEntries;
+ // Dynamically allocate entries.
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[] __counted_by(ucNumEntries);
}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
@@ -529,8 +530,10 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
+ // Number of entries.
+ UCHAR ucNumEntries;
+ // Dynamically allocate entries.
+ ATOM_PPLIB_Clock_Voltage_Limit_Record entries[] __counted_by(ucNumEntries);
}ATOM_PPLIB_Clock_Voltage_Limit_Table;
union _ATOM_PPLIB_CAC_Leakage_Record
@@ -553,8 +556,10 @@ typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
typedef struct _ATOM_PPLIB_CAC_Leakage_Table
{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
+ // Number of entries.
+ UCHAR ucNumEntries;
+ // Dynamically allocate entries.
+ ATOM_PPLIB_CAC_Leakage_Record entries[] __counted_by(ucNumEntries);
}ATOM_PPLIB_CAC_Leakage_Table;
typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
@@ -568,8 +573,10 @@ typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
+ // Number of entries.
+ UCHAR ucNumEntries;
+ // Dynamically allocate entries.
+ ATOM_PPLIB_PhaseSheddingLimits_Record entries[] __counted_by(ucNumEntries);
}ATOM_PPLIB_PhaseSheddingLimits_Table;
typedef struct _VCEClockInfo{
@@ -580,8 +587,8 @@ typedef struct _VCEClockInfo{
}VCEClockInfo;
typedef struct _VCEClockInfoArray{
- UCHAR ucNumEntries;
- VCEClockInfo entries[1];
+ UCHAR ucNumEntries;
+ VCEClockInfo entries[] __counted_by(ucNumEntries);
}VCEClockInfoArray;
typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
@@ -592,8 +599,8 @@ typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
{
- UCHAR numEntries;
- ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
typedef struct _ATOM_PPLIB_VCE_State_Record
@@ -604,8 +611,8 @@ typedef struct _ATOM_PPLIB_VCE_State_Record
typedef struct _ATOM_PPLIB_VCE_State_Table
{
- UCHAR numEntries;
- ATOM_PPLIB_VCE_State_Record entries[1];
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_State_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_VCE_State_Table;
@@ -626,8 +633,8 @@ typedef struct _UVDClockInfo{
}UVDClockInfo;
typedef struct _UVDClockInfoArray{
- UCHAR ucNumEntries;
- UVDClockInfo entries[1];
+ UCHAR ucNumEntries;
+ UVDClockInfo entries[] __counted_by(ucNumEntries);
}UVDClockInfoArray;
typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
@@ -638,8 +645,8 @@ typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
{
- UCHAR numEntries;
- ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+ UCHAR numEntries;
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
typedef struct _ATOM_PPLIB_UVD_Table
@@ -657,8 +664,8 @@ typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
- UCHAR numEntries;
- ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[];
+ UCHAR numEntries;
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
typedef struct _ATOM_PPLIB_SAMU_Table
@@ -675,8 +682,8 @@ typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
}ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
- UCHAR numEntries;
- ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
+ UCHAR numEntries;
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
typedef struct _ATOM_PPLIB_ACP_Table
@@ -743,9 +750,9 @@ typedef struct ATOM_PPLIB_VQ_Budgeting_Record{
} ATOM_PPLIB_VQ_Budgeting_Record;
typedef struct ATOM_PPLIB_VQ_Budgeting_Table {
- UCHAR revid;
- UCHAR numEntries;
- ATOM_PPLIB_VQ_Budgeting_Record entries[1];
+ UCHAR revid;
+ UCHAR numEntries;
+ ATOM_PPLIB_VQ_Budgeting_Record entries[] __counted_by(numEntries);
} ATOM_PPLIB_VQ_Budgeting_Table;
#pragma pack()
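The pptable.h hunks above all follow the same conversion: a one-element "fake flexible" array becomes a true flexible array member annotated with __counted_by(), so fortified and UBSAN bounds checks can compare accesses against the runtime count. Below is a hedged sketch of that pattern with a hypothetical struct; it is not part of the patch.

/*
 * Hedged sketch of the __counted_by() flexible-array pattern applied above.
 * "example_table" and "example_table_alloc" are hypothetical names;
 * __counted_by() comes in via the kernel's compiler attribute headers.
 */
#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>		/* kzalloc() */
#include <linux/types.h>

struct example_table {
	u8  num_entries;
	u32 entries[] __counted_by(num_entries);
};

static struct example_table *example_table_alloc(u8 count)
{
	struct example_table *t;

	/* struct_size() computes header + count * sizeof(entry) without overflow. */
	t = kzalloc(struct_size(t, entries, count), GFP_KERNEL);
	if (!t)
		return NULL;

	/* The counter must be valid before the flexible array is indexed. */
	t->num_entries = count;
	return t;
}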
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 6bb42d04b247..e8b6989a40f3 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -164,6 +164,8 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
if (table[i].ulSupportedSCLK != 0) {
+ if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
+ continue;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
table[i].usVoltageID;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 7789b313285c..e1796ecf9c05 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -324,6 +324,18 @@ static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
return ret;
}
+static int smu_set_mall_enable(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->ppt_funcs->set_mall_enable)
+ return 0;
+
+ ret = smu->ppt_funcs->set_mall_enable(smu);
+
+ return ret;
+}
+
/**
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
*
@@ -1791,6 +1803,7 @@ static int smu_hw_init(void *handle)
smu_dpm_set_jpeg_enable(smu, true);
smu_dpm_set_vpe_enable(smu, true);
smu_dpm_set_umsch_mm_enable(smu, true);
+ smu_set_mall_enable(smu);
smu_set_gfx_cgpg(smu, true);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 0917dec8efe3..64ccdb5f14ea 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1395,6 +1395,11 @@ struct pptable_funcs {
int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);
/**
+ * @set_mall_enable: Init MALL power gating control.
+ */
+ int (*set_mall_enable)(struct smu_context *smu);
+
+ /**
* @notify_rlc_state: Notify RLC power state to SMU.
*/
int (*notify_rlc_state)(struct smu_context *smu, bool en);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
index c4dc5881d8df..e7f5ef49049f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
@@ -106,8 +106,8 @@
#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
#define PPSMC_MSG_SetSoftMaxVpe 0x36 ///<
#define PPSMC_MSG_SetSoftMinVpe 0x37 ///<
-#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache
-#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache
+#define PPSMC_MSG_MALLPowerController 0x38 ///< Set MALL control
+#define PPSMC_MSG_MALLPowerState 0x39 ///< Enter/Exit MALL PG
#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages
/** @}*/
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index c48214e3dc8e..2e32b085824a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -272,7 +272,9 @@
__SMU_DUMMY_MAP(SetSoftMinVpe), \
__SMU_DUMMY_MAP(GetMetricsVersion), \
__SMU_DUMMY_MAP(EnableUCLKShadow), \
- __SMU_DUMMY_MAP(RmaDueToBadPageThreshold),
+ __SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \
+ __SMU_DUMMY_MAP(MALLPowerController), \
+ __SMU_DUMMY_MAP(MALLPowerState),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index bc241b593db1..b6257f34a7c6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -226,15 +226,17 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!en && adev->in_s4) {
- /* Adds a GFX reset as workaround just before sending the
- * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
- * an invalid state.
- */
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
- SMU_RESET_MODE_2, NULL);
- if (ret)
- return ret;
+ if (!en && !adev->in_s0ix) {
+ if (adev->in_s4) {
+ /* Adds a GFX reset as workaround just before sending the
+ * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
+ * an invalid state.
+ */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2, NULL);
+ if (ret)
+ return ret;
+ }
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index e4419e1561ef..18abfbd6d059 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -52,6 +52,19 @@
#define mmMP1_SMN_C2PMSG_90 0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+/* MALLPowerController message arguments (Defines for the Cache mode control) */
+#define SMU_MALL_PMFW_CONTROL 0
+#define SMU_MALL_DRIVER_CONTROL 1
+
+/*
+ * MALLPowerState message arguments
+ * (Defines for the Allocate/Release Cache mode if in driver mode)
+ */
+#define SMU_MALL_EXIT_PG 0
+#define SMU_MALL_ENTER_PG 1
+
+#define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
@@ -66,6 +79,12 @@
FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \
FEATURE_MASK(FEATURE_VPE_DPM_BIT))
+enum smu_mall_pg_config {
+ SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0,
+ SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON = 1,
+ SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF = 2,
+};
+
static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
@@ -113,6 +132,8 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PowerDownUmsch, PPSMC_MSG_PowerDownUmsch, 1),
MSG_MAP(SetSoftMaxVpe, PPSMC_MSG_SetSoftMaxVpe, 1),
MSG_MAP(SetSoftMinVpe, PPSMC_MSG_SetSoftMinVpe, 1),
+ MSG_MAP(MALLPowerController, PPSMC_MSG_MALLPowerController, 1),
+ MSG_MAP(MALLPowerState, PPSMC_MSG_MALLPowerState, 1),
};
static struct cmn2asic_mapping smu_v14_0_0_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -1423,6 +1444,57 @@ static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_cl
return 0;
}
+static int smu_v14_0_1_init_mall_power_gating(struct smu_context *smu, enum smu_mall_pg_config pg_config)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (pg_config == SMU_MALL_PG_CONFIG_PMFW_CONTROL) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController,
+ SMU_MALL_PMFW_CONTROL, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Init MALL PMFW CONTROL Failure\n");
+ return ret;
+ }
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController,
+ SMU_MALL_DRIVER_CONTROL, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Init MALL Driver CONTROL Failure\n");
+ return ret;
+ }
+
+ if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState,
+ SMU_MALL_EXIT_PG, NULL);
+ if (ret) {
+ dev_err(adev->dev, "EXIT MALL PG Failure\n");
+ return ret;
+ }
+ } else if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState,
+ SMU_MALL_ENTER_PG, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Enter MALL PG Failure\n");
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_common_set_mall_enable(struct smu_context *smu)
+{
+ enum smu_mall_pg_config pg_config = SMU_MALL_PG_CONFIG_DEFAULT;
+ int ret = 0;
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ ret = smu_v14_0_1_init_mall_power_gating(smu, pg_config);
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@@ -1454,6 +1526,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
+ .set_mall_enable = smu_v14_0_common_set_mall_enable,
};
static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 706265220292..90703f4542ab 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1562,7 +1562,6 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
smu_i2c->port = i;
mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE;
- control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v14_0_2_i2c_algo;
snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
index d8e449e6ebda..50cb8f7ee6b2 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
@@ -72,11 +72,6 @@ struct gamma_curve_sector {
u32 segment_width;
};
-struct gamma_curve_segment {
- u32 start;
- u32 end;
-};
-
static struct gamma_curve_sector sector_tbl[] = {
{ 0, 4, 4 },
{ 16, 4, 4 },
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 14ee79becacb..5ba62e637a61 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -12,10 +12,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
-#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#endif
#include <drm/drm_print.h>
@@ -43,7 +41,6 @@ static int komeda_register_show(struct seq_file *sf, void *x)
DEFINE_SHOW_ATTRIBUTE(komeda_register);
-#ifdef CONFIG_DEBUG_FS
static void komeda_debugfs_init(struct komeda_dev *mdev)
{
if (!debugfs_initialized())
@@ -55,7 +52,6 @@ static void komeda_debugfs_init(struct komeda_dev *mdev)
debugfs_create_x16("err_verbosity", 0664, mdev->debugfs_root,
&mdev->err_verbosity);
}
-#endif
static ssize_t
core_id_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -265,9 +261,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
mdev->err_verbosity = KOMEDA_DEV_PRINT_ERR_EVENTS;
-#ifdef CONFIG_DEBUG_FS
komeda_debugfs_init(mdev);
-#endif
return mdev;
@@ -286,9 +280,7 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group);
-#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(mdev->debugfs_root);
-#endif
if (mdev->aclk)
clk_prepare_enable(mdev->aclk);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index f3e744172673..f4e76b46ca32 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -259,7 +259,7 @@ komeda_component_get_avail_scaler(struct komeda_component *c,
u32 avail_scalers;
pipe_st = komeda_pipeline_get_state(c->pipeline, state);
- if (!pipe_st)
+ if (IS_ERR_OR_NULL(pipe_st))
return NULL;
avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 32506524d9a2..fe5fb08c9fc4 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -360,9 +360,12 @@ EXPORT_SYMBOL(drm_panel_bridge_set_orientation);
static void devm_drm_panel_bridge_release(struct device *dev, void *res)
{
- struct drm_bridge **bridge = res;
+ struct drm_bridge *bridge = *(struct drm_bridge **)res;
- drm_panel_bridge_remove(*bridge);
+ if (!bridge)
+ return;
+
+ drm_bridge_remove(bridge);
}
/**
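The panel-bridge fix above depends on how devres works: devres_alloc() returns a zero-initialised slot, the release callback receives a pointer to that slot, and the slot may still hold NULL if setup failed before it was filled in — hence the added NULL check. A hedged, generic sketch of that pattern follows; all "example_" names are hypothetical and this is not the panel-bridge code itself.

/*
 * Hedged illustration of the devres slot pattern relied on above.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct example_obj { int dummy; };

static void devm_example_obj_release(struct device *dev, void *res)
{
	struct example_obj *obj = *(struct example_obj **)res;

	if (!obj)		/* slot was never filled in */
		return;
	kfree(obj);
}

static struct example_obj *devm_example_obj_create(struct device *dev)
{
	struct example_obj **slot, *obj;

	slot = devres_alloc(devm_example_obj_release, sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return ERR_PTR(-ENOMEM);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		devres_free(slot);
		return ERR_PTR(-ENOMEM);
	}

	*slot = obj;		/* fill the slot only on success */
	devres_add(dev, slot);	/* release runs automatically on driver detach */
	return obj;
}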
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 94f8c34fc293..6a8e45e9d0ec 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -239,7 +239,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
if (size < chunk_size)
return -EINVAL;
- if (chunk_size < PAGE_SIZE)
+ if (chunk_size < SZ_4K)
return -EINVAL;
if (!is_power_of_2(chunk_size))
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d612133e2cf7..117237d3528b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -524,6 +524,9 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
if (!info)
return ERR_PTR(-ENOMEM);
+ if (!drm_leak_fbdev_smem)
+ info->flags |= FBINFO_HIDE_SMEM_START;
+
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret)
goto err_release;
@@ -1860,9 +1863,6 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
info = fb_helper->info;
info->var.pixclock = 0;
- if (!drm_leak_fbdev_smem)
- info->flags |= FBINFO_HIDE_SMEM_START;
-
/* Need to drop locks to avoid recursive deadlock in
* register_framebuffer. This is ok because the only thing left to do is
* register the fbdev emulation instance in kernel_fb_helper_list. */
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index 6c9427bb4053..13cd754af311 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -130,7 +130,10 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_size = sizes->surface_height * fb->pitches[0];
info->screen_buffer = map.vaddr;
- info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
+ if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
+ info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ }
info->fix.smem_len = info->screen_size;
return 0;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 638ffa4444f5..714e42b05108 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -469,14 +469,12 @@ void drm_file_update_pid(struct drm_file *filp)
dev = filp->minor->dev;
mutex_lock(&dev->filelist_mutex);
+ get_pid(pid);
old = rcu_replace_pointer(filp->pid, pid, 1);
mutex_unlock(&dev->filelist_mutex);
- if (pid != old) {
- get_pid(pid);
- synchronize_rcu();
- put_pid(old);
- }
+ synchronize_rcu();
+ put_pid(old);
}
/**
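The drm_file.c change above converges on the standard RCU replace-and-reclaim idiom for a refcounted pointer: take a reference on the new object, publish it with rcu_replace_pointer() under the updater lock, wait one grace period, then drop the old reference. A minimal hedged sketch, with hypothetical "example_" names, is shown below.

/*
 * Hedged sketch of the RCU pointer replacement idiom used above.
 */
#include <linux/mutex.h>
#include <linux/pid.h>
#include <linux/rcupdate.h>

struct example_holder {
	struct pid __rcu *pid;
	struct mutex lock;	/* serialises updaters */
};

static void example_update_pid(struct example_holder *h, struct pid *new_pid)
{
	struct pid *old;

	mutex_lock(&h->lock);
	get_pid(new_pid);				/* ref held by the published pointer */
	old = rcu_replace_pointer(h->pid, new_pid,
				  lockdep_is_held(&h->lock));
	mutex_unlock(&h->lock);

	synchronize_rcu();				/* readers of 'old' have finished */
	put_pid(old);					/* put_pid(NULL) is a no-op */
}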
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 177773bcdbfd..53c003983ad1 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -233,6 +233,8 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
dma_resv_assert_held(shmem->base.resv);
+ drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);
+
ret = drm_gem_shmem_get_pages(shmem);
return ret;
@@ -611,6 +613,9 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return ret;
}
+ if (is_cow_mapping(vma->vm_flags))
+ return -EINVAL;
+
dma_resv_lock(shmem->base.resv, NULL);
ret = drm_gem_shmem_get_pages(shmem);
dma_resv_unlock(shmem->base.resv);
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index aa93129c3397..2166208a961d 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -202,6 +202,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_BOARD_NAME, "NEXT"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO KUN */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "KUN"),
+ },
+ .driver_data = (void *)&lcd1600x2560_rightside_up,
}, { /* Chuwi HiBook (CWI514) */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index f48c4343f469..3e6d4c6aa877 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -285,7 +285,6 @@ struct platform_driver dp_driver = {
.remove_new = exynos_dp_remove,
.driver = {
.name = "exynos-dp",
- .owner = THIS_MODULE,
.pm = pm_ptr(&exynos_dp_pm_ops),
.of_match_table = exynos_dp_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index fab135308b70..11a720fef32b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -309,6 +309,7 @@ static int vidi_get_modes(struct drm_connector *connector)
struct vidi_context *ctx = ctx_from_connector(connector);
struct edid *edid;
int edid_len;
+ int count;
/*
* the edid data comes from user side and it would be set
@@ -328,7 +329,11 @@ static int vidi_get_modes(struct drm_connector *connector)
drm_connector_update_edid_property(connector, edid);
- return drm_add_edid_modes(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+
+ return count;
}
static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index e968824a4c72..1e26cd4f8347 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
int ret;
if (!hdata->ddc_adpt)
- return 0;
+ goto no_edid;
edid = drm_get_edid(connector, hdata->ddc_adpt);
if (!edid)
- return 0;
+ goto no_edid;
hdata->dvi_mode = !connector->display_info.is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
@@ -906,6 +906,9 @@ static int hdmi_get_modes(struct drm_connector *connector)
kfree(edid);
return ret;
+
+no_edid:
+ return drm_add_modes_noedid(connector, 640, 480);
}
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ed81e1466c4b..40e7d862675e 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -1252,17 +1252,6 @@ static const struct component_ops i915_audio_component_bind_ops = {
static void i915_audio_component_init(struct drm_i915_private *i915)
{
u32 aud_freq, aud_freq_init;
- int ret;
-
- ret = component_add_typed(i915->drm.dev,
- &i915_audio_component_bind_ops,
- I915_COMPONENT_AUDIO);
- if (ret < 0) {
- drm_err(&i915->drm,
- "failed to add audio component (%d)\n", ret);
- /* continue with reduced functionality */
- return;
- }
if (DISPLAY_VER(i915) >= 9) {
aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
@@ -1285,6 +1274,21 @@ static void i915_audio_component_init(struct drm_i915_private *i915)
/* init with current cdclk */
intel_audio_cdclk_change_post(i915);
+}
+
+static void i915_audio_component_register(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = component_add_typed(i915->drm.dev,
+ &i915_audio_component_bind_ops,
+ I915_COMPONENT_AUDIO);
+ if (ret < 0) {
+ drm_err(&i915->drm,
+ "failed to add audio component (%d)\n", ret);
+ /* continue with reduced functionality */
+ return;
+ }
i915->display.audio.component_registered = true;
}
@@ -1317,6 +1321,12 @@ void intel_audio_init(struct drm_i915_private *i915)
i915_audio_component_init(i915);
}
+void intel_audio_register(struct drm_i915_private *i915)
+{
+ if (!i915->display.audio.lpe.platdev)
+ i915_audio_component_register(i915);
+}
+
/**
* intel_audio_deinit() - deinitialize the audio driver
* @i915: the i915 drm device private data
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index 9327954b801e..576c061d72a4 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -28,6 +28,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder,
void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
+void intel_audio_register(struct drm_i915_private *i915);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 89bd032ed995..794b4af38055 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -540,6 +540,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_display_driver_enable_user_access(i915);
+ intel_audio_register(i915);
+
intel_display_debugfs_register(i915);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index e05e25cd4a94..5b3b6ae1e3d7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -442,6 +442,10 @@ bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ /* eDP MSO is not compatible with joiner */
+ if (intel_dp->mso_link_count)
+ return false;
+
return DISPLAY_VER(dev_priv) >= 12 ||
(DISPLAY_VER(dev_priv) == 11 &&
encoder->port != PORT_A);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 42619fc05de4..090724fa766c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -255,6 +255,7 @@ struct i915_execbuffer {
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
intel_wakeref_t wakeref;
+ intel_wakeref_t wakeref_gt0;
/** our requests to build */
struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
@@ -2685,6 +2686,7 @@ static int
eb_select_engine(struct i915_execbuffer *eb)
{
struct intel_context *ce, *child;
+ struct intel_gt *gt;
unsigned int idx;
int err;
@@ -2708,10 +2710,17 @@ eb_select_engine(struct i915_execbuffer *eb)
}
}
eb->num_batches = ce->parallel.number_children + 1;
+ gt = ce->engine->gt;
for_each_child(ce, child)
intel_context_get(child);
eb->wakeref = intel_gt_pm_get(ce->engine->gt);
+ /*
+ * Keep GT0 active on MTL so that i915_vma_parked() doesn't
+ * free VMAs while execbuf ioctl is validating VMAs.
+ */
+ if (gt->info.id)
+ eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
err = intel_context_alloc_state(ce);
@@ -2750,6 +2759,9 @@ eb_select_engine(struct i915_execbuffer *eb)
return err;
err:
+ if (gt->info.id)
+ intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0);
+
intel_gt_pm_put(ce->engine->gt, eb->wakeref);
for_each_child(ce, child)
intel_context_put(child);
@@ -2763,6 +2775,12 @@ eb_put_engine(struct i915_execbuffer *eb)
struct intel_context *child;
i915_vm_put(eb->context->vm);
+ /*
+ * This works in conjunction with eb_select_engine() to prevent
+ * i915_vma_parked() from interfering while execbuf validates vmas.
+ */
+ if (eb->gt->info.id)
+ intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
for_each_child(eb->context, child)
intel_context_put(child);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 3560a062d287..5d7446a48ae7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -284,7 +284,9 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
+ /* TODO: make DPT shrinkable when it has no bound vmas */
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
+ !obj->is_dpt;
}
static inline bool
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 65a931ea80e9..3527b8f446fe 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -196,7 +196,7 @@ static int verify_access(struct drm_i915_private *i915,
if (err)
goto out_file;
- mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, true);
+ mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, false);
vaddr = i915_gem_object_pin_map_unlocked(native_obj, mode);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index d650beb8ed22..20b9b04ec1e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -263,8 +263,13 @@ static void signal_irq_work(struct irq_work *work)
i915_request_put(rq);
}
+ /* Lazy irq enabling after HW submission */
if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
intel_breadcrumbs_arm_irq(b);
+
+ /* And confirm that we still want irqs enabled before we yield */
+ if (READ_ONCE(b->irq_armed) && !atomic_read(&b->active))
+ intel_breadcrumbs_disarm_irq(b);
}
struct intel_breadcrumbs *
@@ -315,13 +320,7 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
return;
/* Kick the work once more to drain the signalers, and disarm the irq */
- irq_work_sync(&b->irq_work);
- while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
- local_irq_disable();
- signal_irq_work(&b->irq_work);
- local_irq_enable();
- cond_resched();
- }
+ irq_work_queue(&b->irq_work);
}
void intel_breadcrumbs_free(struct kref *kref)
@@ -404,7 +403,7 @@ static void insert_breadcrumb(struct i915_request *rq)
* the request as it may have completed and raised the interrupt as
* we were attaching it into the lists.
*/
- if (!b->irq_armed || __i915_request_is_complete(rq))
+ if (!READ_ONCE(b->irq_armed) || __i915_request_is_complete(rq))
irq_work_queue(&b->irq_work);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 5c8e9ee3b008..3b740ca25000 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -885,6 +885,12 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
if (IS_DG2(gt->i915)) {
u8 first_ccs = __ffs(CCS_MASK(gt));
+ /*
+ * Store the number of active cslices before
+ * changing the CCS engine configuration
+ */
+ gt->ccs.cslices = CCS_MASK(gt);
+
/* Mask off all the CCS engine */
info->engine_mask &= ~GENMASK(CCS3, CCS0);
/* Put back in the first CCS engine */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 40371b8a9bbb..93bc1cc1ee7e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -298,6 +298,7 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
return;
GEM_BUG_ON(fence->vma != vma);
+ i915_active_wait(&fence->active);
GEM_BUG_ON(!i915_active_is_idle(&fence->active));
GEM_BUG_ON(atomic_read(&fence->pin_count));
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
index 99b71bb7da0a..3c62a44e9106 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
@@ -19,7 +19,7 @@ unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt)
/* Build the value for the fixed CCS load balancing */
for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
- if (CCS_MASK(gt) & BIT(cslice))
+ if (gt->ccs.cslices & BIT(cslice))
/*
* If available, assign the cslice
* to the first available engine...
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index def7dd0eb6f1..cfdd2ad5e954 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -207,6 +207,14 @@ struct intel_gt {
[MAX_ENGINE_INSTANCE + 1];
enum intel_submission_method submission_method;
+ struct {
+ /*
+ * Mask of the non fused CCS slices
+ * to be used for the load balancing
+ */
+ intel_engine_mask_t cslices;
+ } ccs;
+
/*
* Default address space (either GGTT or ppGTT depending on arch).
*
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index bebf28e3c479..525587cfe1af 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -29,9 +29,9 @@
*/
#define GUC_KLV_LEN_MIN 1u
-#define GUC_KLV_0_KEY (0xffff << 16)
-#define GUC_KLV_0_LEN (0xffff << 0)
-#define GUC_KLV_n_VALUE (0xffffffff << 0)
+#define GUC_KLV_0_KEY (0xffffu << 16)
+#define GUC_KLV_0_LEN (0xffffu << 0)
+#define GUC_KLV_n_VALUE (0xffffffffu << 0)
/**
* DOC: GuC Self Config KLVs
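The 'u' suffixes added above matter because 0xffff has type int, so 0xffff << 16 shifts a bit into the sign position (formally undefined for signed int) and, on typical implementations, the negative result sign-extends when widened to 64 bits. The small standalone program below (not driver code) demonstrates the difference.

/*
 * Illustration of the signed vs. unsigned mask literals fixed above.
 * The signed shift is technically undefined behaviour; the values shown
 * in the comments are what common compilers produce.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t signed_mask   = 0xffff << 16;	/* int; typically 0xffffffffffff0000 after widening */
	uint64_t unsigned_mask = 0xffffu << 16;	/* unsigned; 0x00000000ffff0000 */

	printf("signed:   0x%016llx\n", (unsigned long long)signed_mask);
	printf("unsigned: 0x%016llx\n", (unsigned long long)unsigned_mask);
	return 0;
}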
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 7ea244d876ca..9bb997dbb4b9 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -185,7 +185,7 @@ static int lima_gem_pin(struct drm_gem_object *obj)
if (bo->heap_size)
return -EINVAL;
- return drm_gem_shmem_pin(&bo->base);
+ return drm_gem_shmem_pin_locked(&bo->base);
}
static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index b5f605751b0a..de811e2265da 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -952,6 +952,13 @@ static void mtk_drm_remove(struct platform_device *pdev)
of_node_put(private->comp_node[i]);
}
+static void mtk_drm_shutdown(struct platform_device *pdev)
+{
+ struct mtk_drm_private *private = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(private->drm);
+}
+
static int mtk_drm_sys_prepare(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
@@ -983,6 +990,7 @@ static const struct dev_pm_ops mtk_drm_pm_ops = {
static struct platform_driver mtk_drm_platform_driver = {
.probe = mtk_drm_probe,
.remove_new = mtk_drm_remove,
+ .shutdown = mtk_drm_shutdown,
.driver = {
.name = "mediatek-drm",
.pm = &mtk_drm_pm_ops,
diff --git a/drivers/gpu/drm/msm/registers/gen_header.py b/drivers/gpu/drm/msm/registers/gen_header.py
index fc3bfdc991d2..3926485bb197 100644
--- a/drivers/gpu/drm/msm/registers/gen_header.py
+++ b/drivers/gpu/drm/msm/registers/gen_header.py
@@ -538,7 +538,7 @@ class Parser(object):
self.variants.add(reg.domain)
def do_validate(self, schemafile):
- if self.validate == False:
+ if not self.validate:
return
try:
@@ -948,7 +948,8 @@ def main():
parser = argparse.ArgumentParser()
parser.add_argument('--rnn', type=str, required=True)
parser.add_argument('--xml', type=str, required=True)
- parser.add_argument('--validate', action=argparse.BooleanOptionalAction)
+ parser.add_argument('--validate', default=False, action='store_true')
+ parser.add_argument('--no-validate', dest='validate', action='store_false')
subparsers = parser.add_subparsers()
subparsers.required = True
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 13705c5f1497..4b7497a8755c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -68,7 +68,7 @@ nv04_display_fini(struct drm_device *dev, bool runtime, bool suspend)
if (nv_two_heads(dev))
NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
- if (!runtime)
+ if (!runtime && !drm->headless)
cancel_work_sync(&drm->hpd_work);
if (!suspend)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 670c9739e5e1..2033214c4b78 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -209,6 +209,8 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
struct drm_display_mode *mode;
mode = drm_mode_duplicate(encoder->dev, tv_mode);
+ if (!mode)
+ continue;
mode->clock = tv_norm->tv_enc_mode.vrefresh *
mode->htotal / 1000 *
@@ -258,6 +260,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
if (modes[i].hdisplay == output_mode->hdisplay &&
modes[i].vdisplay == output_mode->vdisplay) {
mode = drm_mode_duplicate(encoder->dev, output_mode);
+ if (!mode)
+ continue;
mode->type |= DRM_MODE_TYPE_PREFERRED;
} else {
@@ -265,6 +269,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
modes[i].vdisplay, 60, false,
(output_mode->flags &
DRM_MODE_FLAG_INTERLACE), false);
+ if (!mode)
+ continue;
}
/* CVT modes are sometimes unsuitable... */
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 88728a0b2c25..674dc567e179 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2680,7 +2680,7 @@ nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
nv50_mstm_fini(nouveau_encoder(encoder));
}
- if (!runtime)
+ if (!runtime && !drm->headless)
cancel_work_sync(&drm->hpd_work);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 79cfab53f80e..8c3c1f1e01c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -43,11 +43,6 @@
#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
#define LOG_OLD_VALUE(x)
-struct init_exec {
- bool execute;
- bool repeat;
-};
-
static bool nv_cksum(const uint8_t *data, unsigned int length)
{
/*
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index aed5d5b51b43..d4725a968827 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -450,6 +450,9 @@ nouveau_display_hpd_resume(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->headless)
+ return;
+
spin_lock_irq(&drm->hpd_lock);
drm->hpd_pending = ~0;
spin_unlock_irq(&drm->hpd_lock);
@@ -635,7 +638,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
}
drm_connector_list_iter_end(&conn_iter);
- if (!runtime)
+ if (!runtime && !drm->headless)
cancel_work_sync(&drm->hpd_work);
drm_kms_helper_poll_disable(dev);
@@ -729,6 +732,7 @@ nouveau_display_create(struct drm_device *dev)
/* no display hw */
if (ret == -ENODEV) {
ret = 0;
+ drm->headless = true;
goto disp_create_err;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index e239c6bf4afa..25fca98a20bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -276,6 +276,7 @@ struct nouveau_drm {
/* modesetting */
struct nvbios vbios;
struct nouveau_display *display;
+ bool headless;
struct work_struct hpd_work;
spinlock_t hpd_lock;
u32 hpd_pending;
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 4d1aaee8fe15..1d19c87eaec1 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -142,11 +142,16 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_mthd_v0 mthd;
} *args;
+ u32 args_size;
u8 stack[128];
int ret;
- if (sizeof(*args) + size > sizeof(stack)) {
- if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
+ if (check_add_overflow(sizeof(*args), size, &args_size))
+ return -ENOMEM;
+
+ if (args_size > sizeof(stack)) {
+ args = kmalloc(args_size, GFP_KERNEL);
+ if (!args)
return -ENOMEM;
} else {
args = (void *)stack;
@@ -157,7 +162,7 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
args->mthd.method = mthd;
memcpy(args->mthd.data, data, size);
- ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
+ ret = nvif_object_ioctl(object, args, args_size, NULL);
memcpy(data, args->mthd.data, size);
if (args != (void *)stack)
kfree(args);
@@ -276,7 +281,15 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
object->map.size = 0;
if (parent) {
- if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) {
+ u32 args_size;
+
+ if (check_add_overflow(sizeof(*args), size, &args_size)) {
+ nvif_object_dtor(object);
+ return -ENOMEM;
+ }
+
+ args = kmalloc(args_size, GFP_KERNEL);
+ if (!args) {
nvif_object_dtor(object);
return -ENOMEM;
}
@@ -293,8 +306,7 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
args->new.oclass = oclass;
memcpy(args->new.data, data, size);
- ret = nvif_object_ioctl(parent, args, sizeof(*args) + size,
- &object->priv);
+ ret = nvif_object_ioctl(parent, args, args_size, &object->priv);
memcpy(data, args->new.data, size);
kfree(args);
if (ret == 0)
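The nvif changes above guard a user-influenced size with check_add_overflow() from <linux/overflow.h> before allocating; the helper returns true when the sum wraps the destination type. A hedged sketch of the same idiom with hypothetical names follows.

/*
 * Hedged sketch of the overflow-checked allocation idiom adopted above.
 * "example_hdr" and "example_alloc_msg" are hypothetical.
 */
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_hdr {
	u32 method;
	u8  data[];
};

static void *example_alloc_msg(u32 payload_size)
{
	u32 total;

	if (check_add_overflow(sizeof(struct example_hdr), payload_size, &total))
		return NULL;	/* a caller-controlled size would wrap u32 */

	return kmalloc(total, GFP_KERNEL);
}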
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 982324ef5a41..2ae0eb0638f3 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -340,6 +340,8 @@ config DRM_PANEL_LG_SW43408
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HELPER
help
Say Y here if you want to enable support for LG sw43408 panel.
The panel has a 1080x2160@60Hz resolution and uses 24 bit RGB per
diff --git a/drivers/gpu/drm/panel/panel-lg-sw43408.c b/drivers/gpu/drm/panel/panel-lg-sw43408.c
index 115f4702d59f..2b3a73696dce 100644
--- a/drivers/gpu/drm/panel/panel-lg-sw43408.c
+++ b/drivers/gpu/drm/panel/panel-lg-sw43408.c
@@ -182,7 +182,7 @@ static int sw43408_backlight_update_status(struct backlight_device *bl)
return mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
}
-const struct backlight_ops sw43408_backlight_ops = {
+static const struct backlight_ops sw43408_backlight_ops = {
.update_status = sw43408_backlight_update_status,
};
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index dcb6d0b6ced0..c8cdc8356c58 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2752,6 +2752,7 @@ static const struct display_timing koe_tx26d202vm0bwa_timing = {
.vfront_porch = { 3, 5, 10 },
.vback_porch = { 2, 5, 10 },
.vsync_len = { 5, 5, 5 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc koe_tx26d202vm0bwa = {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 88e80fe98112..28bfc48a9127 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -282,15 +282,15 @@ static const struct drm_display_mode et028013dma_mode = {
static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = {
.clock = 6000,
.hdisplay = 240,
- .hsync_start = 240 + 28,
- .hsync_end = 240 + 28 + 10,
- .htotal = 240 + 28 + 10 + 10,
+ .hsync_start = 240 + 38,
+ .hsync_end = 240 + 38 + 10,
+ .htotal = 240 + 38 + 10 + 10,
.vdisplay = 280,
- .vsync_start = 280 + 8,
- .vsync_end = 280 + 8 + 4,
- .vtotal = 280 + 8 + 4 + 4,
- .width_mm = 43,
- .height_mm = 37,
+ .vsync_start = 280 + 48,
+ .vsync_end = 280 + 48 + 4,
+ .vtotal = 280 + 48 + 4 + 4,
+ .width_mm = 37,
+ .height_mm = 43,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
@@ -643,7 +643,9 @@ static int st7789v_probe(struct spi_device *spi)
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
- of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
+ ret = of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "Failed to get orientation\n");
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index d47b40b82b0b..8e0ff3efede7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -192,7 +192,7 @@ static int panfrost_gem_pin(struct drm_gem_object *obj)
if (bo->is_heap)
return -EINVAL;
- return drm_gem_shmem_pin(&bo->base);
+ return drm_gem_shmem_pin_locked(&bo->base);
}
static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 21d27e6235f3..b11f7c5bbcbe 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1619,6 +1619,8 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
if (table[i].ulSupportedSCLK != 0) {
+ if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
+ continue;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
table[i].usVoltageID;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
index e83c3e52251d..0250d5f00bf1 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
@@ -171,6 +171,13 @@ static void shmob_drm_remove(struct platform_device *pdev)
drm_kms_helper_poll_fini(ddev);
}
+static void shmob_drm_shutdown(struct platform_device *pdev)
+{
+ struct shmob_drm_device *sdev = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(&sdev->ddev);
+}
+
static int shmob_drm_probe(struct platform_device *pdev)
{
struct shmob_drm_platform_data *pdata = pdev->dev.platform_data;
@@ -273,6 +280,7 @@ static const struct of_device_id shmob_drm_of_table[] __maybe_unused = {
static struct platform_driver shmob_drm_platform_driver = {
.probe = shmob_drm_probe,
.remove_new = shmob_drm_remove,
+ .shutdown = shmob_drm_shutdown,
.driver = {
.name = "shmob-drm",
.of_match_table = of_match_ptr(shmob_drm_of_table),
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index b3be68b03610..dd8fb9f8341a 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -505,8 +505,8 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
* Eventually we will have a fully 50% fragmented mm.
*/
- mm_size = PAGE_SIZE << max_order;
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ mm_size = SZ_4K << max_order;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
"buddy_init failed\n");
KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
@@ -520,7 +520,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
}
for (order = top; order--;) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
mm_size, size, size,
&tmp, flags),
@@ -534,7 +534,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
}
/* There should be one final page for this sub-allocation */
- size = get_size(0, PAGE_SIZE);
+ size = get_size(0, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM for hole\n");
@@ -544,7 +544,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
list_move_tail(&block->link, &holes);
- size = get_size(top, PAGE_SIZE);
+ size = get_size(top, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
@@ -555,7 +555,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
/* Nothing larger than blocks of chunk_size now available */
for (order = 1; order <= max_order; order++) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded at order %d, it should be full!",
@@ -584,14 +584,14 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
* page left.
*/
- mm_size = PAGE_SIZE << max_order;
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ mm_size = SZ_4K << max_order;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
"buddy_init failed\n");
KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
for (order = 0; order < max_order; order++) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM with order=%d\n",
@@ -604,7 +604,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
}
/* And now the last remaining block available */
- size = get_size(0, PAGE_SIZE);
+ size = get_size(0, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM on final alloc\n");
@@ -616,7 +616,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
/* Should be completely full! */
for (order = max_order; order--;) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded, it should be full!");
@@ -632,7 +632,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
list_del(&block->link);
drm_buddy_free_block(&mm, block);
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM with order=%d\n",
@@ -647,7 +647,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
}
/* To confirm, now the whole mm should be available */
- size = get_size(max_order, PAGE_SIZE);
+ size = get_size(max_order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
@@ -678,15 +678,15 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
* try to allocate them all.
*/
- mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
+ mm_size = SZ_4K * ((1 << (max_order + 1)) - 1);
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
"buddy_init failed\n");
KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
for (order = 0; order <= max_order; order++) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM with order=%d\n",
@@ -699,7 +699,7 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
}
/* Should be completely full! */
- size = get_size(0, PAGE_SIZE);
+ size = get_size(0, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded, it should be full!");
@@ -716,7 +716,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
LIST_HEAD(allocated);
struct drm_buddy mm;
- KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K));
KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
"mm.max_order(%d) != %d\n", mm.max_order,
@@ -724,7 +724,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
size = mm.chunk_size << mm.max_order;
KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
- PAGE_SIZE, &allocated, flags));
+ mm.chunk_size, &allocated, flags));
block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
KUNIT_EXPECT_TRUE(test, block);
@@ -734,10 +734,10 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
drm_buddy_block_order(block), mm.max_order);
KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
- BIT_ULL(mm.max_order) * PAGE_SIZE,
+ BIT_ULL(mm.max_order) * mm.chunk_size,
"block size(%llu) != %llu\n",
drm_buddy_block_size(&mm, block),
- BIT_ULL(mm.max_order) * PAGE_SIZE);
+ BIT_ULL(mm.max_order) * mm.chunk_size);
drm_buddy_free_list(&mm, &allocated, 0);
drm_buddy_fini(&mm);
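
The test changes above pin the buddy allocator's chunk to a fixed 4 KiB instead of the build's PAGE_SIZE, so the assertions now exercise the arithmetic below regardless of page size; a small standalone sketch (the names and the max_order value are illustrative only):

#include <stdio.h>

#define SZ_4K 4096ULL

/* Size of a buddy block of the given order for a given chunk size. */
static unsigned long long block_size(unsigned int order, unsigned long long chunk)
{
	return chunk << order;
}

int main(void)
{
	unsigned int max_order = 16;			/* illustrative, not the test's constant */
	unsigned long long chunk = SZ_4K;		/* fixed chunk, independent of PAGE_SIZE */
	unsigned long long mm_size = chunk << max_order;
	unsigned int order;

	printf("chunk=%llu bytes, max_order=%u, mm_size=%llu bytes\n",
	       chunk, max_order, mm_size);
	for (order = 0; order <= 4; order++)
		printf("order %u block: %llu bytes\n", order, block_size(order, chunk));
	return 0;
}
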
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index faddae3d6ac2..6f1ac940cbae 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -2,7 +2,7 @@
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
depends on DRM && PCI && MMU
- depends on X86 || ARM64
+ depends on (X86 && HYPERVISOR_GUEST) || ARM64
select DRM_TTM
select DRM_TTM_HELPER
select MAPPING_DIRTY_HELPERS
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8f1730aeacc9..823d8d2da17c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -746,7 +746,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
dev->vram_size = pci_resource_len(pdev, 2);
drm_info(&dev->drm,
- "Register MMIO at 0x%pa size is %llu kiB\n",
+ "Register MMIO at 0x%pa size is %llu KiB\n",
&rmmio_start, (uint64_t)rmmio_size / 1024);
dev->rmmio = devm_ioremap(dev->drm.dev,
rmmio_start,
@@ -765,7 +765,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
fifo_size = pci_resource_len(pdev, 2);
drm_info(&dev->drm,
- "FIFO at %pa size is %llu kiB\n",
+ "FIFO at %pa size is %llu KiB\n",
&fifo_start, (uint64_t)fifo_size / 1024);
dev->fifo_mem = devm_memremap(dev->drm.dev,
fifo_start,
@@ -790,7 +790,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
* SVGA_REG_VRAM_SIZE.
*/
drm_info(&dev->drm,
- "VRAM at %pa size is %llu kiB\n",
+ "VRAM at %pa size is %llu KiB\n",
&dev->vram_start, (uint64_t)dev->vram_size / 1024);
return 0;
@@ -960,13 +960,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
vmw_read(dev_priv,
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
- /*
- * Workaround for low memory 2D VMs to compensate for the
- * allocation taken by fbdev
- */
- if (!(dev_priv->capabilities & SVGA_CAP_3D))
- mem_size *= 3;
-
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->max_primary_mem =
vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
@@ -991,13 +984,13 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->max_primary_mem = dev_priv->vram_size;
}
drm_info(&dev_priv->drm,
- "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n",
+ "Legacy memory limits: VRAM = %llu KiB, FIFO = %llu KiB, surface = %u KiB\n",
(u64)dev_priv->vram_size / 1024,
(u64)dev_priv->fifo_mem_size / 1024,
dev_priv->memory_size / 1024);
drm_info(&dev_priv->drm,
- "MOB limits: max mob size = %u kB, max mob pages = %u\n",
+ "MOB limits: max mob size = %u KiB, max mob pages = %u\n",
dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages);
ret = vmw_dma_masks(dev_priv);
@@ -1015,7 +1008,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
(unsigned)dev_priv->max_gmr_pages);
}
drm_info(&dev_priv->drm,
- "Maximum display memory size is %llu kiB\n",
+ "Maximum display memory size is %llu KiB\n",
(uint64_t)dev_priv->max_primary_mem / 1024);
/* Need mmio memory to check for fifo pitchlock cap. */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4ecaea0026fc..a1ce41e1c468 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1043,9 +1043,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth);
-bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
- uint32_t pitch,
- uint32_t height);
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index a0b47c9b33f5..5bd967fbcf55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -94,14 +94,14 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
} else
new_max_pages = gman->max_gmr_pages * 2;
if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) {
- DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n",
+ DRM_WARN("vmwgfx: increasing guest mob limits to %u KiB.\n",
((new_max_pages) << (PAGE_SHIFT - 10)));
gman->max_gmr_pages = new_max_pages;
} else {
char buf[256];
snprintf(buf, sizeof(buf),
- "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n",
+ "vmwgfx, error: guest graphics is out of memory (mob limit at: %u KiB).\n",
((gman->max_gmr_pages) << (PAGE_SHIFT - 10)));
vmw_host_printf(buf);
DRM_WARN("%s", buf);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 13b2820cae51..00c4ff684130 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -224,7 +224,7 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
new_image = vmw_du_cursor_plane_acquire_image(new_vps);
changed = false;
- if (old_image && new_image)
+ if (old_image && new_image && old_image != new_image)
changed = memcmp(old_image, new_image, size) != 0;
return changed;
@@ -2171,13 +2171,12 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
return 0;
}
+static
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
- uint32_t pitch,
- uint32_t height)
+ u64 pitch,
+ u64 height)
{
- return ((u64) pitch * (u64) height) < (u64)
- ((dev_priv->active_display_unit == vmw_du_screen_target) ?
- dev_priv->max_primary_mem : dev_priv->vram_size);
+ return (pitch * height) < (u64)dev_priv->vram_size;
}
/**
@@ -2873,25 +2872,18 @@ out_unref:
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ enum drm_mode_status ret;
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
- u32 max_width = dev_priv->texture_max_width;
- u32 max_height = dev_priv->texture_max_height;
u32 assumed_cpp = 4;
if (dev_priv->assume_16bpp)
assumed_cpp = 2;
- if (dev_priv->active_display_unit == vmw_du_screen_target) {
- max_width = min(dev_priv->stdu_max_width, max_width);
- max_height = min(dev_priv->stdu_max_height, max_height);
- }
-
- if (max_width < mode->hdisplay)
- return MODE_BAD_HVALUE;
-
- if (max_height < mode->vdisplay)
- return MODE_BAD_VVALUE;
+ ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
+ if (ret != MODE_OK)
+ return ret;
if (!vmw_kms_validate_mode_vram(dev_priv,
mode->hdisplay * assumed_cpp,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 2041c4d48daa..a04e0736318d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -43,7 +43,14 @@
#define vmw_connector_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.connector)
-
+/*
+ * Some renderers such as llvmpipe will align the width and height of their
+ * buffers to match their tile size. We need to keep this in mind when exposing
+ * modes to userspace so that this possible over-allocation will not exceed
+ * graphics memory. 64x64 pixels seems to be a reasonable upper bound for the
+ * tile size of current renderers.
+ */
+#define GPU_TILE_SIZE 64
enum stdu_content_type {
SAME_AS_DISPLAY = 0,
@@ -85,11 +92,6 @@ struct vmw_stdu_update {
SVGA3dCmdUpdateGBScreenTarget body;
};
-struct vmw_stdu_dma {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA body;
-};
-
struct vmw_stdu_surface_copy {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceCopy body;
@@ -414,6 +416,7 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
+ struct drm_crtc_state *new_crtc_state;
int ret;
if (!crtc) {
@@ -423,6 +426,7 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (dev_priv->vkms_enabled)
drm_crtc_vblank_off(crtc);
@@ -434,6 +438,14 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
(void) vmw_stdu_update_st(dev_priv, stdu);
+ /* Don't destroy the Screen Target if we are only setting the
+ * display as inactive
+ */
+ if (new_crtc_state->enable &&
+ !new_crtc_state->active &&
+ !new_crtc_state->mode_changed)
+ return;
+
ret = vmw_stdu_destroy_st(dev_priv, stdu);
if (ret)
DRM_ERROR("Failed to destroy Screen Target\n");
@@ -829,7 +841,41 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
vmw_stdu_destroy(vmw_connector_to_stdu(connector));
}
+static enum drm_mode_status
+vmw_stdu_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ enum drm_mode_status ret;
+ struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ u64 assumed_cpp = dev_priv->assume_16bpp ? 2 : 4;
+ /* Align width and height to account for GPU tile over-alignment */
+ u64 required_mem = ALIGN(mode->hdisplay, GPU_TILE_SIZE) *
+ ALIGN(mode->vdisplay, GPU_TILE_SIZE) *
+ assumed_cpp;
+ required_mem = ALIGN(required_mem, PAGE_SIZE);
+
+ ret = drm_mode_validate_size(mode, dev_priv->stdu_max_width,
+ dev_priv->stdu_max_height);
+ if (ret != MODE_OK)
+ return ret;
+
+ ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
+ if (ret != MODE_OK)
+ return ret;
+ if (required_mem > dev_priv->max_primary_mem)
+ return MODE_MEM;
+
+ if (required_mem > dev_priv->max_mob_pages * PAGE_SIZE)
+ return MODE_MEM;
+
+ if (required_mem > dev_priv->max_mob_size)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
@@ -845,7 +891,7 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
.get_modes = vmw_connector_get_modes,
- .mode_valid = vmw_connector_mode_valid
+ .mode_valid = vmw_stdu_connector_mode_valid
};
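
The new STDU mode_valid helper above pads the mode up to the assumed 64x64 tile and then to a page before comparing it against the memory limits; a rough userspace sketch of that estimate, with an illustrative page size and mode:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned long long tile = 64;		/* assumed renderer tile size, as in the hunk above */
	unsigned long long page = 4096;		/* illustrative page size */
	unsigned long long hdisplay = 1366, vdisplay = 768, cpp = 4;

	unsigned long long required = ALIGN_UP(hdisplay, tile) *
				      ALIGN_UP(vdisplay, tile) * cpp;
	required = ALIGN_UP(required, page);

	printf("%llux%llu @ %llu bytes/px -> %llu bytes after tile+page padding\n",
	       hdisplay, vdisplay, cpp, required);
	/* The driver would then reject the mode if this exceeds its limits. */
	return 0;
}
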
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index 8fc0f3f6ecc5..944770fb2daf 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -147,6 +147,13 @@ static const struct attribute *gt_idle_attrs[] = {
static void gt_idle_sysfs_fini(struct drm_device *drm, void *arg)
{
struct kobject *kobj = arg;
+ struct xe_gt *gt = kobj_to_gt(kobj->parent);
+
+ if (gt_to_xe(gt)->info.skip_guc_pc) {
+ XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ xe_gt_idle_disable_c6(gt);
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ }
sysfs_remove_files(kobj, gt_idle_attrs);
kobject_put(kobj);
@@ -199,7 +206,7 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt)
void xe_gt_idle_disable_c6(struct xe_gt *gt)
{
xe_device_assert_mem_access(gt_to_xe(gt));
- xe_force_wake_assert_held(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
xe_mmio_write32(gt, PG_ENABLE, 0);
xe_mmio_write32(gt, RC_CONTROL, 0);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 79116ad58620..6c2cfc54442c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1274,6 +1274,9 @@ static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
struct xe_tile *tile;
unsigned int tid;
+ xe_assert(xe, IS_DGFX(xe));
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
for_each_tile(tile, xe, tid) {
lmtt = &tile->sriov.pf.lmtt;
xe_lmtt_drop_pages(lmtt, vfid);
@@ -1292,6 +1295,9 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
unsigned int tid;
int err;
+ xe_assert(xe, IS_DGFX(xe));
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
total = 0;
for_each_tile(tile, xe, tid)
total += pf_get_vf_config_lmem(tile->primary_gt, vfid);
@@ -1337,6 +1343,7 @@ fail:
static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
+ xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
xe_gt_assert(gt, !xe_gt_is_media_type(gt));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -1355,6 +1362,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
int err;
xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, IS_DGFX(xe));
xe_gt_assert(gt, !xe_gt_is_media_type(gt));
size = round_up(size, pf_get_lmem_alignment(gt));
@@ -1745,10 +1753,14 @@ static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *c
static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ struct xe_device *xe = gt_to_xe(gt);
if (!xe_gt_is_media_type(gt)) {
pf_release_vf_config_ggtt(gt, config);
- pf_release_vf_config_lmem(gt, config);
+ if (IS_DGFX(xe)) {
+ pf_release_vf_config_lmem(gt, config);
+ pf_update_vf_lmtt(xe, vfid);
+ }
}
pf_release_config_ctxs(gt, config);
pf_release_config_dbs(gt, config);
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 240e7a4bbff1..5faca4fc2fef 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -631,8 +631,6 @@ int xe_guc_enable_communication(struct xe_guc *guc)
struct xe_device *xe = guc_to_xe(guc);
int err;
- guc_enable_irq(guc);
-
if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
struct xe_gt *gt = guc_to_gt(guc);
struct xe_tile *tile = gt_to_tile(gt);
@@ -640,6 +638,8 @@ int xe_guc_enable_communication(struct xe_guc *guc)
err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);
if (err)
return err;
+ } else {
+ guc_enable_irq(guc);
}
xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 509649d0e65e..23382ced4ea7 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -895,12 +895,6 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc)
static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
{
struct xe_guc_pc *pc = arg;
- struct xe_device *xe = pc_to_xe(pc);
-
- if (xe->info.skip_guc_pc) {
- xe_gt_idle_disable_c6(pc_to_gt(pc));
- return;
- }
XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index c7d38469fb46..e4e3658e6a13 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1240,6 +1240,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
return 0;
err_entity:
+ mutex_unlock(&guc->submission_state.lock);
xe_sched_entity_fini(&ge->entity);
err_sched:
xe_sched_fini(&ge->sched);
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 9f6e9b7f11c8..65e5a3f4c340 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -34,7 +34,6 @@
#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_vm.h"
-#include "xe_wa.h"
/**
* struct xe_migrate - migrate context.
@@ -300,10 +299,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
}
/*
- * Due to workaround 16017236439, odd instance hardware copy engines are
- * faster than even instance ones.
- * This function returns the mask involving all fast copy engines and the
- * reserved copy engine to be used as logical mask for migrate engine.
* Including the reserved copy engine is required to avoid deadlocks due to
* migrate jobs servicing the faults getting stuck behind the job that faulted.
*/
@@ -317,8 +312,7 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
if (hwe->class != XE_ENGINE_CLASS_COPY)
continue;
- if (!XE_WA(gt, 16017236439) ||
- xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
+ if (xe_gt_is_usm_hwe(gt, hwe))
logical_mask |= BIT(hwe->logical_instance);
}
@@ -369,6 +363,10 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
if (!hwe || !logical_mask)
return ERR_PTR(-EINVAL);
+ /*
+ * XXX: Currently only reserving 1 (likely slow) BCS instance on
+ * PVC, may want to revisit if performance is needed.
+ */
m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
EXEC_QUEUE_FLAG_KERNEL |
EXEC_QUEUE_FLAG_PERMANENT |
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index c010ef16fbf5..a5e7da8cf944 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -191,7 +191,7 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
drm_WARN_ON_ONCE(&gt_to_xe(gt)->drm, timeout_base_ms > 1);
preempt_disable();
ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
- true, timeout_base_ms * 1000, true);
+ true, 50 * 1000, true);
preempt_enable();
out:
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index d42b3f33bd7a..aca7a9af6e84 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -80,6 +80,16 @@ static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i)
return i;
}
+static int emit_flush_dw(u32 *dw, int i)
+{
+ dw[i++] = MI_FLUSH_DW | MI_FLUSH_IMM_DW;
+ dw[i++] = 0;
+ dw[i++] = 0;
+ dw[i++] = 0;
+
+ return i;
+}
+
static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb,
u32 *dw, int i)
{
@@ -234,10 +244,12 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
- if (job->user_fence.used)
+ if (job->user_fence.used) {
+ i = emit_flush_dw(dw, i);
i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
job->user_fence.value,
dw, i);
+ }
i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
@@ -293,10 +305,12 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
- if (job->user_fence.used)
+ if (job->user_fence.used) {
+ i = emit_flush_dw(dw, i);
i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
job->user_fence.value,
dw, i);
+ }
i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
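
The new emit_flush_dw() above slots a flush packet in front of the user-fence store, following the existing convention of helpers that take the current dword index and return the advanced one; a minimal standalone sketch of that convention (the opcode values are placeholders, not the real MI_* encodings):

#include <stdio.h>

/* Append a 4-dword flush packet and return the updated write index. */
static int emit_flush(unsigned int *dw, int i)
{
	dw[i++] = 0xdeadbeef;	/* placeholder opcode */
	dw[i++] = 0;
	dw[i++] = 0;
	dw[i++] = 0;
	return i;
}

/* Append a store-immediate packet and return the updated write index. */
static int emit_store(unsigned int *dw, int i, unsigned int addr, unsigned int value)
{
	dw[i++] = 0xcafef00d;	/* placeholder opcode */
	dw[i++] = addr;
	dw[i++] = value;
	return i;
}

int main(void)
{
	unsigned int dw[32];
	int i = 0;

	/* Flush before the fence store, as the hunks above now do. */
	i = emit_flush(dw, i);
	i = emit_store(dw, i, 0x1000, 42);

	printf("emitted %d dwords\n", i);
	return 0;
}
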
diff --git a/drivers/greybus/core.c b/drivers/greybus/core.c
index 95c09d4f3a86..33a47e73f0fa 100644
--- a/drivers/greybus/core.c
+++ b/drivers/greybus/core.c
@@ -375,5 +375,6 @@ static void __exit gb_exit(void)
tracepoint_synchronize_unregister();
}
module_exit(gb_exit);
+MODULE_DESCRIPTION("Greybus core driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
diff --git a/drivers/greybus/es2.c b/drivers/greybus/es2.c
index 1ee78d0d90b4..69e46b1dff1f 100644
--- a/drivers/greybus/es2.c
+++ b/drivers/greybus/es2.c
@@ -1456,5 +1456,6 @@ static struct usb_driver es2_ap_driver = {
module_usb_driver(es2_ap_driver);
+MODULE_DESCRIPTION("Greybus AP USB driver for ES2 controller chips");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 02de2bf4f790..37e6d25593c2 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -1204,8 +1204,8 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
/* match many more n-key devices */
- if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
- for (int i = 0; i < *rsize + 1; i++) {
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD && *rsize > 15) {
+ for (int i = 0; i < *rsize - 15; i++) {
/* offset to the count from 0x5a report part always 14 */
if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a &&
rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) {
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index b1fa0378e8f4..74efda212c55 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1448,7 +1448,6 @@ static void implement(const struct hid_device *hid, u8 *report,
hid_warn(hid,
"%s() called with too large value %d (n: %d)! (%s)\n",
__func__, value, n, current->comm);
- WARN_ON(1);
value &= m;
}
}
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 87a961cae775..d5abfe652fb5 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -3366,6 +3366,8 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_CAMERA_ACCESS_ENABLE] = "CameraAccessEnable",
[KEY_CAMERA_ACCESS_DISABLE] = "CameraAccessDisable",
[KEY_CAMERA_ACCESS_TOGGLE] = "CameraAccessToggle",
+ [KEY_ACCESSIBILITY] = "Accessibility",
+ [KEY_DO_NOT_DISTURB] = "DoNotDisturb",
[KEY_DICTATE] = "Dictate",
[KEY_MICMUTE] = "MicrophoneMute",
[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 61d2a21affa2..72d56ee7ce1b 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -423,6 +423,8 @@
#define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG 0x29DF
#define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8
#define I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN 0x2C82
+#define I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN 0x2F2C
+#define I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN 0x4116
#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index e03d300d2bac..c9094a4f281e 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -377,6 +377,10 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
@@ -833,9 +837,18 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
}
+ if ((usage->hid & 0xf0) == 0x90) { /* SystemControl */
+ switch (usage->hid & 0xf) {
+ case 0xb: map_key_clear(KEY_DO_NOT_DISTURB); break;
+ default: goto ignore;
+ }
+ break;
+ }
+
if ((usage->hid & 0xf0) == 0xa0) { /* SystemControl */
switch (usage->hid & 0xf) {
case 0x9: map_key_clear(KEY_MICMUTE); break;
+ case 0xa: map_key_clear(KEY_ACCESSIBILITY); break;
default: goto ignore;
}
break;
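
The hid-input hunk above routes the new usages by nibble: the high nibble of the low byte selects the block (0x90 or 0xa0) and the low nibble the entry; a small standalone sketch of that decode, with hypothetical key-code names standing in for the real input constants:

#include <stdio.h>

/* Hypothetical key codes for this sketch only. */
enum { KEY_UNKNOWN, KEY_DND, KEY_MICMUTE, KEY_ACCESSIBILITY };

/* Map the low byte of a usage ID the way the hunk above does. */
static int map_usage(unsigned int usage)
{
	switch (usage & 0xf0) {
	case 0x90:
		return (usage & 0xf) == 0xb ? KEY_DND : KEY_UNKNOWN;
	case 0xa0:
		switch (usage & 0xf) {
		case 0x9: return KEY_MICMUTE;
		case 0xa: return KEY_ACCESSIBILITY;
		}
		return KEY_UNKNOWN;
	default:
		return KEY_UNKNOWN;
	}
}

int main(void)
{
	printf("0x9b -> %d\n", map_usage(0x9b));	/* Do Not Disturb */
	printf("0xa9 -> %d\n", map_usage(0xa9));	/* Microphone Mute */
	printf("0xaa -> %d\n", map_usage(0xaa));	/* Accessibility */
	return 0;
}
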
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 3c3c497b6b91..37958edec55f 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1284,8 +1284,10 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
*/
msleep(50);
- if (retval)
+ if (retval) {
+ kfree(dj_report);
return retval;
+ }
}
/*
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index b81d5bcc76a7..400d70e6dbe2 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -27,6 +27,7 @@
#include "usbhid/usbhid.h"
#include "hid-ids.h"
+MODULE_DESCRIPTION("Support for Logitech devices relying on the HID++ specification");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>");
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index b4a97803eca3..3062daf68d31 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -2725,13 +2725,13 @@ static int nintendo_hid_probe(struct hid_device *hdev,
ret = joycon_power_supply_create(ctlr);
if (ret) {
hid_err(hdev, "Failed to create power_supply; ret=%d\n", ret);
- goto err_close;
+ goto err_ida;
}
ret = joycon_input_create(ctlr);
if (ret) {
hid_err(hdev, "Failed to create input device; ret=%d\n", ret);
- goto err_close;
+ goto err_ida;
}
ctlr->ctlr_state = JOYCON_CTLR_STATE_READ;
@@ -2739,6 +2739,8 @@ static int nintendo_hid_probe(struct hid_device *hdev,
hid_dbg(hdev, "probe - success\n");
return 0;
+err_ida:
+ ida_free(&nintendo_player_id_allocator, ctlr->player_id);
err_close:
hid_hw_close(hdev);
err_stop:
diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c
index 58b15750dbb0..ff9078ad1961 100644
--- a/drivers/hid/hid-nvidia-shield.c
+++ b/drivers/hid/hid-nvidia-shield.c
@@ -283,7 +283,9 @@ static struct input_dev *shield_haptics_create(
return haptics;
input_set_capability(haptics, EV_FF, FF_RUMBLE);
- input_ff_create_memless(haptics, NULL, play_effect);
+ ret = input_ff_create_memless(haptics, NULL, play_effect);
+ if (ret)
+ goto err;
ret = input_register_device(haptics);
if (ret)
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
index 5b91fb106cfc..091e37933225 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
@@ -31,6 +31,7 @@ struct i2c_hid_of_elan {
struct regulator *vcc33;
struct regulator *vccio;
struct gpio_desc *reset_gpio;
+ bool no_reset_on_power_off;
const struct elan_i2c_hid_chip_data *chip_data;
};
@@ -40,17 +41,17 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops)
container_of(ops, struct i2c_hid_of_elan, ops);
int ret;
+ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
+
if (ihid_elan->vcc33) {
ret = regulator_enable(ihid_elan->vcc33);
if (ret)
- return ret;
+ goto err_deassert_reset;
}
ret = regulator_enable(ihid_elan->vccio);
- if (ret) {
- regulator_disable(ihid_elan->vcc33);
- return ret;
- }
+ if (ret)
+ goto err_disable_vcc33;
if (ihid_elan->chip_data->post_power_delay_ms)
msleep(ihid_elan->chip_data->post_power_delay_ms);
@@ -60,6 +61,15 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops)
msleep(ihid_elan->chip_data->post_gpio_reset_on_delay_ms);
return 0;
+
+err_disable_vcc33:
+ if (ihid_elan->vcc33)
+ regulator_disable(ihid_elan->vcc33);
+err_deassert_reset:
+ if (ihid_elan->no_reset_on_power_off)
+ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
+
+ return ret;
}
static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
@@ -67,7 +77,14 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
struct i2c_hid_of_elan *ihid_elan =
container_of(ops, struct i2c_hid_of_elan, ops);
- gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
+ /*
+ * Do not assert reset when the hardware allows for it to remain
+ * deasserted regardless of the state of the (shared) power supply to
+ * avoid wasting power when the supply is left on.
+ */
+ if (!ihid_elan->no_reset_on_power_off)
+ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
+
if (ihid_elan->chip_data->post_gpio_reset_off_delay_ms)
msleep(ihid_elan->chip_data->post_gpio_reset_off_delay_ms);
@@ -79,6 +96,7 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
static int i2c_hid_of_elan_probe(struct i2c_client *client)
{
struct i2c_hid_of_elan *ihid_elan;
+ int ret;
ihid_elan = devm_kzalloc(&client->dev, sizeof(*ihid_elan), GFP_KERNEL);
if (!ihid_elan)
@@ -93,21 +111,38 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client)
if (IS_ERR(ihid_elan->reset_gpio))
return PTR_ERR(ihid_elan->reset_gpio);
+ ihid_elan->no_reset_on_power_off = of_property_read_bool(client->dev.of_node,
+ "no-reset-on-power-off");
+
ihid_elan->vccio = devm_regulator_get(&client->dev, "vccio");
- if (IS_ERR(ihid_elan->vccio))
- return PTR_ERR(ihid_elan->vccio);
+ if (IS_ERR(ihid_elan->vccio)) {
+ ret = PTR_ERR(ihid_elan->vccio);
+ goto err_deassert_reset;
+ }
ihid_elan->chip_data = device_get_match_data(&client->dev);
if (ihid_elan->chip_data->main_supply_name) {
ihid_elan->vcc33 = devm_regulator_get(&client->dev,
ihid_elan->chip_data->main_supply_name);
- if (IS_ERR(ihid_elan->vcc33))
- return PTR_ERR(ihid_elan->vcc33);
+ if (IS_ERR(ihid_elan->vcc33)) {
+ ret = PTR_ERR(ihid_elan->vcc33);
+ goto err_deassert_reset;
+ }
}
- return i2c_hid_core_probe(client, &ihid_elan->ops,
- ihid_elan->chip_data->hid_descriptor_address, 0);
+ ret = i2c_hid_core_probe(client, &ihid_elan->ops,
+ ihid_elan->chip_data->hid_descriptor_address, 0);
+ if (ret)
+ goto err_deassert_reset;
+
+ return 0;
+
+err_deassert_reset:
+ if (ihid_elan->no_reset_on_power_off)
+ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
+
+ return ret;
}
static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = {
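
The reworked Elan power-up and probe paths above unwind through goto labels so each failure releases only what was actually acquired; a minimal sketch of that acquire/unwind ordering, with made-up resource names:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the regulators/GPIO handled in the hunks above. */
static int enable_a(void)   { puts("enable A");  return 0; }
static void disable_a(void) { puts("disable A"); }
static int enable_b(void)   { puts("enable B");  return -1; /* simulate failure */ }

static int power_up(void)
{
	int ret;

	ret = enable_a();
	if (ret)
		goto err;

	ret = enable_b();
	if (ret)
		goto err_disable_a;	/* unwind only what succeeded */

	return 0;

err_disable_a:
	disable_a();
err:
	return ret;
}

int main(void)
{
	return power_up() ? EXIT_FAILURE : EXIT_SUCCESS;
}
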
diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.c b/drivers/hid/intel-ish-hid/ishtp/loader.c
index 993f8b390e57..fcca070bdecb 100644
--- a/drivers/hid/intel-ish-hid/ishtp/loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp/loader.c
@@ -84,8 +84,8 @@ static int loader_write_message(struct ishtp_device *dev, void *buf, int len)
static int loader_xfer_cmd(struct ishtp_device *dev, void *req, int req_len,
void *resp, int resp_len)
{
- struct loader_msg_header *req_hdr = req;
- struct loader_msg_header *resp_hdr = resp;
+ union loader_msg_header req_hdr;
+ union loader_msg_header resp_hdr;
struct device *devc = dev->devc;
int rv;
@@ -93,34 +93,37 @@ static int loader_xfer_cmd(struct ishtp_device *dev, void *req, int req_len,
dev->fw_loader_rx_size = resp_len;
rv = loader_write_message(dev, req, req_len);
+ req_hdr.val32 = le32_to_cpup(req);
+
if (rv < 0) {
- dev_err(devc, "write cmd %u failed:%d\n", req_hdr->command, rv);
+ dev_err(devc, "write cmd %u failed:%d\n", req_hdr.command, rv);
return rv;
}
/* Wait the ACK */
wait_event_interruptible_timeout(dev->wait_loader_recvd_msg, dev->fw_loader_received,
ISHTP_LOADER_TIMEOUT);
+ resp_hdr.val32 = le32_to_cpup(resp);
dev->fw_loader_rx_size = 0;
dev->fw_loader_rx_buf = NULL;
if (!dev->fw_loader_received) {
- dev_err(devc, "wait response of cmd %u timeout\n", req_hdr->command);
+ dev_err(devc, "wait response of cmd %u timeout\n", req_hdr.command);
return -ETIMEDOUT;
}
- if (!resp_hdr->is_response) {
- dev_err(devc, "not a response for %u\n", req_hdr->command);
+ if (!resp_hdr.is_response) {
+ dev_err(devc, "not a response for %u\n", req_hdr.command);
return -EBADMSG;
}
- if (req_hdr->command != resp_hdr->command) {
- dev_err(devc, "unexpected cmd response %u:%u\n", req_hdr->command,
- resp_hdr->command);
+ if (req_hdr.command != resp_hdr.command) {
+ dev_err(devc, "unexpected cmd response %u:%u\n", req_hdr.command,
+ resp_hdr.command);
return -EBADMSG;
}
- if (resp_hdr->status) {
- dev_err(devc, "cmd %u failed %u\n", req_hdr->command, resp_hdr->status);
+ if (resp_hdr.status) {
+ dev_err(devc, "cmd %u failed %u\n", req_hdr.command, resp_hdr.status);
return -EIO;
}
@@ -138,12 +141,13 @@ static void release_dma_bufs(struct ishtp_device *dev,
struct loader_xfer_dma_fragment *fragment,
void **dma_bufs, u32 fragment_size)
{
+ dma_addr_t dma_addr;
int i;
for (i = 0; i < FRAGMENT_MAX_NUM; i++) {
if (dma_bufs[i]) {
- dma_free_coherent(dev->devc, fragment_size, dma_bufs[i],
- fragment->fragment_tbl[i].ddr_adrs);
+ dma_addr = le64_to_cpu(fragment->fragment_tbl[i].ddr_adrs);
+ dma_free_coherent(dev->devc, fragment_size, dma_bufs[i], dma_addr);
dma_bufs[i] = NULL;
}
}
@@ -156,29 +160,33 @@ static void release_dma_bufs(struct ishtp_device *dev,
* @fragment: The ISHTP firmware fragment descriptor
* @dma_bufs: The array of DMA fragment buffers
* @fragment_size: The size of a single DMA fragment
+ * @fragment_count: Number of fragments
*
* Return: 0 on success, negative error code on failure
*/
static int prepare_dma_bufs(struct ishtp_device *dev,
const struct firmware *ish_fw,
struct loader_xfer_dma_fragment *fragment,
- void **dma_bufs, u32 fragment_size)
+ void **dma_bufs, u32 fragment_size, u32 fragment_count)
{
+ dma_addr_t dma_addr;
u32 offset = 0;
+ u32 length;
int i;
- for (i = 0; i < fragment->fragment_cnt && offset < ish_fw->size; i++) {
- dma_bufs[i] = dma_alloc_coherent(dev->devc, fragment_size,
- &fragment->fragment_tbl[i].ddr_adrs, GFP_KERNEL);
+ for (i = 0; i < fragment_count && offset < ish_fw->size; i++) {
+ dma_bufs[i] = dma_alloc_coherent(dev->devc, fragment_size, &dma_addr, GFP_KERNEL);
if (!dma_bufs[i])
return -ENOMEM;
- fragment->fragment_tbl[i].length = clamp(ish_fw->size - offset, 0, fragment_size);
- fragment->fragment_tbl[i].fw_off = offset;
- memcpy(dma_bufs[i], ish_fw->data + offset, fragment->fragment_tbl[i].length);
+ fragment->fragment_tbl[i].ddr_adrs = cpu_to_le64(dma_addr);
+ length = clamp(ish_fw->size - offset, 0, fragment_size);
+ fragment->fragment_tbl[i].length = cpu_to_le32(length);
+ fragment->fragment_tbl[i].fw_off = cpu_to_le32(offset);
+ memcpy(dma_bufs[i], ish_fw->data + offset, length);
clflush_cache_range(dma_bufs[i], fragment_size);
- offset += fragment->fragment_tbl[i].length;
+ offset += length;
}
return 0;
@@ -206,17 +214,17 @@ void ishtp_loader_work(struct work_struct *work)
{
DEFINE_RAW_FLEX(struct loader_xfer_dma_fragment, fragment, fragment_tbl, FRAGMENT_MAX_NUM);
struct ishtp_device *dev = container_of(work, struct ishtp_device, work_fw_loader);
- struct loader_xfer_query query = {
- .header.command = LOADER_CMD_XFER_QUERY,
- };
- struct loader_start start = {
- .header.command = LOADER_CMD_START,
- };
+ union loader_msg_header query_hdr = { .command = LOADER_CMD_XFER_QUERY, };
+ union loader_msg_header start_hdr = { .command = LOADER_CMD_START, };
+ union loader_msg_header fragment_hdr = { .command = LOADER_CMD_XFER_FRAGMENT, };
+ struct loader_xfer_query query = { .header = cpu_to_le32(query_hdr.val32), };
+ struct loader_start start = { .header = cpu_to_le32(start_hdr.val32), };
union loader_recv_message recv_msg;
char *filename = dev->driver_data->fw_filename;
const struct firmware *ish_fw;
void *dma_bufs[FRAGMENT_MAX_NUM] = {};
u32 fragment_size;
+ u32 fragment_count;
int retry = ISHTP_LOADER_RETRY_TIMES;
int rv;
@@ -226,23 +234,24 @@ void ishtp_loader_work(struct work_struct *work)
return;
}
- fragment->fragment.header.command = LOADER_CMD_XFER_FRAGMENT;
- fragment->fragment.xfer_mode = LOADER_XFER_MODE_DMA;
- fragment->fragment.is_last = 1;
- fragment->fragment.size = ish_fw->size;
+ fragment->fragment.header = cpu_to_le32(fragment_hdr.val32);
+ fragment->fragment.xfer_mode = cpu_to_le32(LOADER_XFER_MODE_DMA);
+ fragment->fragment.is_last = cpu_to_le32(1);
+ fragment->fragment.size = cpu_to_le32(ish_fw->size);
/* Calculate the size of a single DMA fragment */
fragment_size = PFN_ALIGN(DIV_ROUND_UP(ish_fw->size, FRAGMENT_MAX_NUM));
/* Calculate the count of DMA fragments */
- fragment->fragment_cnt = DIV_ROUND_UP(ish_fw->size, fragment_size);
+ fragment_count = DIV_ROUND_UP(ish_fw->size, fragment_size);
+ fragment->fragment_cnt = cpu_to_le32(fragment_count);
- rv = prepare_dma_bufs(dev, ish_fw, fragment, dma_bufs, fragment_size);
+ rv = prepare_dma_bufs(dev, ish_fw, fragment, dma_bufs, fragment_size, fragment_count);
if (rv) {
dev_err(dev->devc, "prepare DMA buffer failed.\n");
goto out;
}
do {
- query.image_size = ish_fw->size;
+ query.image_size = cpu_to_le32(ish_fw->size);
rv = loader_xfer_cmd(dev, &query, sizeof(query), recv_msg.raw_data,
sizeof(struct loader_xfer_query_ack));
if (rv)
@@ -255,7 +264,7 @@ void ishtp_loader_work(struct work_struct *work)
recv_msg.query_ack.version_build);
rv = loader_xfer_cmd(dev, fragment,
- struct_size(fragment, fragment_tbl, fragment->fragment_cnt),
+ struct_size(fragment, fragment_tbl, fragment_count),
recv_msg.raw_data, sizeof(struct loader_xfer_fragment_ack));
if (rv)
continue; /* try again if failed */
diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.h b/drivers/hid/intel-ish-hid/ishtp/loader.h
index 7aa45ebc3f7b..308b96085a4d 100644
--- a/drivers/hid/intel-ish-hid/ishtp/loader.h
+++ b/drivers/hid/intel-ish-hid/ishtp/loader.h
@@ -30,19 +30,23 @@ struct work_struct;
#define LOADER_XFER_MODE_DMA BIT(0)
/**
- * struct loader_msg_header - ISHTP firmware loader message header
+ * union loader_msg_header - ISHTP firmware loader message header
* @command: Command type
* @is_response: Indicates if the message is a response
* @has_next: Indicates if there is a next message
* @reserved: Reserved for future use
* @status: Status of the message
- */
-struct loader_msg_header {
- __le32 command:7;
- __le32 is_response:1;
- __le32 has_next:1;
- __le32 reserved:15;
- __le32 status:8;
+ * @val32: entire header as a 32-bit value
+ */
+union loader_msg_header {
+ struct {
+ __u32 command:7;
+ __u32 is_response:1;
+ __u32 has_next:1;
+ __u32 reserved:15;
+ __u32 status:8;
+ };
+ __u32 val32;
};
/**
@@ -51,7 +55,7 @@ struct loader_msg_header {
* @image_size: Size of the image
*/
struct loader_xfer_query {
- struct loader_msg_header header;
+ __le32 header;
__le32 image_size;
};
@@ -103,7 +107,7 @@ struct loader_capability {
* @capability: Loader capability
*/
struct loader_xfer_query_ack {
- struct loader_msg_header header;
+ __le32 header;
__le16 version_major;
__le16 version_minor;
__le16 version_hotfix;
@@ -122,7 +126,7 @@ struct loader_xfer_query_ack {
* @is_last: Is last
*/
struct loader_xfer_fragment {
- struct loader_msg_header header;
+ __le32 header;
__le32 xfer_mode;
__le32 offset;
__le32 size;
@@ -134,7 +138,7 @@ struct loader_xfer_fragment {
* @header: Header of the message
*/
struct loader_xfer_fragment_ack {
- struct loader_msg_header header;
+ __le32 header;
};
/**
@@ -170,7 +174,7 @@ struct loader_xfer_dma_fragment {
* @header: Header of the message
*/
struct loader_start {
- struct loader_msg_header header;
+ __le32 header;
};
/**
@@ -178,10 +182,11 @@ struct loader_start {
* @header: Header of the message
*/
struct loader_start_ack {
- struct loader_msg_header header;
+ __le32 header;
};
union loader_recv_message {
+ __le32 header;
struct loader_xfer_query_ack query_ack;
struct loader_xfer_fragment_ack fragment_ack;
struct loader_start_ack start_ack;
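
The loader header above now travels as a raw __le32 and is decoded through a host-order union; a rough userspace sketch of that decode, assuming a little-endian wire format and noting that C bitfield placement is compiler-defined, so the field layout here is illustrative:

#include <stdio.h>
#include <stdint.h>

/* Host-order view of the 32-bit loader header (mirrors the union above;
 * exact bit placement depends on the compiler, so treat as illustrative). */
union msg_header {
	struct {
		uint32_t command:7;
		uint32_t is_response:1;
		uint32_t has_next:1;
		uint32_t reserved:15;
		uint32_t status:8;
	};
	uint32_t val32;
};

/* Minimal little-endian load, standing in for le32_to_cpup(). */
static uint32_t le32_load(const void *p)
{
	const uint8_t *b = p;
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	uint8_t wire[4] = { 0x83, 0x00, 0x00, 0x05 };	/* made-up response bytes */
	union msg_header hdr;

	hdr.val32 = le32_load(wire);
	printf("command=%u is_response=%u status=%u\n",
	       hdr.command, hdr.is_response, hdr.status);
	return 0;
}
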
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index a8ad728354cb..e0d676c74f14 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -45,8 +45,8 @@ int hv_init(void)
* This involves a hypercall.
*/
int hv_post_message(union hv_connection_id connection_id,
- enum hv_message_type message_type,
- void *payload, size_t payload_size)
+ enum hv_message_type message_type,
+ void *payload, size_t payload_size)
{
struct hv_input_post_message *aligned_msg;
unsigned long flags;
@@ -86,7 +86,7 @@ int hv_post_message(union hv_connection_id connection_id,
status = HV_STATUS_INVALID_PARAMETER;
} else {
status = hv_do_hypercall(HVCALL_POST_MESSAGE,
- aligned_msg, NULL);
+ aligned_msg, NULL);
}
local_irq_restore(flags);
@@ -111,7 +111,7 @@ int hv_synic_alloc(void)
hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
GFP_KERNEL);
- if (hv_context.hv_numa_map == NULL) {
+ if (!hv_context.hv_numa_map) {
pr_err("Unable to allocate NUMA map\n");
goto err;
}
@@ -120,11 +120,11 @@ int hv_synic_alloc(void)
hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
tasklet_init(&hv_cpu->msg_dpc,
- vmbus_on_msg_dpc, (unsigned long) hv_cpu);
+ vmbus_on_msg_dpc, (unsigned long)hv_cpu);
if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->post_msg_page == NULL) {
+ if (!hv_cpu->post_msg_page) {
pr_err("Unable to allocate post msg page\n");
goto err;
}
@@ -147,14 +147,14 @@ int hv_synic_alloc(void)
if (!ms_hyperv.paravisor_present && !hv_root_partition) {
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->synic_message_page == NULL) {
+ if (!hv_cpu->synic_message_page) {
pr_err("Unable to allocate SYNIC message page\n");
goto err;
}
hv_cpu->synic_event_page =
(void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->synic_event_page == NULL) {
+ if (!hv_cpu->synic_event_page) {
pr_err("Unable to allocate SYNIC event page\n");
free_page((unsigned long)hv_cpu->synic_message_page);
@@ -203,14 +203,13 @@ err:
return ret;
}
-
void hv_synic_free(void)
{
int cpu, ret;
for_each_present_cpu(cpu) {
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
+ struct hv_per_cpu_context *hv_cpu =
+ per_cpu_ptr(hv_context.cpu_context, cpu);
/* It's better to leak the page if the encryption fails. */
if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
@@ -262,8 +261,8 @@ void hv_synic_free(void)
*/
void hv_synic_enable_regs(unsigned int cpu)
{
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
+ struct hv_per_cpu_context *hv_cpu =
+ per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_simp simp;
union hv_synic_siefp siefp;
union hv_synic_sint shared_sint;
@@ -277,8 +276,8 @@ void hv_synic_enable_regs(unsigned int cpu)
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
- hv_cpu->synic_message_page
- = (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
+ hv_cpu->synic_message_page =
+ (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
if (!hv_cpu->synic_message_page)
pr_err("Fail to map synic message page.\n");
} else {
@@ -296,8 +295,8 @@ void hv_synic_enable_regs(unsigned int cpu)
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
- hv_cpu->synic_event_page
- = (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
+ hv_cpu->synic_event_page =
+ (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
if (!hv_cpu->synic_event_page)
pr_err("Fail to map synic event page.\n");
} else {
@@ -348,8 +347,8 @@ int hv_synic_init(unsigned int cpu)
*/
void hv_synic_disable_regs(unsigned int cpu)
{
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
+ struct hv_per_cpu_context *hv_cpu =
+ per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_sint shared_sint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index e000fa3b9f97..0e7427c2baf5 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -25,6 +25,7 @@
#include <linux/notifier.h>
#include <linux/percpu_counter.h>
#include <linux/page_reporting.h>
+#include <linux/sizes.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>
@@ -41,8 +42,6 @@
* Begin protocol definitions.
*/
-
-
/*
* Protocol versions. The low word is the minor version, the high word the major
* version.
@@ -71,8 +70,6 @@ enum {
DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};
-
-
/*
* Message Types
*/
@@ -101,7 +98,6 @@ enum dm_message_type {
DM_VERSION_1_MAX = 12
};
-
/*
* Structures defining the dynamic memory management
* protocol.
@@ -115,7 +111,6 @@ union dm_version {
__u32 version;
} __packed;
-
union dm_caps {
struct {
__u64 balloon:1;
@@ -148,8 +143,6 @@ union dm_mem_page_range {
__u64 page_range;
} __packed;
-
-
/*
* The header for all dynamic memory messages:
*
@@ -174,7 +167,6 @@ struct dm_message {
__u8 data[]; /* enclosed message */
} __packed;
-
/*
* Specific message types supporting the dynamic memory protocol.
*/
@@ -271,7 +263,6 @@ struct dm_status {
__u32 io_diff;
} __packed;
-
/*
* Message to ask the guest to allocate memory - balloon up message.
* This message is sent from the host to the guest. The guest may not be
@@ -286,14 +277,13 @@ struct dm_balloon {
__u32 reservedz;
} __packed;
-
/*
* Balloon response message; this message is sent from the guest
* to the host in response to the balloon message.
*
* reservedz: Reserved; must be set to zero.
* more_pages: If FALSE, this is the last message of the transaction.
- * if TRUE there will atleast one more message from the guest.
+ * if TRUE there will be at least one more message from the guest.
*
* range_count: The number of ranges in the range array.
*
@@ -314,7 +304,7 @@ struct dm_balloon_response {
* to the guest to give guest more memory.
*
* more_pages: If FALSE, this is the last message of the transaction.
- * if TRUE there will atleast one more message from the guest.
+ * if TRUE there will be at least one more message from the guest.
*
* reservedz: Reserved; must be set to zero.
*
@@ -342,7 +332,6 @@ struct dm_unballoon_response {
struct dm_header hdr;
} __packed;
-
/*
* Hot add request message. Message sent from the host to the guest.
*
@@ -390,7 +379,6 @@ enum dm_info_type {
MAX_INFO_TYPE
};
-
/*
* Header for the information message.
*/
@@ -425,11 +413,11 @@ struct dm_info_msg {
* The range start_pfn : end_pfn specifies the range
* that the host has asked us to hot add. The range
* start_pfn : ha_end_pfn specifies the range that we have
- * currently hot added. We hot add in multiples of 128M
- * chunks; it is possible that we may not be able to bring
- * online all the pages in the region. The range
+ * currently hot added. We hot add in chunks equal to the
+ * memory block size; it is possible that we may not be able
+ * to bring online all the pages in the region. The range
* covered_start_pfn:covered_end_pfn defines the pages that can
- * be brough online.
+ * be brought online.
*/
struct hv_hotadd_state {
@@ -480,10 +468,10 @@ static unsigned long last_post_time;
static int hv_hypercall_multi_failure;
-module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
+module_param(hot_add, bool, 0644);
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
-module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
+module_param(pressure_report_delay, uint, 0644);
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
@@ -502,11 +490,13 @@ enum hv_dm_state {
DM_INIT_ERROR
};
-
static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
+
+static unsigned long ha_pages_in_chunk;
+#define HA_BYTES_IN_CHUNK (ha_pages_in_chunk << PAGE_SHIFT)
+
#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
-#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE)
struct hv_dynmem_device {
struct hv_device *dev;
@@ -595,12 +585,12 @@ static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
struct hv_hotadd_gap *gap;
/* The page is not backed. */
- if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
+ if (pfn < has->covered_start_pfn || pfn >= has->covered_end_pfn)
return false;
/* Check for gaps. */
list_for_each_entry(gap, &has->gap_list, list) {
- if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
+ if (pfn >= gap->start_pfn && pfn < gap->end_pfn)
return false;
}
@@ -724,28 +714,21 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
unsigned long processed_pfn;
unsigned long total_pfn = pfn_count;
- for (i = 0; i < (size/HA_CHUNK); i++) {
- start_pfn = start + (i * HA_CHUNK);
+ for (i = 0; i < (size/ha_pages_in_chunk); i++) {
+ start_pfn = start + (i * ha_pages_in_chunk);
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
- has->ha_end_pfn += HA_CHUNK;
-
- if (total_pfn > HA_CHUNK) {
- processed_pfn = HA_CHUNK;
- total_pfn -= HA_CHUNK;
- } else {
- processed_pfn = total_pfn;
- total_pfn = 0;
- }
-
- has->covered_end_pfn += processed_pfn;
+ has->ha_end_pfn += ha_pages_in_chunk;
+ processed_pfn = umin(total_pfn, ha_pages_in_chunk);
+ total_pfn -= processed_pfn;
+ has->covered_end_pfn += processed_pfn;
}
reinit_completion(&dm_device.ol_waitevent);
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
ret = add_memory(nid, PFN_PHYS((start_pfn)),
- (HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE);
+ HA_BYTES_IN_CHUNK, MHP_MERGE_RESOURCE);
if (ret) {
pr_err("hot_add memory failed error is %d\n", ret);
@@ -760,7 +743,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
do_hot_add = false;
}
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
- has->ha_end_pfn -= HA_CHUNK;
+ has->ha_end_pfn -= ha_pages_in_chunk;
has->covered_end_pfn -= processed_pfn;
}
break;
@@ -787,8 +770,8 @@ static void hv_online_page(struct page *pg, unsigned int order)
guard(spinlock_irqsave)(&dm_device.ha_lock);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/* The page belongs to a different HAS. */
- if ((pfn < has->start_pfn) ||
- (pfn + (1UL << order) > has->end_pfn))
+ if (pfn < has->start_pfn ||
+ (pfn + (1UL << order) > has->end_pfn))
continue;
hv_bring_pgs_online(has, pfn, 1UL << order);
@@ -800,7 +783,7 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
struct hv_hotadd_state *has;
struct hv_hotadd_gap *gap;
- unsigned long residual, new_inc;
+ unsigned long residual;
int ret = 0;
guard(spinlock_irqsave)(&dm_device.ha_lock);
@@ -836,15 +819,9 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
* our current limit; extend it.
*/
if ((start_pfn + pfn_cnt) > has->end_pfn) {
+ /* Extend the region by multiples of ha_pages_in_chunk */
residual = (start_pfn + pfn_cnt - has->end_pfn);
- /*
- * Extend the region by multiples of HA_CHUNK.
- */
- new_inc = (residual / HA_CHUNK) * HA_CHUNK;
- if (residual % HA_CHUNK)
- new_inc += HA_CHUNK;
-
- has->end_pfn += new_inc;
+ has->end_pfn += ALIGN(residual, ha_pages_in_chunk);
}
ret = 1;
@@ -855,7 +832,7 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
}
static unsigned long handle_pg_range(unsigned long pg_start,
- unsigned long pg_count)
+ unsigned long pg_count)
{
unsigned long start_pfn = pg_start;
unsigned long pfn_cnt = pg_count;
@@ -866,7 +843,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
unsigned long res = 0, flags;
pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
- pg_start);
+ pg_start);
spin_lock_irqsave(&dm_device.ha_lock, flags);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
@@ -902,22 +879,19 @@ static unsigned long handle_pg_range(unsigned long pg_start,
if (start_pfn > has->start_pfn &&
online_section_nr(pfn_to_section_nr(start_pfn)))
hv_bring_pgs_online(has, start_pfn, pgs_ol);
-
}
- if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
+ if (has->ha_end_pfn < has->end_pfn && pfn_cnt > 0) {
/*
* We have some residual hot add range
* that needs to be hot added; hot add
* it now. Hot add a multiple of
- * HA_CHUNK that fully covers the pages
+ * ha_pages_in_chunk that fully covers the pages
* we have.
*/
size = (has->end_pfn - has->ha_end_pfn);
if (pfn_cnt <= size) {
- size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
- if (pfn_cnt % HA_CHUNK)
- size += HA_CHUNK;
+ size = ALIGN(pfn_cnt, ha_pages_in_chunk);
} else {
pfn_cnt = size;
}
@@ -1010,10 +984,7 @@ static void hot_add_req(struct work_struct *dummy)
rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
- if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
- unsigned long region_size;
- unsigned long region_start;
-
+ if (rg_start == 0 && !dm->host_specified_ha_region) {
/*
* The host has not specified the hot-add region.
* Based on the hot-add page range being specified,
@@ -1021,19 +992,13 @@ static void hot_add_req(struct work_struct *dummy)
* that need to be hot-added while ensuring the alignment
* and size requirements of Linux as it relates to hot-add.
*/
- region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
- if (pfn_cnt % HA_CHUNK)
- region_size += HA_CHUNK;
-
- region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
-
- rg_start = region_start;
- rg_sz = region_size;
+ rg_start = ALIGN_DOWN(pg_start, ha_pages_in_chunk);
+ rg_sz = ALIGN(pfn_cnt, ha_pages_in_chunk);
}
if (do_hot_add)
resp.page_count = process_hot_add(pg_start, pfn_cnt,
- rg_start, rg_sz);
+ rg_start, rg_sz);
dm->num_pages_added += resp.page_count;
#endif
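
The balloon hunks above replace the open-coded round-up/round-down of the hot-add region with ALIGN()/ALIGN_DOWN(); for the power-of-two chunk counts involved these reduce to the usual mask arithmetic, sketched here with an illustrative chunk size:

#include <stdio.h>

/* Power-of-two alignment helpers, as the kernel macros expand for this case. */
#define ALIGN_DOWN_POW2(x, a)	((x) & ~((a) - 1))
#define ALIGN_UP_POW2(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long pages_in_chunk = 32768;	/* e.g. 128 MiB of 4 KiB pages; illustrative */
	unsigned long pg_start = 0x12345;
	unsigned long pfn_cnt = 50000;

	unsigned long rg_start = ALIGN_DOWN_POW2(pg_start, pages_in_chunk);
	unsigned long rg_sz    = ALIGN_UP_POW2(pfn_cnt, pages_in_chunk);

	printf("start pfn 0x%lx -> region start 0x%lx\n", pg_start, rg_start);
	printf("%lu pfns -> region size %lu pfns (%lu chunks)\n",
	       pfn_cnt, rg_sz, rg_sz / pages_in_chunk);
	return 0;
}
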
@@ -1211,11 +1176,10 @@ static void post_status(struct hv_dynmem_device *dm)
sizeof(struct dm_status),
(unsigned long)NULL,
VM_PKT_DATA_INBAND, 0);
-
}
static void free_balloon_pages(struct hv_dynmem_device *dm,
- union dm_mem_page_range *range_array)
+ union dm_mem_page_range *range_array)
{
int num_pages = range_array->finfo.page_cnt;
__u64 start_frame = range_array->finfo.start_page;
@@ -1231,8 +1195,6 @@ static void free_balloon_pages(struct hv_dynmem_device *dm,
}
}
-
-
static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
unsigned int num_pages,
struct dm_balloon_response *bl_resp,
@@ -1278,7 +1240,6 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
page_to_pfn(pg);
bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
bl_resp->hdr.size += sizeof(union dm_mem_page_range);
-
}
return i * alloc_unit;
@@ -1332,7 +1293,7 @@ static void balloon_up(struct work_struct *dummy)
if (num_ballooned == 0 || num_ballooned == num_pages) {
pr_debug("Ballooned %u out of %u requested pages.\n",
- num_pages, dm_device.balloon_wrk.num_pages);
+ num_pages, dm_device.balloon_wrk.num_pages);
bl_resp->more_pages = 0;
done = true;
@@ -1366,16 +1327,15 @@ static void balloon_up(struct work_struct *dummy)
for (i = 0; i < bl_resp->range_count; i++)
free_balloon_pages(&dm_device,
- &bl_resp->range_array[i]);
+ &bl_resp->range_array[i]);
done = true;
}
}
-
}
static void balloon_down(struct hv_dynmem_device *dm,
- struct dm_unballoon_request *req)
+ struct dm_unballoon_request *req)
{
union dm_mem_page_range *range_array = req->range_array;
int range_count = req->range_count;
@@ -1389,7 +1349,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
}
pr_debug("Freed %u ballooned pages.\n",
- prev_pages_ballooned - dm->num_pages_ballooned);
+ prev_pages_ballooned - dm->num_pages_ballooned);
if (req->more_pages == 1)
return;
@@ -1414,8 +1374,7 @@ static int dm_thread_func(void *dm_dev)
struct hv_dynmem_device *dm = dm_dev;
while (!kthread_should_stop()) {
- wait_for_completion_interruptible_timeout(
- &dm_device.config_event, 1*HZ);
+ wait_for_completion_interruptible_timeout(&dm_device.config_event, 1 * HZ);
/*
* The host expects us to post information on the memory
* pressure every second.
@@ -1439,9 +1398,8 @@ static int dm_thread_func(void *dm_dev)
return 0;
}
-
static void version_resp(struct hv_dynmem_device *dm,
- struct dm_version_response *vresp)
+ struct dm_version_response *vresp)
{
struct dm_version_request version_req;
int ret;
@@ -1502,7 +1460,7 @@ version_error:
}
static void cap_resp(struct hv_dynmem_device *dm,
- struct dm_capabilities_resp_msg *cap_resp)
+ struct dm_capabilities_resp_msg *cap_resp)
{
if (!cap_resp->is_accepted) {
pr_err("Capabilities not accepted by host\n");
@@ -1535,7 +1493,7 @@ static void balloon_onchannelcallback(void *context)
switch (dm_hdr->type) {
case DM_VERSION_RESPONSE:
version_resp(dm,
- (struct dm_version_response *)dm_msg);
+ (struct dm_version_response *)dm_msg);
break;
case DM_CAPABILITIES_RESPONSE:
@@ -1565,7 +1523,7 @@ static void balloon_onchannelcallback(void *context)
dm->state = DM_BALLOON_DOWN;
balloon_down(dm,
- (struct dm_unballoon_request *)recv_buffer);
+ (struct dm_unballoon_request *)recv_buffer);
break;
case DM_MEM_HOT_ADD_REQUEST:
@@ -1603,17 +1561,15 @@ static void balloon_onchannelcallback(void *context)
default:
pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type);
-
}
}
-
}
#define HV_LARGE_REPORTING_ORDER 9
#define HV_LARGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << \
HV_LARGE_REPORTING_ORDER)
static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
- struct scatterlist *sgl, unsigned int nents)
+ struct scatterlist *sgl, unsigned int nents)
{
unsigned long flags;
struct hv_memory_hint *hint;
@@ -1648,7 +1604,7 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
*/
/* page reporting for pages 2MB or higher */
- if (order >= HV_LARGE_REPORTING_ORDER ) {
+ if (order >= HV_LARGE_REPORTING_ORDER) {
range->page.largepage = 1;
range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
range->base_large_pfn = page_to_hvpfn(
@@ -1662,23 +1618,21 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
range->page.additional_pages =
(sg->length / HV_HYP_PAGE_SIZE) - 1;
}
-
}
status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0,
hint, NULL);
local_irq_restore(flags);
if (!hv_result_success(status)) {
-
pr_err("Cold memory discard hypercall failed with status %llx\n",
- status);
+ status);
if (hv_hypercall_multi_failure > 0)
hv_hypercall_multi_failure++;
if (hv_result(status) == HV_STATUS_INVALID_PARAMETER) {
pr_err("Underlying Hyper-V does not support order less than 9. Hypercall failed\n");
pr_err("Defaulting to page_reporting_order %d\n",
- pageblock_order);
+ pageblock_order);
page_reporting_order = pageblock_order;
hv_hypercall_multi_failure++;
return -EINVAL;
@@ -1712,7 +1666,7 @@ static void enable_page_reporting(void)
pr_err("Failed to enable cold memory discard: %d\n", ret);
} else {
pr_info("Cold memory discard hint enabled with order %d\n",
- page_reporting_order);
+ page_reporting_order);
}
}
@@ -1795,7 +1749,7 @@ static int balloon_connect_vsp(struct hv_device *dev)
if (ret)
goto out;
- t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
+ t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto out;
@@ -1831,10 +1785,13 @@ static int balloon_connect_vsp(struct hv_device *dev)
cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
/*
- * Specify our alignment requirements as it relates
- * memory hot-add. Specify 128MB alignment.
+ * Specify our alignment requirements for memory hot-add. The value is
+ * the log base 2 of the number of megabytes in a chunk. For example,
+ * with 256 MiB chunks, the value is 8. The number of MiB in a chunk
+ * must be a power of 2.
*/
- cap_msg.caps.cap_bits.hot_add_alignment = 7;
+ cap_msg.caps.cap_bits.hot_add_alignment =
+ ilog2(HA_BYTES_IN_CHUNK / SZ_1M);
/*
* Currently the host does not use these
@@ -1850,7 +1807,7 @@ static int balloon_connect_vsp(struct hv_device *dev)
if (ret)
goto out;
- t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
+ t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto out;
@@ -1891,8 +1848,8 @@ static int hv_balloon_debug_show(struct seq_file *f, void *offset)
char *sname;
seq_printf(f, "%-22s: %u.%u\n", "host_version",
- DYNMEM_MAJOR_VERSION(dm->version),
- DYNMEM_MINOR_VERSION(dm->version));
+ DYNMEM_MAJOR_VERSION(dm->version),
+ DYNMEM_MINOR_VERSION(dm->version));
seq_printf(f, "%-22s:", "capabilities");
if (ballooning_enabled())
@@ -1941,10 +1898,10 @@ static int hv_balloon_debug_show(struct seq_file *f, void *offset)
seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned);
seq_printf(f, "%-22s: %lu\n", "total_pages_committed",
- get_pages_committed(dm));
+ get_pages_committed(dm));
seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count",
- dm->max_dynamic_page_count);
+ dm->max_dynamic_page_count);
return 0;
}
@@ -1954,7 +1911,7 @@ DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug);
static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
{
debugfs_create_file("hv-balloon", 0444, NULL, b,
- &hv_balloon_debug_fops);
+ &hv_balloon_debug_fops);
}
static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
@@ -1984,8 +1941,23 @@ static int balloon_probe(struct hv_device *dev,
hot_add = false;
#ifdef CONFIG_MEMORY_HOTPLUG
+ /*
+ * Hot-add must operate in chunks that are of size equal to the
+ * memory block size because that's what the core add_memory()
+ * interface requires. The Hyper-V interface requires that the memory
+ * block size be a power of 2, which is guaranteed by the check in
+ * memory_dev_init().
+ */
+ ha_pages_in_chunk = memory_block_size_bytes() / PAGE_SIZE;
do_hot_add = hot_add;
#else
+ /*
+ * Without MEMORY_HOTPLUG, the guest returns a failure status for all
+ * hot add requests from Hyper-V, and the chunk size is used only to
+ * specify alignment to Hyper-V as required by the host/guest protocol.
+ * Somewhat arbitrarily, use 128 MiB.
+ */
+ ha_pages_in_chunk = SZ_128M / PAGE_SIZE;
do_hot_add = false;
#endif
dm_device.dev = dev;
@@ -2097,7 +2069,6 @@ static int balloon_suspend(struct hv_device *hv_dev)
tasklet_enable(&hv_dev->channel->callback_event);
return 0;
-
}
static int balloon_resume(struct hv_device *dev)
@@ -2156,7 +2127,6 @@ static struct hv_driver balloon_drv = {
static int __init init_balloon_drv(void)
{
-
return vmbus_driver_register(&balloon_drv);
}
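The hv_balloon hunks above replace the open-coded round-up/round-down arithmetic on HA_CHUNK with ALIGN()/ALIGN_DOWN() and derive the alignment value reported to the host with ilog2(). A minimal userspace sketch of the same arithmetic follows; the macro bodies mirror the usual kernel definitions and, like them, only work for power-of-two alignments, the 128 MiB chunk / 4 KiB page figures are purely illustrative, and __builtin_ctzl() stands in for the kernel's ilog2().

#include <stdio.h>

/* Same shape as the kernel helpers; valid only for power-of-two 'a'. */
#define ALIGN(x, a)       (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long ha_bytes_in_chunk = 128UL << 20;	/* 128 MiB, illustrative */
	unsigned long ha_pages_in_chunk = ha_bytes_in_chunk / page_size;

	unsigned long pg_start = 100000, pfn_cnt = 50000;

	/* The shape of what hot_add_req() now computes for an unspecified region. */
	unsigned long rg_start = ALIGN_DOWN(pg_start, ha_pages_in_chunk);
	unsigned long rg_sz    = ALIGN(pfn_cnt, ha_pages_in_chunk);

	/* Alignment reported to the host: log2 of the chunk size in MiB. */
	unsigned int hot_add_alignment = __builtin_ctzl(ha_bytes_in_chunk >> 20);

	printf("rg_start=%lu rg_sz=%lu alignment=%u\n",
	       rg_start, rg_sz, hot_add_alignment);
	return 0;
}

For a 128 MiB chunk the computed alignment is 7, matching the value that was previously hardcoded in balloon_connect_vsp().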
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 48a81c64f00d..942526bd4775 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1545,6 +1545,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
},
.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
},
+ {
+ .ident = "Dell G15 5511",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Dell G15 5511"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
+ },
{ }
};
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 4c8a80847891..5722cb9d81f9 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -33,6 +33,17 @@ struct iio_hwmon_state {
struct attribute **attrs;
};
+static ssize_t iio_hwmon_read_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+ struct iio_hwmon_state *state = dev_get_drvdata(dev);
+ struct iio_channel *chan = &state->channels[sattr->index];
+
+ return iio_read_channel_label(chan, buf);
+}
+
/*
* Assumes that IIO and hwmon operate in the same base units.
* This is supposed to be true, but needs verification for
@@ -68,12 +79,13 @@ static int iio_hwmon_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct iio_hwmon_state *st;
struct sensor_device_attribute *a;
- int ret, i;
+ int ret, i, attr = 0;
int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1, power_i = 1;
enum iio_chan_type type;
struct iio_channel *channels;
struct device *hwmon_dev;
char *sname;
+ void *buf;
channels = devm_iio_channel_get_all(dev);
if (IS_ERR(channels)) {
@@ -85,17 +97,18 @@ static int iio_hwmon_probe(struct platform_device *pdev)
}
st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
- if (st == NULL)
+ buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
+ if (!st || !buf)
return -ENOMEM;
st->channels = channels;
- /* count how many attributes we have */
+ /* count how many channels we have */
while (st->channels[st->num_channels].indio_dev)
st->num_channels++;
st->attrs = devm_kcalloc(dev,
- st->num_channels + 1, sizeof(*st->attrs),
+ 2 * st->num_channels + 1, sizeof(*st->attrs),
GFP_KERNEL);
if (st->attrs == NULL)
return -ENOMEM;
@@ -147,9 +160,31 @@ static int iio_hwmon_probe(struct platform_device *pdev)
a->dev_attr.show = iio_hwmon_read_val;
a->dev_attr.attr.mode = 0444;
a->index = i;
- st->attrs[i] = &a->dev_attr.attr;
+ st->attrs[attr++] = &a->dev_attr.attr;
+
+ /* Let's see if we have a label... */
+ if (iio_read_channel_label(&st->channels[i], buf) < 0)
+ continue;
+
+ a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL);
+ if (a == NULL)
+ return -ENOMEM;
+
+ sysfs_attr_init(&a->dev_attr.attr);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s%d_label",
+ prefix, n);
+ if (!a->dev_attr.attr.name)
+ return -ENOMEM;
+
+ a->dev_attr.show = iio_hwmon_read_label;
+ a->dev_attr.attr.mode = 0444;
+ a->index = i;
+ st->attrs[attr++] = &a->dev_attr.attr;
}
+ devm_free_pages(dev, (unsigned long)buf);
+
st->attr_group.attrs = st->attrs;
st->groups[0] = &st->attr_group;
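The iio_hwmon change above adds an optional <type><N>_label attribute next to each <type><N>_input, filled from iio_read_channel_label(). A small sketch of how that pair looks from userspace; the hwmon instance number and channel index used here are assumptions, since they depend on probe order and on which IIO channels actually provide a label.

#include <stdio.h>

static void read_attr(const char *path)
{
	char buf[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		printf("%s: not present\n", path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	/* Hypothetical paths for one iio_hwmon instance. */
	read_attr("/sys/class/hwmon/hwmon3/in1_input");
	read_attr("/sys/class/hwmon/hwmon3/in1_label");	/* new with this change */
	return 0;
}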
diff --git a/drivers/hwmon/intel-m10-bmc-hwmon.c b/drivers/hwmon/intel-m10-bmc-hwmon.c
index 6500ca548f9c..ca2dff158925 100644
--- a/drivers/hwmon/intel-m10-bmc-hwmon.c
+++ b/drivers/hwmon/intel-m10-bmc-hwmon.c
@@ -429,7 +429,7 @@ static const struct m10bmc_sdata n6000bmc_curr_tbl[] = {
};
static const struct m10bmc_sdata n6000bmc_power_tbl[] = {
- { 0x724, 0x0, 0x0, 0x0, 0x0, 1, "Board Power" },
+ { 0x724, 0x0, 0x0, 0x0, 0x0, 1000, "Board Power" },
};
static const struct hwmon_channel_info * const n6000bmc_hinfo[] = {
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index 229aed15d5ca..d4a93223cd3b 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -876,9 +876,11 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
if (!ret) {
- if (!val)
+ if (!val) {
+ fwnode_handle_put(child);
return dev_err_probe(&st->client->dev, -EINVAL,
"shunt resistor value cannot be zero\n");
+ }
st->r_sense_uohm[addr] = val;
}
}
diff --git a/drivers/hwmon/peci/cputemp.c b/drivers/hwmon/peci/cputemp.c
index a812c15948d9..5a682195b98f 100644
--- a/drivers/hwmon/peci/cputemp.c
+++ b/drivers/hwmon/peci/cputemp.c
@@ -360,10 +360,10 @@ static int init_core_mask(struct peci_cputemp *priv)
int ret;
/* Get the RESOLVED_CORES register value */
- switch (peci_dev->info.model) {
- case INTEL_FAM6_ICELAKE_X:
- case INTEL_FAM6_ICELAKE_D:
- case INTEL_FAM6_SAPPHIRERAPIDS_X:
+ switch (peci_dev->info.x86_vfm) {
+ case INTEL_ICELAKE_X:
+ case INTEL_ICELAKE_D:
+ case INTEL_SAPPHIRERAPIDS_X:
ret = peci_ep_pci_local_read(peci_dev, 0, reg->bus, reg->dev,
reg->func, reg->offset + 4, &data);
if (ret)
diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
index 1f96e94967ee..439dd3dba5fc 100644
--- a/drivers/hwmon/shtc1.c
+++ b/drivers/hwmon/shtc1.c
@@ -238,7 +238,7 @@ static int shtc1_probe(struct i2c_client *client)
if (np) {
data->setup.blocking_io = of_property_read_bool(np, "sensirion,blocking-io");
- data->setup.high_precision = !of_property_read_bool(np, "sensicon,low-precision");
+ data->setup.high_precision = !of_property_read_bool(np, "sensirion,low-precision");
} else {
if (client->dev.platform_data)
data->setup = *(struct shtc1_platform_data *)dev->platform_data;
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index 9d550f5697fa..64e171eaad82 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -275,7 +275,7 @@ static int of_get_coresight_platform_data(struct device *dev,
*/
if (!parent) {
/*
- * Avoid warnings in of_graph_get_next_endpoint()
+ * Avoid warnings in for_each_endpoint_of_node()
* if the device doesn't have any graph connections
*/
if (!of_graph_is_present(node))
@@ -286,7 +286,7 @@ static int of_get_coresight_platform_data(struct device *dev,
}
/* Iterate through each output port to discover topology */
- while ((ep = of_graph_get_next_endpoint(parent, ep))) {
+ for_each_endpoint_of_node(parent, ep) {
/*
* Legacy binding mixes input/output ports under the
* same parent. So, skip the input ports if we are dealing
@@ -297,8 +297,10 @@ static int of_get_coresight_platform_data(struct device *dev,
continue;
ret = of_coresight_parse_endpoint(dev, ep, pdata);
- if (ret)
+ if (ret) {
+ of_node_put(ep);
return ret;
+ }
}
return 0;
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index fc3617642b01..61a46d3bdcc8 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -13,7 +13,7 @@
#include <linux/pm_runtime.h>
extern struct mutex coresight_mutex;
-extern struct device_type coresight_dev_type[];
+extern const struct device_type coresight_dev_type[];
/*
* Coresight management registers (0xf00-0xfcc)
diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c
index f9444e2cb1d9..1e67cc7758d7 100644
--- a/drivers/hwtracing/coresight/coresight-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-sysfs.c
@@ -377,7 +377,7 @@ static struct attribute *coresight_source_attrs[] = {
};
ATTRIBUTE_GROUPS(coresight_source);
-struct device_type coresight_dev_type[] = {
+const struct device_type coresight_dev_type[] = {
[CORESIGHT_DEV_TYPE_SINK] = {
.name = "sink",
.groups = coresight_sink_groups,
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 4f11a739ae4d..b54562f392f3 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -26,7 +26,6 @@
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
-#include <linux/acpi.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
diff --git a/drivers/hwtracing/intel_th/msu-sink.c b/drivers/hwtracing/intel_th/msu-sink.c
index 891b28ea25fe..256ce3260ad9 100644
--- a/drivers/hwtracing/intel_th/msu-sink.c
+++ b/drivers/hwtracing/intel_th/msu-sink.c
@@ -116,4 +116,5 @@ static const struct msu_buffer sink_mbuf = {
module_intel_th_msu_buffer(sink_mbuf);
+MODULE_DESCRIPTION("example software sink buffer for Intel TH MSU");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 3d65934f5eb4..78d0561339e5 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -29,8 +29,7 @@ obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o
obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o
obj-$(CONFIG_I2C_VIA) += i2c-via.o
obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o
-i2c-zhaoxin-objs := i2c-viai2c-zhaoxin.o i2c-viai2c-common.o
-obj-$(CONFIG_I2C_ZHAOXIN) += i2c-zhaoxin.o
+obj-$(CONFIG_I2C_ZHAOXIN) += i2c-viai2c-zhaoxin.o i2c-viai2c-common.o
# Mac SMBus host controller drivers
obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o
@@ -120,8 +119,7 @@ obj-$(CONFIG_I2C_TEGRA_BPMP) += i2c-tegra-bpmp.o
obj-$(CONFIG_I2C_UNIPHIER) += i2c-uniphier.o
obj-$(CONFIG_I2C_UNIPHIER_F) += i2c-uniphier-f.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
-i2c-wmt-objs := i2c-viai2c-wmt.o i2c-viai2c-common.o
-obj-$(CONFIG_I2C_WMT) += i2c-wmt.o
+obj-$(CONFIG_I2C_WMT) += i2c-viai2c-wmt.o i2c-viai2c-common.o
i2c-octeon-objs := i2c-octeon-core.o i2c-octeon-platdrv.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
i2c-thunderx-objs := i2c-octeon-core.o i2c-thunderx-pcidrv.o
diff --git a/drivers/i2c/busses/i2c-at91-slave.c b/drivers/i2c/busses/i2c-at91-slave.c
index d6eeea5166c0..131a67d9d4a6 100644
--- a/drivers/i2c/busses/i2c-at91-slave.c
+++ b/drivers/i2c/busses/i2c-at91-slave.c
@@ -106,8 +106,7 @@ static int at91_unreg_slave(struct i2c_client *slave)
static u32 at91_twi_func(struct i2c_adapter *adapter)
{
- return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
- | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
+ return I2C_FUNC_SLAVE;
}
static const struct i2c_algorithm at91_twi_algorithm_slave = {
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 2e079cf20bb5..78e2c47e3d7d 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -220,7 +220,7 @@ static const struct i2c_algorithm i2c_dw_algo = {
void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
{
- dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
+ dev->functionality = I2C_FUNC_SLAVE;
dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 56a4dabf5a38..4ad670a80a63 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -431,8 +431,8 @@ static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8);
/* Init the device */
- oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_EN);
+ oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 0af86a542568..3249bbd5eb43 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -400,7 +400,6 @@ static struct parport_driver i2c_parport_driver = {
.name = "i2c-parport",
.match_port = i2c_parport_attach,
.detach = i2c_parport_detach,
- .devmodel = true,
};
module_parport_driver(i2c_parport_driver);
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index 31ecb2c7e978..4eccbcd0fbfc 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -138,7 +138,6 @@ struct synquacer_i2c {
int irq;
struct device *dev;
void __iomem *base;
- struct clk *pclk;
u32 pclkrate;
u32 speed_khz;
u32 timeout_ms;
@@ -535,6 +534,7 @@ static const struct i2c_adapter synquacer_i2c_ops = {
static int synquacer_i2c_probe(struct platform_device *pdev)
{
struct synquacer_i2c *i2c;
+ struct clk *pclk;
u32 bus_speed;
int ret;
@@ -550,13 +550,12 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
device_property_read_u32(&pdev->dev, "socionext,pclk-rate",
&i2c->pclkrate);
- i2c->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
- if (IS_ERR(i2c->pclk))
- return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk),
+ pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
+ if (IS_ERR(pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pclk),
"failed to get and enable clock\n");
- dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
- i2c->pclkrate = clk_get_rate(i2c->pclk);
+ i2c->pclkrate = clk_get_rate(pclk);
if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE ||
i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE)
diff --git a/drivers/i2c/busses/i2c-viai2c-common.c b/drivers/i2c/busses/i2c-viai2c-common.c
index 1844d13f1f79..162b31306cba 100644
--- a/drivers/i2c/busses/i2c-viai2c-common.c
+++ b/drivers/i2c/busses/i2c-viai2c-common.c
@@ -17,6 +17,7 @@ int viai2c_wait_bus_not_busy(struct viai2c *i2c)
return 0;
}
+EXPORT_SYMBOL_GPL(viai2c_wait_bus_not_busy);
static int viai2c_write(struct viai2c *i2c, struct i2c_msg *pmsg, int last)
{
@@ -121,6 +122,7 @@ int viai2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
return (ret < 0) ? ret : i;
}
+EXPORT_SYMBOL_GPL(viai2c_xfer);
/*
* Main process of the byte mode xfer
@@ -130,7 +132,7 @@ int viai2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
* 0: there is still data that needs to be transferred
* -EIO: error occurred
*/
-static int viai2c_irq_xfer(struct viai2c *i2c)
+int viai2c_irq_xfer(struct viai2c *i2c)
{
u16 val;
struct i2c_msg *msg = i2c->msg;
@@ -171,51 +173,11 @@ static int viai2c_irq_xfer(struct viai2c *i2c)
return i2c->xfered_len == msg->len;
}
-
-int __weak viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq)
-{
- return 0;
-}
-
-static irqreturn_t viai2c_isr(int irq, void *data)
-{
- struct viai2c *i2c = data;
- u8 status;
-
- /* save the status and write-clear it */
- status = readw(i2c->base + VIAI2C_REG_ISR);
- if (!status && i2c->platform == VIAI2C_PLAT_ZHAOXIN)
- return IRQ_NONE;
-
- writew(status, i2c->base + VIAI2C_REG_ISR);
-
- i2c->ret = 0;
- if (status & VIAI2C_ISR_NACK_ADDR)
- i2c->ret = -EIO;
-
- if (i2c->platform == VIAI2C_PLAT_WMT && (status & VIAI2C_ISR_SCL_TIMEOUT))
- i2c->ret = -ETIMEDOUT;
-
- if (!i2c->ret) {
- if (i2c->mode == VIAI2C_BYTE_MODE)
- i2c->ret = viai2c_irq_xfer(i2c);
- else
- i2c->ret = viai2c_fifo_irq_xfer(i2c, true);
- }
-
- /* All the data has been successfully transferred or error occurred */
- if (i2c->ret)
- complete(&i2c->complete);
-
- return IRQ_HANDLED;
-}
+EXPORT_SYMBOL_GPL(viai2c_irq_xfer);
int viai2c_init(struct platform_device *pdev, struct viai2c **pi2c, int plat)
{
- int err;
- int irq_flags;
struct viai2c *i2c;
- struct device_node *np = pdev->dev.of_node;
i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
@@ -225,28 +187,8 @@ int viai2c_init(struct platform_device *pdev, struct viai2c **pi2c, int plat)
if (IS_ERR(i2c->base))
return PTR_ERR(i2c->base);
- if (plat == VIAI2C_PLAT_WMT) {
- irq_flags = 0;
- i2c->irq = irq_of_parse_and_map(np, 0);
- if (!i2c->irq)
- return -EINVAL;
- } else if (plat == VIAI2C_PLAT_ZHAOXIN) {
- irq_flags = IRQF_SHARED;
- i2c->irq = platform_get_irq(pdev, 0);
- if (i2c->irq < 0)
- return i2c->irq;
- } else {
- return dev_err_probe(&pdev->dev, -EINVAL, "wrong platform type\n");
- }
-
i2c->platform = plat;
- err = devm_request_irq(&pdev->dev, i2c->irq, viai2c_isr,
- irq_flags, pdev->name, i2c);
- if (err)
- return dev_err_probe(&pdev->dev, err,
- "failed to request irq %i\n", i2c->irq);
-
i2c->dev = &pdev->dev;
init_completion(&i2c->complete);
platform_set_drvdata(pdev, i2c);
@@ -254,3 +196,8 @@ int viai2c_init(struct platform_device *pdev, struct viai2c **pi2c, int plat)
*pi2c = i2c;
return 0;
}
+EXPORT_SYMBOL_GPL(viai2c_init);
+
+MODULE_DESCRIPTION("Via/Wondermedia/Zhaoxin I2C master-mode bus adapter");
+MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-viai2c-common.h b/drivers/i2c/busses/i2c-viai2c-common.h
index 81e827c54434..00f17733223c 100644
--- a/drivers/i2c/busses/i2c-viai2c-common.h
+++ b/drivers/i2c/busses/i2c-viai2c-common.h
@@ -80,6 +80,6 @@ struct viai2c {
int viai2c_wait_bus_not_busy(struct viai2c *i2c);
int viai2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num);
int viai2c_init(struct platform_device *pdev, struct viai2c **pi2c, int plat);
-int viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq);
+int viai2c_irq_xfer(struct viai2c *i2c);
#endif
diff --git a/drivers/i2c/busses/i2c-viai2c-wmt.c b/drivers/i2c/busses/i2c-viai2c-wmt.c
index e1988f946026..420fd10fe3aa 100644
--- a/drivers/i2c/busses/i2c-viai2c-wmt.c
+++ b/drivers/i2c/busses/i2c-viai2c-wmt.c
@@ -72,6 +72,32 @@ static int wmt_i2c_reset_hardware(struct viai2c *i2c)
return 0;
}
+static irqreturn_t wmt_i2c_isr(int irq, void *data)
+{
+ struct viai2c *i2c = data;
+ u8 status;
+
+ /* save the status and write-clear it */
+ status = readw(i2c->base + VIAI2C_REG_ISR);
+ writew(status, i2c->base + VIAI2C_REG_ISR);
+
+ i2c->ret = 0;
+ if (status & VIAI2C_ISR_NACK_ADDR)
+ i2c->ret = -EIO;
+
+ if (status & VIAI2C_ISR_SCL_TIMEOUT)
+ i2c->ret = -ETIMEDOUT;
+
+ if (!i2c->ret)
+ i2c->ret = viai2c_irq_xfer(i2c);
+
+ /* All the data has been successfully transferred or error occurred */
+ if (i2c->ret)
+ complete(&i2c->complete);
+
+ return IRQ_HANDLED;
+}
+
static int wmt_i2c_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -84,6 +110,16 @@ static int wmt_i2c_probe(struct platform_device *pdev)
if (err)
return err;
+ i2c->irq = platform_get_irq(pdev, 0);
+ if (i2c->irq < 0)
+ return i2c->irq;
+
+ err = devm_request_irq(&pdev->dev, i2c->irq, wmt_i2c_isr,
+ 0, pdev->name, i2c);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to request irq %i\n", i2c->irq);
+
i2c->clk = of_clk_get(np, 0);
if (IS_ERR(i2c->clk)) {
dev_err(&pdev->dev, "unable to request clock\n");
diff --git a/drivers/i2c/busses/i2c-viai2c-zhaoxin.c b/drivers/i2c/busses/i2c-viai2c-zhaoxin.c
index 7e3ac2a3e1fd..ab3e44e147e9 100644
--- a/drivers/i2c/busses/i2c-viai2c-zhaoxin.c
+++ b/drivers/i2c/busses/i2c-viai2c-zhaoxin.c
@@ -49,8 +49,7 @@ struct viai2c_zhaoxin {
u16 xfer_len;
};
-/* 'irq == true' means in interrupt context */
-int viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq)
+static int viai2c_fifo_xfer(struct viai2c *i2c)
{
u16 i;
u8 tmp;
@@ -59,17 +58,6 @@ int viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq)
bool read = !!(msg->flags & I2C_M_RD);
struct viai2c_zhaoxin *priv = i2c->pltfm_priv;
- if (irq) {
- /* get the received data */
- if (read)
- for (i = 0; i < priv->xfer_len; i++)
- msg->buf[i2c->xfered_len + i] = ioread8(base + ZXI2C_REG_HRDR);
-
- i2c->xfered_len += priv->xfer_len;
- if (i2c->xfered_len == msg->len)
- return 1;
- }
-
/* reset fifo buffer */
tmp = ioread8(base + ZXI2C_REG_HCR);
iowrite8(tmp | ZXI2C_HCR_RST_FIFO, base + ZXI2C_REG_HCR);
@@ -92,18 +80,59 @@ int viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq)
iowrite8(tmp, base + VIAI2C_REG_CR);
}
- if (irq) {
- /* continue transmission */
- tmp = ioread8(base + VIAI2C_REG_CR);
- iowrite8(tmp |= VIAI2C_CR_CPU_RDY, base + VIAI2C_REG_CR);
+ u16 tcr_val = i2c->tcr;
+
+ /* start transmission */
+ tcr_val |= read ? VIAI2C_TCR_READ : 0;
+ writew(tcr_val | msg->addr, base + VIAI2C_REG_TCR);
+
+ return 0;
+}
+
+static int viai2c_fifo_irq_xfer(struct viai2c *i2c)
+{
+ u16 i;
+ u8 tmp;
+ struct i2c_msg *msg = i2c->msg;
+ void __iomem *base = i2c->base;
+ bool read = !!(msg->flags & I2C_M_RD);
+ struct viai2c_zhaoxin *priv = i2c->pltfm_priv;
+
+ /* get the received data */
+ if (read)
+ for (i = 0; i < priv->xfer_len; i++)
+ msg->buf[i2c->xfered_len + i] = ioread8(base + ZXI2C_REG_HRDR);
+
+ i2c->xfered_len += priv->xfer_len;
+ if (i2c->xfered_len == msg->len)
+ return 1;
+
+ /* reset fifo buffer */
+ tmp = ioread8(base + ZXI2C_REG_HCR);
+ iowrite8(tmp | ZXI2C_HCR_RST_FIFO, base + ZXI2C_REG_HCR);
+
+ /* set xfer len */
+ priv->xfer_len = min_t(u16, msg->len - i2c->xfered_len, ZXI2C_FIFO_SIZE);
+ if (read) {
+ iowrite8(priv->xfer_len - 1, base + ZXI2C_REG_HRLR);
} else {
- u16 tcr_val = i2c->tcr;
+ iowrite8(priv->xfer_len - 1, base + ZXI2C_REG_HTLR);
+ /* set write data */
+ for (i = 0; i < priv->xfer_len; i++)
+ iowrite8(msg->buf[i2c->xfered_len + i], base + ZXI2C_REG_HTDR);
+ }
- /* start transmission */
- tcr_val |= read ? VIAI2C_TCR_READ : 0;
- writew(tcr_val | msg->addr, base + VIAI2C_REG_TCR);
+ /* prepare to stop transmission */
+ if (priv->hrv && msg->len == (i2c->xfered_len + priv->xfer_len)) {
+ tmp = ioread8(base + VIAI2C_REG_CR);
+ tmp |= read ? VIAI2C_CR_RX_END : VIAI2C_CR_TX_END;
+ iowrite8(tmp, base + VIAI2C_REG_CR);
}
+ /* continue transmission */
+ tmp = ioread8(base + VIAI2C_REG_CR);
+ iowrite8(tmp |= VIAI2C_CR_CPU_RDY, base + VIAI2C_REG_CR);
+
return 0;
}
@@ -135,7 +164,7 @@ static int zxi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int
priv->xfer_len = 0;
i2c->xfered_len = 0;
- viai2c_fifo_irq_xfer(i2c, 0);
+ viai2c_fifo_xfer(i2c);
if (!wait_for_completion_timeout(&i2c->complete, VIAI2C_TIMEOUT))
return -ETIMEDOUT;
@@ -228,6 +257,36 @@ static void zxi2c_get_bus_speed(struct viai2c *i2c)
dev_info(i2c->dev, "speed mode is %s\n", i2c_freq_mode_string(params[0]));
}
+static irqreturn_t zxi2c_isr(int irq, void *data)
+{
+ struct viai2c *i2c = data;
+ u8 status;
+
+ /* save the status and write-clear it */
+ status = readw(i2c->base + VIAI2C_REG_ISR);
+ if (!status)
+ return IRQ_NONE;
+
+ writew(status, i2c->base + VIAI2C_REG_ISR);
+
+ i2c->ret = 0;
+ if (status & VIAI2C_ISR_NACK_ADDR)
+ i2c->ret = -EIO;
+
+ if (!i2c->ret) {
+ if (i2c->mode == VIAI2C_BYTE_MODE)
+ i2c->ret = viai2c_irq_xfer(i2c);
+ else
+ i2c->ret = viai2c_fifo_irq_xfer(i2c);
+ }
+
+ /* All the data has been successfully transferred or error occurred */
+ if (i2c->ret)
+ complete(&i2c->complete);
+
+ return IRQ_HANDLED;
+}
+
static int zxi2c_probe(struct platform_device *pdev)
{
int error;
@@ -239,6 +298,16 @@ static int zxi2c_probe(struct platform_device *pdev)
if (error)
return error;
+ i2c->irq = platform_get_irq(pdev, 0);
+ if (i2c->irq < 0)
+ return i2c->irq;
+
+ error = devm_request_irq(&pdev->dev, i2c->irq, zxi2c_isr,
+ IRQF_SHARED, pdev->name, i2c);
+ if (error)
+ return dev_err_probe(&pdev->dev, error,
+ "failed to request irq %i\n", i2c->irq);
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index a49642bbae4b..ca43e98cae1b 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -118,9 +118,12 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
queue_delayed_work(system_long_wq, &tu->worker,
msecs_to_jiffies(10 * tu->regs[TU_REG_DELAY]));
}
- fallthrough;
+ break;
case I2C_SLAVE_WRITE_REQUESTED:
+ if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
+ return -EBUSY;
+
memset(tu->regs, 0, TU_NUM_REGS);
tu->reg_idx = 0;
break;
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 9c351ffc7bed..661127aed2f9 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -14,6 +14,7 @@ if IIO
config IIO_BUFFER
bool "Enable buffer support within IIO"
+ select DMA_SHARED_BUFFER
help
Provide core support for various buffer based data
acquisition methods.
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index c2da5066e9a7..80b57d3ee3a7 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -330,6 +330,8 @@ config DMARD10
config FXLS8962AF
tristate
depends on I2C || !I2C # cannot be built-in for modular I2C
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
config FXLS8962AF_I2C
tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver"
diff --git a/drivers/iio/accel/adxl313_spi.c b/drivers/iio/accel/adxl313_spi.c
index b7cc15678a2b..6f8d73f6e5a9 100644
--- a/drivers/iio/accel/adxl313_spi.c
+++ b/drivers/iio/accel/adxl313_spi.c
@@ -72,13 +72,7 @@ static int adxl313_spi_probe(struct spi_device *spi)
if (ret)
return ret;
- /*
- * Retrieves device specific data as a pointer to a
- * adxl313_chip_info structure
- */
- chip_data = device_get_match_data(&spi->dev);
- if (!chip_data)
- chip_data = (const struct adxl313_chip_info *)spi_get_device_id(spi)->driver_data;
+ chip_data = spi_get_device_match_data(spi);
regmap = devm_regmap_init_spi(spi,
&adxl31x_spi_regmap_config[chip_data->type]);
diff --git a/drivers/iio/accel/adxl355_spi.c b/drivers/iio/accel/adxl355_spi.c
index fc99534d91ff..5153ac815e4b 100644
--- a/drivers/iio/accel/adxl355_spi.c
+++ b/drivers/iio/accel/adxl355_spi.c
@@ -28,13 +28,9 @@ static int adxl355_spi_probe(struct spi_device *spi)
const struct adxl355_chip_info *chip_data;
struct regmap *regmap;
- chip_data = device_get_match_data(&spi->dev);
- if (!chip_data) {
- chip_data = (void *)spi_get_device_id(spi)->driver_data;
-
- if (!chip_data)
- return -EINVAL;
- }
+ chip_data = spi_get_device_match_data(spi);
+ if (!chip_data)
+ return -EINVAL;
regmap = devm_regmap_init_spi(spi, &adxl355_spi_regmap_config);
if (IS_ERR(regmap)) {
diff --git a/drivers/iio/accel/adxl367_i2c.c b/drivers/iio/accel/adxl367_i2c.c
index 62c74bdc0d77..deb82a43ec36 100644
--- a/drivers/iio/accel/adxl367_i2c.c
+++ b/drivers/iio/accel/adxl367_i2c.c
@@ -61,8 +61,8 @@ static int adxl367_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id adxl367_i2c_id[] = {
- { "adxl367", 0 },
- { },
+ { "adxl367" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, adxl367_i2c_id);
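The I2C ID table just above drops the trailing ", 0" from its entry, and the same cleanup appears further down for adxl372, bma400, da311, dmard06/09/10, kxsd9, mc3230, mma7455/7660/9551/9553, mxc4005, mxc6255, stk8312 and stk8ba50. It relies on standard C aggregate initialization: members omitted from a brace initializer are zero-initialized, so driver_data still ends up 0. A toy stand-in is shown below; it is not the real struct i2c_device_id, just the language rule.

#include <stdio.h>

/* Toy stand-in for struct i2c_device_id: a name plus driver_data. */
struct toy_id {
	char name[20];
	unsigned long driver_data;
};

int main(void)
{
	/* Members not named in the initializer are zero-initialized (C99 6.7.8). */
	struct toy_id a = { "adxl367", 0 };
	struct toy_id b = { "adxl367" };

	printf("a.driver_data=%lu b.driver_data=%lu\n",
	       a.driver_data, b.driver_data);	/* both print 0 */
	return 0;
}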
diff --git a/drivers/iio/accel/adxl372_i2c.c b/drivers/iio/accel/adxl372_i2c.c
index d0690417fd36..3571cfde1c0e 100644
--- a/drivers/iio/accel/adxl372_i2c.c
+++ b/drivers/iio/accel/adxl372_i2c.c
@@ -42,7 +42,7 @@ static int adxl372_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id adxl372_i2c_id[] = {
- { "adxl372", 0 },
+ { "adxl372" },
{}
};
MODULE_DEVICE_TABLE(i2c, adxl372_i2c_id);
diff --git a/drivers/iio/accel/bma400_i2c.c b/drivers/iio/accel/bma400_i2c.c
index adf4e3fd2e1d..c1c72f577295 100644
--- a/drivers/iio/accel/bma400_i2c.c
+++ b/drivers/iio/accel/bma400_i2c.c
@@ -28,7 +28,7 @@ static int bma400_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id bma400_i2c_ids[] = {
- { "bma400", 0 },
+ { "bma400" },
{ }
};
MODULE_DEVICE_TABLE(i2c, bma400_i2c_ids);
diff --git a/drivers/iio/accel/bmi088-accel-core.c b/drivers/iio/accel/bmi088-accel-core.c
index 4d989708e6c3..469a1255d93c 100644
--- a/drivers/iio/accel/bmi088-accel-core.c
+++ b/drivers/iio/accel/bmi088-accel-core.c
@@ -114,11 +114,6 @@ enum bmi088_odr_modes {
BMI088_ACCEL_MODE_ODR_1600 = 0xc,
};
-struct bmi088_scale_info {
- int scale;
- u8 reg_range;
-};
-
struct bmi088_accel_chip_info {
const char *name;
u8 chip_id;
diff --git a/drivers/iio/accel/da311.c b/drivers/iio/accel/da311.c
index 8f919920ced5..94f827acdd1c 100644
--- a/drivers/iio/accel/da311.c
+++ b/drivers/iio/accel/da311.c
@@ -268,7 +268,7 @@ static int da311_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(da311_pm_ops, da311_suspend, da311_resume);
static const struct i2c_device_id da311_i2c_id[] = {
- {"da311", 0},
+ { "da311" },
{}
};
MODULE_DEVICE_TABLE(i2c, da311_i2c_id);
diff --git a/drivers/iio/accel/dmard06.c b/drivers/iio/accel/dmard06.c
index 2e719d60fff8..fb14894c66f9 100644
--- a/drivers/iio/accel/dmard06.c
+++ b/drivers/iio/accel/dmard06.c
@@ -201,9 +201,9 @@ static DEFINE_SIMPLE_DEV_PM_OPS(dmard06_pm_ops, dmard06_suspend,
dmard06_resume);
static const struct i2c_device_id dmard06_id[] = {
- { "dmard05", 0 },
- { "dmard06", 0 },
- { "dmard07", 0 },
+ { "dmard05" },
+ { "dmard06" },
+ { "dmard07" },
{ }
};
MODULE_DEVICE_TABLE(i2c, dmard06_id);
diff --git a/drivers/iio/accel/dmard09.c b/drivers/iio/accel/dmard09.c
index fa98623de579..6644c1fec3e6 100644
--- a/drivers/iio/accel/dmard09.c
+++ b/drivers/iio/accel/dmard09.c
@@ -125,8 +125,8 @@ static int dmard09_probe(struct i2c_client *client)
}
static const struct i2c_device_id dmard09_id[] = {
- { "dmard09", 0 },
- { },
+ { "dmard09" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, dmard09_id);
diff --git a/drivers/iio/accel/dmard10.c b/drivers/iio/accel/dmard10.c
index 7745b6ffd1ad..35c0eefb741e 100644
--- a/drivers/iio/accel/dmard10.c
+++ b/drivers/iio/accel/dmard10.c
@@ -231,7 +231,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(dmard10_pm_ops, dmard10_suspend,
dmard10_resume);
static const struct i2c_device_id dmard10_i2c_id[] = {
- {"dmard10", 0},
+ { "dmard10" },
{}
};
MODULE_DEVICE_TABLE(i2c, dmard10_i2c_id);
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 4fbc01bda62e..d25e31613413 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -228,8 +228,8 @@ static int fxls8962af_power_off(struct fxls8962af_data *data)
static int fxls8962af_standby(struct fxls8962af_data *data)
{
- return regmap_update_bits(data->regmap, FXLS8962AF_SENS_CONFIG1,
- FXLS8962AF_SENS_CONFIG1_ACTIVE, 0);
+ return regmap_clear_bits(data->regmap, FXLS8962AF_SENS_CONFIG1,
+ FXLS8962AF_SENS_CONFIG1_ACTIVE);
}
static int fxls8962af_active(struct fxls8962af_data *data)
@@ -785,9 +785,8 @@ static int fxls8962af_reset(struct fxls8962af_data *data)
unsigned int reg;
int ret;
- ret = regmap_update_bits(data->regmap, FXLS8962AF_SENS_CONFIG1,
- FXLS8962AF_SENS_CONFIG1_RST,
- FXLS8962AF_SENS_CONFIG1_RST);
+ ret = regmap_set_bits(data->regmap, FXLS8962AF_SENS_CONFIG1,
+ FXLS8962AF_SENS_CONFIG1_RST);
if (ret)
return ret;
@@ -830,9 +829,8 @@ static int fxls8962af_buffer_postenable(struct iio_dev *indio_dev)
fxls8962af_standby(data);
/* Enable buffer interrupt */
- ret = regmap_update_bits(data->regmap, FXLS8962AF_INT_EN,
- FXLS8962AF_INT_EN_BUF_EN,
- FXLS8962AF_INT_EN_BUF_EN);
+ ret = regmap_set_bits(data->regmap, FXLS8962AF_INT_EN,
+ FXLS8962AF_INT_EN_BUF_EN);
if (ret)
return ret;
@@ -851,8 +849,8 @@ static int fxls8962af_buffer_predisable(struct iio_dev *indio_dev)
fxls8962af_standby(data);
/* Disable buffer interrupt */
- ret = regmap_update_bits(data->regmap, FXLS8962AF_INT_EN,
- FXLS8962AF_INT_EN_BUF_EN, 0);
+ ret = regmap_clear_bits(data->regmap, FXLS8962AF_INT_EN,
+ FXLS8962AF_INT_EN_BUF_EN);
if (ret)
return ret;
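The fxls8962af hunks above, like the kxsd9, msa311 and ad4130 hunks elsewhere in this section, convert regmap_update_bits() calls whose value equals the mask (or is 0) into regmap_set_bits()/regmap_clear_bits(), which are thin wrappers with exactly that behaviour. Below is a self-contained toy model of the read-modify-write involved; the helper names are local stand-ins for illustration, not the regmap API.

#include <stdio.h>

static unsigned int reg;	/* stands in for a device register */

static void update_bits(unsigned int mask, unsigned int val)
{
	reg = (reg & ~mask) | (val & mask);
}

static void set_bits(unsigned int bits)   { update_bits(bits, bits); }
static void clear_bits(unsigned int bits) { update_bits(bits, 0); }

int main(void)
{
	reg = 0x0f;
	set_bits(0x30);		/* like regmap_set_bits(map, REG, 0x30)   */
	printf("after set:   0x%02x\n", reg);	/* 0x3f */
	clear_bits(0x05);	/* like regmap_clear_bits(map, REG, 0x05) */
	printf("after clear: 0x%02x\n", reg);	/* 0x3a */
	return 0;
}

The conversions change no behaviour; they only make the intent (set these bits, clear these bits) explicit at the call site.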
diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c
index 3bc9ee1f9db3..c4c7e2d4e98a 100644
--- a/drivers/iio/accel/kxsd9-i2c.c
+++ b/drivers/iio/accel/kxsd9-i2c.c
@@ -43,8 +43,8 @@ static const struct of_device_id kxsd9_of_match[] = {
MODULE_DEVICE_TABLE(of, kxsd9_of_match);
static const struct i2c_device_id kxsd9_i2c_id[] = {
- {"kxsd9", 0},
- { },
+ { "kxsd9" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, kxsd9_i2c_id);
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index ba99649fe195..70dfd6e354db 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -370,10 +370,7 @@ static int kxsd9_power_down(struct kxsd9_state *st)
* make sure we conserve power even if there are others users on the
* regulators.
*/
- ret = regmap_update_bits(st->map,
- KXSD9_REG_CTRL_B,
- KXSD9_CTRL_B_ENABLE,
- 0);
+ ret = regmap_clear_bits(st->map, KXSD9_REG_CTRL_B, KXSD9_CTRL_B_ENABLE);
if (ret)
return ret;
diff --git a/drivers/iio/accel/mc3230.c b/drivers/iio/accel/mc3230.c
index 6b87c2c9945c..caa40a14a631 100644
--- a/drivers/iio/accel/mc3230.c
+++ b/drivers/iio/accel/mc3230.c
@@ -180,7 +180,7 @@ static int mc3230_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(mc3230_pm_ops, mc3230_suspend, mc3230_resume);
static const struct i2c_device_id mc3230_i2c_id[] = {
- {"mc3230", 0},
+ { "mc3230" },
{}
};
MODULE_DEVICE_TABLE(i2c, mc3230_i2c_id);
diff --git a/drivers/iio/accel/mma7455_i2c.c b/drivers/iio/accel/mma7455_i2c.c
index 14f7850a22f0..36a357c8e9ed 100644
--- a/drivers/iio/accel/mma7455_i2c.c
+++ b/drivers/iio/accel/mma7455_i2c.c
@@ -32,8 +32,8 @@ static void mma7455_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id mma7455_i2c_ids[] = {
- { "mma7455", 0 },
- { "mma7456", 0 },
+ { "mma7455" },
+ { "mma7456" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mma7455_i2c_ids);
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 260cbceaa151..2894aff80161 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -38,21 +38,6 @@
static const int mma7660_nscale = 467142857;
-#define MMA7660_CHANNEL(reg, axis) { \
- .type = IIO_ACCEL, \
- .address = reg, \
- .modified = 1, \
- .channel2 = IIO_MOD_##axis, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
-}
-
-static const struct iio_chan_spec mma7660_channels[] = {
- MMA7660_CHANNEL(MMA7660_REG_XOUT, X),
- MMA7660_CHANNEL(MMA7660_REG_YOUT, Y),
- MMA7660_CHANNEL(MMA7660_REG_ZOUT, Z),
-};
-
enum mma7660_mode {
MMA7660_MODE_STANDBY,
MMA7660_MODE_ACTIVE
@@ -62,6 +47,21 @@ struct mma7660_data {
struct i2c_client *client;
struct mutex lock;
enum mma7660_mode mode;
+ struct iio_mount_matrix orientation;
+};
+
+static const struct iio_mount_matrix *
+mma7660_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct mma7660_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info mma7660_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, mma7660_get_mount_matrix),
+ { }
};
static IIO_CONST_ATTR(in_accel_scale_available, MMA7660_SCALE_AVAIL);
@@ -75,6 +75,22 @@ static const struct attribute_group mma7660_attribute_group = {
.attrs = mma7660_attributes
};
+#define MMA7660_CHANNEL(reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = mma7660_ext_info, \
+}
+
+static const struct iio_chan_spec mma7660_channels[] = {
+ MMA7660_CHANNEL(MMA7660_REG_XOUT, X),
+ MMA7660_CHANNEL(MMA7660_REG_YOUT, Y),
+ MMA7660_CHANNEL(MMA7660_REG_ZOUT, Z),
+};
+
static int mma7660_set_mode(struct mma7660_data *data,
enum mma7660_mode mode)
{
@@ -187,6 +203,10 @@ static int mma7660_probe(struct i2c_client *client)
mutex_init(&data->lock);
data->mode = MMA7660_MODE_STANDBY;
+ ret = iio_read_mount_matrix(&client->dev, &data->orientation);
+ if (ret)
+ return ret;
+
indio_dev->info = &mma7660_info;
indio_dev->name = MMA7660_DRIVER_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -241,7 +261,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(mma7660_pm_ops, mma7660_suspend,
mma7660_resume);
static const struct i2c_device_id mma7660_i2c_id[] = {
- {"mma7660", 0},
+ { "mma7660" },
{}
};
MODULE_DEVICE_TABLE(i2c, mma7660_i2c_id);
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index 083c08f65baf..fa1799b0b0df 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -595,7 +595,7 @@ static const struct acpi_device_id mma9551_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, mma9551_acpi_match);
static const struct i2c_device_id mma9551_id[] = {
- {"mma9551", 0},
+ { "mma9551" },
{}
};
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index 3cbd0fd4e624..86543f34ef17 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -1234,8 +1234,8 @@ static const struct acpi_device_id mma9553_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, mma9553_acpi_match);
static const struct i2c_device_id mma9553_id[] = {
- {"mma9553", 0},
- {},
+ { "mma9553" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, mma9553_id);
diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c
index b8ddbfd98f11..4cdbf5424a53 100644
--- a/drivers/iio/accel/msa311.c
+++ b/drivers/iio/accel/msa311.c
@@ -1034,10 +1034,10 @@ static int msa311_chip_init(struct msa311_priv *msa311)
"failed to unmap map0/map1 interrupts\n");
/* Disable all axes by default */
- err = regmap_update_bits(msa311->regs, MSA311_ODR_REG,
- MSA311_GENMASK(F_X_AXIS_DIS) |
- MSA311_GENMASK(F_Y_AXIS_DIS) |
- MSA311_GENMASK(F_Z_AXIS_DIS), 0);
+ err = regmap_clear_bits(msa311->regs, MSA311_ODR_REG,
+ MSA311_GENMASK(F_X_AXIS_DIS) |
+ MSA311_GENMASK(F_Y_AXIS_DIS) |
+ MSA311_GENMASK(F_Z_AXIS_DIS));
if (err)
return dev_err_probe(dev, err, "can't enable all axes\n");
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index e56407b6f204..fc54a2a4693c 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -584,9 +584,9 @@ static const struct of_device_id mxc4005_of_match[] = {
MODULE_DEVICE_TABLE(of, mxc4005_of_match);
static const struct i2c_device_id mxc4005_id[] = {
- {"mxc4005", 0},
- {"mxc6655", 0},
- { },
+ { "mxc4005" },
+ { "mxc6655" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, mxc4005_id);
diff --git a/drivers/iio/accel/mxc6255.c b/drivers/iio/accel/mxc6255.c
index ac228128c4f9..a8abda7b2a63 100644
--- a/drivers/iio/accel/mxc6255.c
+++ b/drivers/iio/accel/mxc6255.c
@@ -172,8 +172,8 @@ static const struct acpi_device_id mxc6255_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, mxc6255_acpi_match);
static const struct i2c_device_id mxc6255_id[] = {
- {"mxc6225", 0},
- {"mxc6255", 0},
+ { "mxc6225" },
+ { "mxc6255" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mxc6255_id);
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index e7525615712b..2659f536cef6 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -35,6 +35,7 @@
#define LIS3DHH_ACCEL_DEV_NAME "lis3dhh"
#define LIS3DE_ACCEL_DEV_NAME "lis3de"
#define LIS2DE12_ACCEL_DEV_NAME "lis2de12"
+#define LIS2DS12_ACCEL_DEV_NAME "lis2ds12"
#define LIS2HH12_ACCEL_DEV_NAME "lis2hh12"
#define LIS302DL_ACCEL_DEV_NAME "lis302dl"
#define LSM303C_ACCEL_DEV_NAME "lsm303c_accel"
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index d2104e14e255..0e371efbda70 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -926,6 +926,87 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.bootime = 2,
},
{
+ .wai = 0x43,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LIS2DS12_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_16bit_channels,
+ .odr = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .odr_avl = {
+ { .hz = 10, .value = 0x01, },
+ { .hz = 50, .value = 0x02, },
+ { .hz = 100, .value = 0x03, },
+ { .hz = 200, .value = 0x04, },
+ { .hz = 400, .value = 0x05, },
+ { .hz = 800, .value = 0x06, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = 0x20,
+ .mask = 0x0c,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(61),
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(122),
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(244),
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(488),
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x20,
+ .mask = 0x01,
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x23,
+ .mask = 0x01,
+ },
+ .int2 = {
+ .addr = 0x24,
+ .mask = 0x01,
+ },
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x02,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x01,
+ },
+ },
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(0),
+ },
+ .multi_read_bit = true,
+ .bootime = 2,
+ },
+ {
.wai = 0x41,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
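The LIS2DS12 entry added to st_accel_core.c above uses the 16-bit channel layout and gains of 61, 122, 244 and 488 for the 2/4/8/16 g ranges. Those figures are micro-g per LSB and follow from dividing each full-scale span by 2^16 codes (the table rounds to integer micro-g); the IIO_G_TO_M_S_2() wrapper then yields the m/s^2 scale the driver reports. A short, plain userspace check of that arithmetic:

#include <stdio.h>

int main(void)
{
	const double g = 9.80665;		/* m/s^2 per g */
	int ranges_g[] = { 2, 4, 8, 16 };

	for (int i = 0; i < 4; i++) {
		/* full span is 2 * range, spread over 65536 codes */
		double ug_per_lsb = (2.0 * ranges_g[i]) * 1e6 / 65536.0;

		printf("+/-%2d g: %6.1f ug/LSB -> ~%.9f m/s^2 per LSB\n",
		       ranges_g[i], ug_per_lsb, ug_per_lsb * 1e-6 * g);
	}
	return 0;
}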
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index fd3749871121..329a4d6fb2ec 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -103,6 +103,10 @@ static const struct of_device_id st_accel_of_match[] = {
.data = LIS2DE12_ACCEL_DEV_NAME,
},
{
+ .compatible = "st,lis2ds12",
+ .data = LIS2DS12_ACCEL_DEV_NAME,
+ },
+ {
.compatible = "st,lis2hh12",
.data = LIS2HH12_ACCEL_DEV_NAME,
},
@@ -154,6 +158,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LIS2DW12_ACCEL_DEV_NAME },
{ LIS3DE_ACCEL_DEV_NAME },
{ LIS2DE12_ACCEL_DEV_NAME },
+ { LIS2DS12_ACCEL_DEV_NAME },
{ LIS2HH12_ACCEL_DEV_NAME },
{ LIS302DL_ACCEL_DEV_NAME },
{ LSM303C_ACCEL_DEV_NAME },
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index f72a24f45322..825adab37105 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -65,6 +65,10 @@ static const struct of_device_id st_accel_of_match[] = {
.data = LIS2DH12_ACCEL_DEV_NAME,
},
{
+ .compatible = "st,lis2ds12",
+ .data = LIS2DS12_ACCEL_DEV_NAME,
+ },
+ {
.compatible = "st,lis3l02dq",
.data = LIS3L02DQ_ACCEL_DEV_NAME,
},
@@ -151,6 +155,7 @@ static const struct spi_device_id st_accel_id_table[] = {
{ LSM330_ACCEL_DEV_NAME },
{ LSM303AGR_ACCEL_DEV_NAME },
{ LIS2DH12_ACCEL_DEV_NAME },
+ { LIS2DS12_ACCEL_DEV_NAME },
{ LIS3L02DQ_ACCEL_DEV_NAME },
{ LNG2DM_ACCEL_DEV_NAME },
{ H3LIS331DL_ACCEL_DEV_NAME },
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index ef0ae7672253..b3534d5751b9 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -633,8 +633,8 @@ static DEFINE_SIMPLE_DEV_PM_OPS(stk8312_pm_ops, stk8312_suspend,
static const struct i2c_device_id stk8312_i2c_id[] = {
/* Deprecated in favour of lowercase form */
- { "STK8312", 0 },
- { "stk8312", 0 },
+ { "STK8312" },
+ { "stk8312" },
{}
};
MODULE_DEVICE_TABLE(i2c, stk8312_i2c_id);
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 668edc88c89d..6d3c7f444d21 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -525,7 +525,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(stk8ba50_pm_ops, stk8ba50_suspend,
stk8ba50_resume);
static const struct i2c_device_id stk8ba50_i2c_id[] = {
- {"stk8ba50", 0},
+ { "stk8ba50" },
{}
};
MODULE_DEVICE_TABLE(i2c, stk8ba50_i2c_id);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 8db68b80b391..f60fe85a30d5 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -88,12 +88,17 @@ config AD7173
called ad7173.
config AD7192
- tristate "Analog Devices AD7190 AD7192 AD7193 AD7195 ADC driver"
+ tristate "Analog Devices AD7192 and similar ADC driver"
depends on SPI
select AD_SIGMA_DELTA
help
- Say yes here to build support for Analog Devices AD7190,
- AD7192, AD7193 or AD7195 SPI analog to digital converters (ADC).
+ Say yes here to build support for Analog Devices SPI analog to digital
+ converters (ADC):
+ - AD7190
+ - AD7192
+ - AD7193
+ - AD7194
+ - AD7195
If unsure, say N (but it's safe to say "Y").
To compile this driver as a module, choose M here: the
@@ -155,6 +160,22 @@ config AD7298
To compile this driver as a module, choose M here: the
module will be called ad7298.
+config AD7380
+ tristate "Analog Devices AD7380 ADC driver"
+ depends on SPI_MASTER
+ select IIO_BUFFER
+ select IIO_TRIGGER
+ select IIO_TRIGGERED_BUFFER
+ help
+ AD7380 is a family of simultaneous sampling ADCs that share the same
+ SPI register map and have similar pinouts.
+
+ Say yes here to build support for Analog Devices AD7380 ADC and
+ similar chips.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad7380.
+
config AD7476
tristate "Analog Devices AD7476 1-channel ADCs driver and other similar devices from AD and TI"
depends on SPI
@@ -332,6 +353,7 @@ config AD9467
config ADI_AXI_ADC
tristate "Analog Devices Generic AXI ADC IP core driver"
+ depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA || COMPILE_TEST
select IIO_BUFFER
select IIO_BUFFER_HW_CONSUMER
select IIO_BUFFER_DMAENGINE
@@ -870,6 +892,18 @@ config MCP3911
This driver can also be built as a module. If so, the module will be
called mcp3911.
+config MEDIATEK_MT6359_AUXADC
+ tristate "MediaTek MT6359 PMIC AUXADC driver"
+ depends on MFD_MT6397
+ help
+ Say yes here to enable support for MediaTek MT6357, MT6358 and
+ MT6359 PMICs Auxiliary ADC.
+ This driver provides multiple channels for system monitoring,
+ such as battery voltage, PMIC temperature, and others.
+
+ This driver can also be built as a module. If so, the module will be
+ called mt6359-auxadc.
+
config MEDIATEK_MT6360_ADC
tristate "Mediatek MT6360 ADC driver"
depends on MFD_MT6360
@@ -1329,6 +1363,18 @@ config TI_ADS1015
This driver can also be built as a module. If so, the module will be
called ti-ads1015.
+config TI_ADS1119
+ tristate "Texas Instruments ADS1119 ADC"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for Texas Instruments ADS1119
+ ADC chip.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads1119.
+
config TI_ADS7924
tristate "Texas Instruments ADS7924 ADC"
depends on I2C
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index edb32ce2af02..d370e066544e 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_AD7280) += ad7280a.o
obj-$(CONFIG_AD7291) += ad7291.o
obj-$(CONFIG_AD7292) += ad7292.o
obj-$(CONFIG_AD7298) += ad7298.o
-obj-$(CONFIG_AD7923) += ad7923.o
+obj-$(CONFIG_AD7380) += ad7380.o
obj-$(CONFIG_AD7476) += ad7476.o
obj-$(CONFIG_AD7606_IFACE_PARALLEL) += ad7606_par.o
obj-$(CONFIG_AD7606_IFACE_SPI) += ad7606_spi.o
@@ -29,6 +29,7 @@ obj-$(CONFIG_AD7780) += ad7780.o
obj-$(CONFIG_AD7791) += ad7791.o
obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7887) += ad7887.o
+obj-$(CONFIG_AD7923) += ad7923.o
obj-$(CONFIG_AD7944) += ad7944.o
obj-$(CONFIG_AD7949) += ad7949.o
obj-$(CONFIG_AD799X) += ad799x.o
@@ -79,6 +80,7 @@ obj-$(CONFIG_MCP320X) += mcp320x.o
obj-$(CONFIG_MCP3422) += mcp3422.o
obj-$(CONFIG_MCP3564) += mcp3564.o
obj-$(CONFIG_MCP3911) += mcp3911.o
+obj-$(CONFIG_MEDIATEK_MT6359_AUXADC) += mt6359-auxadc.o
obj-$(CONFIG_MEDIATEK_MT6360_ADC) += mt6360-adc.o
obj-$(CONFIG_MEDIATEK_MT6370_ADC) += mt6370-adc.o
obj-$(CONFIG_MEDIATEK_MT6577_AUXADC) += mt6577_auxadc.o
@@ -90,42 +92,44 @@ obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_NPCM_ADC) += npcm_adc.o
obj-$(CONFIG_PAC1934) += pac1934.o
obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o
+obj-$(CONFIG_QCOM_PM8XXX_XOADC) += qcom-pm8xxx-xoadc.o
obj-$(CONFIG_QCOM_SPMI_ADC5) += qcom-spmi-adc5.o
obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
obj-$(CONFIG_QCOM_SPMI_RRADC) += qcom-spmi-rradc.o
-obj-$(CONFIG_QCOM_VADC_COMMON) += qcom-vadc-common.o
obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
-obj-$(CONFIG_QCOM_PM8XXX_XOADC) += qcom-pm8xxx-xoadc.o
+obj-$(CONFIG_QCOM_VADC_COMMON) += qcom-vadc-common.o
obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o
+obj-$(CONFIG_RICHTEK_RTQ6056) += rtq6056.o
obj-$(CONFIG_RN5T618_ADC) += rn5t618-adc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
-obj-$(CONFIG_RICHTEK_RTQ6056) += rtq6056.o
obj-$(CONFIG_RZG2L_ADC) += rzg2l_adc.o
obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
+obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
-obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
-obj-$(CONFIG_SUN20I_GPADC) += sun20i-gpadc-iio.o
obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
obj-$(CONFIG_STM32_ADC) += stm32-adc.o
-obj-$(CONFIG_STM32_DFSDM_CORE) += stm32-dfsdm-core.o
obj-$(CONFIG_STM32_DFSDM_ADC) += stm32-dfsdm-adc.o
+obj-$(CONFIG_STM32_DFSDM_CORE) += stm32-dfsdm-core.o
obj-$(CONFIG_STMPE_ADC) += stmpe-adc.o
+obj-$(CONFIG_SUN20I_GPADC) += sun20i-gpadc-iio.o
+obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
obj-$(CONFIG_TI_ADC084S021) += ti-adc084s021.o
-obj-$(CONFIG_TI_ADC12138) += ti-adc12138.o
obj-$(CONFIG_TI_ADC108S102) += ti-adc108s102.o
+obj-$(CONFIG_TI_ADC12138) += ti-adc12138.o
obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
obj-$(CONFIG_TI_ADC161S626) += ti-adc161s626.o
obj-$(CONFIG_TI_ADS1015) += ti-ads1015.o
obj-$(CONFIG_TI_ADS1100) += ti-ads1100.o
+obj-$(CONFIG_TI_ADS1119) += ti-ads1119.o
+obj-$(CONFIG_TI_ADS124S08) += ti-ads124s08.o
obj-$(CONFIG_TI_ADS1298) += ti-ads1298.o
+obj-$(CONFIG_TI_ADS131E08) += ti-ads131e08.o
obj-$(CONFIG_TI_ADS7924) += ti-ads7924.o
obj-$(CONFIG_TI_ADS7950) += ti-ads7950.o
obj-$(CONFIG_TI_ADS8344) += ti-ads8344.o
obj-$(CONFIG_TI_ADS8688) += ti-ads8688.o
-obj-$(CONFIG_TI_ADS124S08) += ti-ads124s08.o
-obj-$(CONFIG_TI_ADS131E08) += ti-ads131e08.o
obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
obj-$(CONFIG_TI_LMP92064) += ti-lmp92064.o
obj-$(CONFIG_TI_TLC4541) += ti-tlc4541.o
@@ -134,7 +138,6 @@ obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
obj-$(CONFIG_TWL6030_GPADC) += twl6030-gpadc.o
obj-$(CONFIG_VF610_ADC) += vf610_adc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
+obj-$(CONFIG_XILINX_AMS) += xilinx-ams.o
xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
-obj-$(CONFIG_XILINX_AMS) += xilinx-ams.o
-obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index aaf1fb0ac447..e134d6497827 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -1883,8 +1883,8 @@ static int ad4130_setup(struct iio_dev *indio_dev)
if (ret)
return ret;
- ret = regmap_update_bits(st->regmap, AD4130_FIFO_CONTROL_REG,
- AD4130_FIFO_CONTROL_HEADER_MASK, 0);
+ ret = regmap_clear_bits(st->regmap, AD4130_FIFO_CONTROL_REG,
+ AD4130_FIFO_CONTROL_HEADER_MASK);
if (ret)
return ret;
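
The ad4130 hunk above swaps regmap_update_bits(..., mask, 0) for regmap_clear_bits(..., mask). A minimal sketch of the equivalence, assuming only the regmap API from include/linux/regmap.h (the wrapper function name below is hypothetical, for illustration only):

#include <linux/regmap.h>

/*
 * Illustrative only: both calls clear every bit covered by the mask;
 * regmap_clear_bits() is the more explicit spelling of
 * regmap_update_bits(map, reg, mask, 0).
 */
static int clear_fifo_header_bits(struct regmap *map, unsigned int reg,
				  unsigned int mask)
{
	return regmap_clear_bits(map, reg, mask);
}
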
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index e7b1d517d3de..3beed78496c5 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -555,10 +555,18 @@ static int ad7124_disable_all(struct ad_sigma_delta *sd)
return 0;
}
+static int ad7124_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
+{
+ struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
+
+ return ad7124_spi_write_mask(st, AD7124_CHANNEL(chan), AD7124_CHANNEL_EN_MSK, 0, 2);
+}
+
static const struct ad_sigma_delta_info ad7124_sigma_delta_info = {
.set_channel = ad7124_set_channel,
.append_status = ad7124_append_status,
.disable_all = ad7124_disable_all,
+ .disable_one = ad7124_disable_one,
.set_mode = ad7124_set_mode,
.has_registers = true,
.addr_shift = 0,
@@ -582,12 +590,6 @@ static int ad7124_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- /* After the conversion is performed, disable the channel */
- ret = ad_sd_write_reg(&st->sd, AD7124_CHANNEL(chan->address), 2,
- st->channels[chan->address].ain | AD7124_CHANNEL_EN(0));
- if (ret < 0)
- return ret;
-
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
mutex_lock(&st->cfgs_lock);
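
The new .disable_one callback lets the ad_sigma_delta core turn off the single active channel once a one-shot conversion completes, which is why the explicit channel-disable write was dropped from ad7124_read_raw() above. A minimal sketch of how a core-side caller might use it, assuming only the callback signature shown in this hunk; the helper name and its body are illustrative, not taken from ad_sigma_delta.c:

/*
 * Illustrative only: after a single conversion, disable the channel via the
 * per-channel hook when the driver provides one.
 */
static int ad_sd_disable_one_sketch(struct ad_sigma_delta *sigma_delta,
				    unsigned int chan)
{
	if (!sigma_delta->info->disable_one)
		return 0;

	return sigma_delta->info->disable_one(sigma_delta, chan);
}
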
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index a7826bba0852..9544bf7142ad 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * AD717x family SPI ADC driver
+ * AD717x and AD411x family SPI ADC driver
*
* Supported devices:
+ * AD4111/AD4112/AD4114/AD4115/AD4116
* AD7172-2/AD7172-4/AD7173-8/AD7175-2
* AD7175-8/AD7176-2/AD7177-2
*
@@ -60,11 +61,19 @@
#define AD7173_CH_SETUP_AINPOS_MASK GENMASK(9, 5)
#define AD7173_CH_SETUP_AINNEG_MASK GENMASK(4, 0)
+#define AD7173_NO_AINS_PER_CHANNEL 2
#define AD7173_CH_ADDRESS(pos, neg) \
(FIELD_PREP(AD7173_CH_SETUP_AINPOS_MASK, pos) | \
FIELD_PREP(AD7173_CH_SETUP_AINNEG_MASK, neg))
#define AD7173_AIN_TEMP_POS 17
#define AD7173_AIN_TEMP_NEG 18
+#define AD7173_AIN_POW_MON_POS 19
+#define AD7173_AIN_POW_MON_NEG 20
+#define AD7173_AIN_REF_POS 21
+#define AD7173_AIN_REF_NEG 22
+
+#define AD7173_IS_REF_INPUT(x) ((x) == AD7173_AIN_REF_POS || \
+ (x) == AD7173_AIN_REF_NEG)
#define AD7172_2_ID 0x00d0
#define AD7175_ID 0x0cd0
@@ -72,6 +81,11 @@
#define AD7175_2_ID 0x0cd0
#define AD7172_4_ID 0x2050
#define AD7173_ID 0x30d0
+#define AD4111_ID AD7173_ID
+#define AD4112_ID AD7173_ID
+#define AD4114_ID AD7173_ID
+#define AD4116_ID 0x34d0
+#define AD4115_ID 0x38d0
#define AD7175_8_ID 0x3cd0
#define AD7177_ID 0x4fd0
#define AD7173_ID_MASK GENMASK(15, 4)
@@ -102,6 +116,7 @@
#define AD7173_GPO12_DATA(x) BIT((x) + 0)
#define AD7173_GPO23_DATA(x) BIT((x) + 4)
+#define AD4111_GPO01_DATA(x) BIT((x) + 6)
#define AD7173_GPO_DATA(x) ((x) < 2 ? AD7173_GPO12_DATA(x) : AD7173_GPO23_DATA(x))
#define AD7173_INTERFACE_DATA_STAT BIT(6)
@@ -120,33 +135,45 @@
#define AD7173_VOLTAGE_INT_REF_uV 2500000
#define AD7173_TEMP_SENSIIVITY_uV_per_C 477
#define AD7177_ODR_START_VALUE 0x07
+#define AD4111_SHUNT_RESISTOR_OHM 50
+#define AD4111_DIVIDER_RATIO 10
+#define AD4111_CURRENT_CHAN_CUTOFF 16
+#define AD4111_VINCOM_INPUT 0x10
+
+/* pin < num_voltage_in is a normal voltage input */
+/* pin >= num_voltage_in_div is a voltage input without a divider */
+#define AD4111_IS_VINCOM_MISMATCH(pin1, pin2) ((pin1) == AD4111_VINCOM_INPUT && \
+ (pin2) < st->info->num_voltage_in && \
+ (pin2) >= st->info->num_voltage_in_div)
#define AD7173_FILTER_ODR0_MASK GENMASK(5, 0)
#define AD7173_MAX_CONFIGS 8
-enum ad7173_ids {
- ID_AD7172_2,
- ID_AD7172_4,
- ID_AD7173_8,
- ID_AD7175_2,
- ID_AD7175_8,
- ID_AD7176_2,
- ID_AD7177_2,
-};
-
struct ad7173_device_info {
const unsigned int *sinc5_data_rates;
unsigned int num_sinc5_data_rates;
unsigned int odr_start_value;
+ /*
+ * AD4116 has both inputs with a voltage divider and without.
+ * These inputs cannot be mixed in the channel configuration.
+ * Does not include the VINCOM input.
+ */
+ unsigned int num_voltage_in_div;
unsigned int num_channels;
unsigned int num_configs;
- unsigned int num_inputs;
+ unsigned int num_voltage_in;
unsigned int clock;
unsigned int id;
char *name;
+ bool has_current_inputs;
+ bool has_vincom_input;
bool has_temp;
+ /* ((AVDD1 − AVSS)/5) */
+ bool has_pow_supply_monitoring;
+ bool has_input_buf;
bool has_int_ref;
bool has_ref2;
+ bool higher_gpio_bits;
u8 num_gpios;
};
@@ -188,6 +215,24 @@ struct ad7173_state {
#endif
};
+static unsigned int ad4115_sinc5_data_rates[] = {
+ 24845000, 24845000, 20725000, 20725000, /* 0-3 */
+ 15564000, 13841000, 10390000, 10390000, /* 4-7 */
+ 4994000, 2499000, 1000000, 500000, /* 8-11 */
+ 395500, 200000, 100000, 59890, /* 12-15 */
+ 49920, 20000, 16660, 10000, /* 16-19 */
+ 5000, 2500, 2500, /* 20-22 */
+};
+
+static unsigned int ad4116_sinc5_data_rates[] = {
+ 12422360, 12422360, 12422360, 12422360, /* 0-3 */
+ 10362690, 10362690, 7782100, 6290530, /* 4-7 */
+ 5194800, 2496900, 1007600, 499900, /* 8-11 */
+ 390600, 200300, 100000, 59750, /* 12-15 */
+ 49840, 20000, 16650, 10000, /* 16-19 */
+ 5000, 2500, 1250, /* 20-22 */
+};
+
static const unsigned int ad7173_sinc5_data_rates[] = {
6211000, 6211000, 6211000, 6211000, 6211000, 6211000, 5181000, 4444000, /* 0-7 */
3115000, 2597000, 1007000, 503800, 381000, 200300, 100500, 59520, /* 8-15 */
@@ -203,98 +248,214 @@ static const unsigned int ad7175_sinc5_data_rates[] = {
5000, /* 20 */
};
-static const struct ad7173_device_info ad7173_device_info[] = {
- [ID_AD7172_2] = {
- .name = "ad7172-2",
- .id = AD7172_2_ID,
- .num_inputs = 5,
- .num_channels = 4,
- .num_configs = 4,
- .num_gpios = 2,
- .has_temp = true,
- .has_int_ref = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
- },
- [ID_AD7172_4] = {
- .id = AD7172_4_ID,
- .num_inputs = 9,
- .num_channels = 8,
- .num_configs = 8,
- .num_gpios = 4,
- .has_temp = false,
- .has_ref2 = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
- },
- [ID_AD7173_8] = {
- .name = "ad7173-8",
- .id = AD7173_ID,
- .num_inputs = 17,
- .num_channels = 16,
- .num_configs = 8,
- .num_gpios = 4,
- .has_temp = true,
- .has_int_ref = true,
- .has_ref2 = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
- },
- [ID_AD7175_2] = {
- .name = "ad7175-2",
- .id = AD7175_2_ID,
- .num_inputs = 5,
- .num_channels = 4,
- .num_configs = 4,
- .num_gpios = 2,
- .has_temp = true,
- .has_int_ref = true,
- .clock = 16 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7175_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
- },
- [ID_AD7175_8] = {
- .id = AD7175_8_ID,
- .num_inputs = 17,
- .num_channels = 16,
- .num_configs = 8,
- .num_gpios = 4,
- .has_temp = true,
- .has_int_ref = true,
- .has_ref2 = true,
- .clock = 16 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7175_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
- },
- [ID_AD7176_2] = {
- .name = "ad7176-2",
- .id = AD7176_ID,
- .num_inputs = 5,
- .num_channels = 4,
- .num_configs = 4,
- .num_gpios = 2,
- .has_temp = false,
- .has_int_ref = true,
- .clock = 16 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7175_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
- },
- [ID_AD7177_2] = {
- .id = AD7177_ID,
- .num_inputs = 5,
- .num_channels = 4,
- .num_configs = 4,
- .num_gpios = 2,
- .has_temp = true,
- .has_int_ref = true,
- .clock = 16 * HZ_PER_MHZ,
- .odr_start_value = AD7177_ODR_START_VALUE,
- .sinc5_data_rates = ad7175_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
- },
+static unsigned int ad4111_current_channel_config[] = {
+ /* Ain sel: pos neg */
+ 0x1E8, /* 15:IIN0+ 8:IIN0− */
+ 0x1C9, /* 14:IIN1+ 9:IIN1− */
+ 0x1AA, /* 13:IIN2+ 10:IIN2− */
+ 0x18B, /* 12:IIN3+ 11:IIN3− */
+};
+
+static const struct ad7173_device_info ad4111_device_info = {
+ .name = "ad4111",
+ .id = AD4111_ID,
+ .num_voltage_in_div = 8,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 8,
+ .num_gpios = 2,
+ .higher_gpio_bits = true,
+ .has_temp = true,
+ .has_vincom_input = true,
+ .has_input_buf = true,
+ .has_current_inputs = true,
+ .has_int_ref = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad4112_device_info = {
+ .name = "ad4112",
+ .id = AD4112_ID,
+ .num_voltage_in_div = 8,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 8,
+ .num_gpios = 2,
+ .higher_gpio_bits = true,
+ .has_vincom_input = true,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_current_inputs = true,
+ .has_int_ref = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad4114_device_info = {
+ .name = "ad4114",
+ .id = AD4114_ID,
+ .num_voltage_in_div = 16,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 16,
+ .num_gpios = 4,
+ .higher_gpio_bits = true,
+ .has_vincom_input = true,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad4115_device_info = {
+ .name = "ad4115",
+ .id = AD4115_ID,
+ .num_voltage_in_div = 16,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 16,
+ .num_gpios = 4,
+ .higher_gpio_bits = true,
+ .has_vincom_input = true,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .clock = 8 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad4115_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad4115_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad4116_device_info = {
+ .name = "ad4116",
+ .id = AD4116_ID,
+ .num_voltage_in_div = 11,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 16,
+ .num_gpios = 4,
+ .higher_gpio_bits = true,
+ .has_vincom_input = true,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .clock = 4 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad4116_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad4116_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7172_2_device_info = {
+ .name = "ad7172-2",
+ .id = AD7172_2_ID,
+ .num_voltage_in = 5,
+ .num_channels = 4,
+ .num_configs = 4,
+ .num_gpios = 2,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7172_4_device_info = {
+ .name = "ad7172-4",
+ .id = AD7172_4_ID,
+ .num_voltage_in = 9,
+ .num_channels = 8,
+ .num_configs = 8,
+ .num_gpios = 4,
+ .has_input_buf = true,
+ .has_ref2 = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7173_8_device_info = {
+ .name = "ad7173-8",
+ .id = AD7173_ID,
+ .num_voltage_in = 17,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_gpios = 4,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_ref2 = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7175_2_device_info = {
+ .name = "ad7175-2",
+ .id = AD7175_2_ID,
+ .num_voltage_in = 5,
+ .num_channels = 4,
+ .num_configs = 4,
+ .num_gpios = 2,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 16 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7175_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7175_8_device_info = {
+ .name = "ad7175-8",
+ .id = AD7175_8_ID,
+ .num_voltage_in = 17,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_gpios = 4,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_ref2 = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 16 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7175_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7176_2_device_info = {
+ .name = "ad7176-2",
+ .id = AD7176_ID,
+ .num_voltage_in = 5,
+ .num_channels = 4,
+ .num_configs = 4,
+ .num_gpios = 2,
+ .has_int_ref = true,
+ .clock = 16 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7175_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7177_2_device_info = {
+ .name = "ad7177-2",
+ .id = AD7177_ID,
+ .num_voltage_in = 5,
+ .num_channels = 4,
+ .num_configs = 4,
+ .num_gpios = 2,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 16 * HZ_PER_MHZ,
+ .odr_start_value = AD7177_ODR_START_VALUE,
+ .sinc5_data_rates = ad7175_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
};
static const char *const ad7173_ref_sel_str[] = {
@@ -336,6 +497,15 @@ static int ad7173_mask_xlate(struct gpio_regmap *gpio, unsigned int base,
return 0;
}
+static int ad4111_mask_xlate(struct gpio_regmap *gpio, unsigned int base,
+ unsigned int offset, unsigned int *reg,
+ unsigned int *mask)
+{
+ *mask = AD4111_GPO01_DATA(offset);
+ *reg = base;
+ return 0;
+}
+
static void ad7173_gpio_disable(void *data)
{
struct ad7173_state *st = data;
@@ -368,7 +538,10 @@ static int ad7173_gpio_init(struct ad7173_state *st)
gpio_regmap.regmap = st->reg_gpiocon_regmap;
gpio_regmap.ngpio = st->info->num_gpios;
gpio_regmap.reg_set_base = AD7173_REG_GPIO;
- gpio_regmap.reg_mask_xlate = ad7173_mask_xlate;
+ if (st->info->higher_gpio_bits)
+ gpio_regmap.reg_mask_xlate = ad4111_mask_xlate;
+ else
+ gpio_regmap.reg_mask_xlate = ad7173_mask_xlate;
st->gpio_regmap = devm_gpio_regmap_register(dev, &gpio_regmap);
ret = PTR_ERR_OR_ZERO(st->gpio_regmap);
@@ -532,6 +705,7 @@ static int ad7173_append_status(struct ad_sigma_delta *sd, bool append)
unsigned int interface_mode = st->interface_mode;
int ret;
+ interface_mode &= ~AD7173_INTERFACE_DATA_STAT;
interface_mode |= AD7173_INTERFACE_DATA_STAT_EN(append);
ret = ad_sd_write_reg(&st->sd, AD7173_REG_INTERFACE_MODE, 2, interface_mode);
if (ret)
@@ -557,10 +731,16 @@ static int ad7173_disable_all(struct ad_sigma_delta *sd)
return 0;
}
+static int ad7173_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
+{
+ return ad_sd_write_reg(sd, AD7173_REG_CH(chan), 2, 0);
+}
+
static struct ad_sigma_delta_info ad7173_sigma_delta_info = {
.set_channel = ad7173_set_channel,
.append_status = ad7173_append_status,
.disable_all = ad7173_disable_all,
+ .disable_one = ad7173_disable_one,
.set_mode = ad7173_set_mode,
.has_registers = true,
.addr_shift = 0,
@@ -656,25 +836,35 @@ static int ad7173_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- /* disable channel after single conversion */
- ret = ad_sd_write_reg(&st->sd, AD7173_REG_CH(chan->address), 2, 0);
- if (ret < 0)
- return ret;
-
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- if (chan->type == IIO_TEMP) {
+
+ switch (chan->type) {
+ case IIO_TEMP:
temp = AD7173_VOLTAGE_INT_REF_uV * MILLI;
temp /= AD7173_TEMP_SENSIIVITY_uV_per_C;
*val = temp;
*val2 = chan->scan_type.realbits;
- } else {
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_VOLTAGE:
*val = ad7173_get_ref_voltage_milli(st, ch->cfg.ref_sel);
*val2 = chan->scan_type.realbits - !!(ch->cfg.bipolar);
+
+ if (chan->channel < st->info->num_voltage_in_div)
+ *val *= AD4111_DIVIDER_RATIO;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CURRENT:
+ *val = ad7173_get_ref_voltage_milli(st, ch->cfg.ref_sel);
+ *val /= AD4111_SHUNT_RESISTOR_OHM;
+ *val2 = chan->scan_type.realbits - ch->cfg.bipolar;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
}
- return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_OFFSET:
- if (chan->type == IIO_TEMP) {
+
+ switch (chan->type) {
+ case IIO_TEMP:
/* 0 Kelvin -> raw sample */
temp = -ABSOLUTE_ZERO_MILLICELSIUS;
temp *= AD7173_TEMP_SENSIIVITY_uV_per_C;
@@ -683,10 +873,14 @@ static int ad7173_read_raw(struct iio_dev *indio_dev,
AD7173_VOLTAGE_INT_REF_uV *
MILLI);
*val = -temp;
- } else {
+ return IIO_VAL_INT;
+ case IIO_VOLTAGE:
+ case IIO_CURRENT:
*val = -BIT(chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
}
- return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
reg = st->channels[chan->address].cfg.odr;
@@ -705,7 +899,7 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
{
struct ad7173_state *st = iio_priv(indio_dev);
struct ad7173_channel_config *cfg;
- unsigned int freq, i, reg;
+ unsigned int freq, i;
int ret;
ret = iio_device_claim_direct_mode(indio_dev);
@@ -713,6 +907,21 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
return ret;
switch (info) {
+ /*
+ * This attribute sets the sampling frequency for each channel individually.
+ * There are no issues for raw or buffered reads of an individual channel.
+ *
+ * When multiple channels are enabled in buffered mode, the effective
+ * sampling rate of a channel is lowered in correlation to the number
+ * of channels enabled and the sampling rate of the other channels.
+ *
+ * Example: 3 channels enabled with rates CH1:6211sps CH2,CH3:10sps
+ * While the reading of CH1 takes only 0.16ms, the reading of CH2 and CH3
+ * will take 100ms each.
+ *
+ * This will cause the reading of CH1 to be actually done once every
+ * 200.16ms, an effective rate of 4.99sps.
+ */
case IIO_CHAN_INFO_SAMP_FREQ:
freq = val * MILLI + val2 / MILLI;
for (i = st->info->odr_start_value; i < st->info->num_sinc5_data_rates - 1; i++)
@@ -721,16 +930,7 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
cfg = &st->channels[chan->address].cfg;
cfg->odr = i;
-
- if (!cfg->live)
- break;
-
- ret = ad_sd_read_reg(&st->sd, AD7173_REG_FILTER(cfg->cfg_slot), 2, &reg);
- if (ret)
- break;
- reg &= ~AD7173_FILTER_ODR0_MASK;
- reg |= FIELD_PREP(AD7173_FILTER_ODR0_MASK, i);
- ret = ad_sd_write_reg(&st->sd, AD7173_REG_FILTER(cfg->cfg_slot), 2, reg);
+ cfg->live = false;
break;
default:
@@ -792,8 +992,7 @@ static const struct iio_chan_spec ad7173_channel_template = {
.type = IIO_VOLTAGE,
.indexed = 1,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.scan_type = {
.sign = 'u',
.realbits = 24,
@@ -804,12 +1003,11 @@ static const struct iio_chan_spec ad7173_channel_template = {
static const struct iio_chan_spec ad7173_temp_iio_channel_template = {
.type = IIO_TEMP,
- .indexed = 1,
.channel = AD7173_AIN_TEMP_POS,
.channel2 = AD7173_AIN_TEMP_NEG,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
.scan_type = {
.sign = 'u',
.realbits = 24,
@@ -903,14 +1101,110 @@ static int ad7173_register_clk_provider(struct iio_dev *indio_dev)
&st->int_clk_hw);
}
+static int ad4111_validate_current_ain(struct ad7173_state *st,
+ const unsigned int ain[AD7173_NO_AINS_PER_CHANNEL])
+{
+ struct device *dev = &st->sd.spi->dev;
+
+ if (!st->info->has_current_inputs)
+ return dev_err_probe(dev, -EINVAL,
+ "Model %s does not support current channels\n",
+ st->info->name);
+
+ if (ain[0] >= ARRAY_SIZE(ad4111_current_channel_config))
+ return dev_err_probe(dev, -EINVAL,
+ "For current channels single-channel must be <[0-3]>\n");
+
+ return 0;
+}
+
+static int ad7173_validate_voltage_ain_inputs(struct ad7173_state *st,
+ unsigned int ain0, unsigned int ain1)
+{
+ struct device *dev = &st->sd.spi->dev;
+ bool special_input0, special_input1;
+
+ /* (AVDD1-AVSS)/5 power supply monitoring */
+ if (ain0 == AD7173_AIN_POW_MON_POS && ain1 == AD7173_AIN_POW_MON_NEG &&
+ st->info->has_pow_supply_monitoring)
+ return 0;
+
+ special_input0 = AD7173_IS_REF_INPUT(ain0) ||
+ (ain0 == AD4111_VINCOM_INPUT && st->info->has_vincom_input);
+ special_input1 = AD7173_IS_REF_INPUT(ain1) ||
+ (ain1 == AD4111_VINCOM_INPUT && st->info->has_vincom_input);
+
+ if ((ain0 >= st->info->num_voltage_in && !special_input0) ||
+ (ain1 >= st->info->num_voltage_in && !special_input1)) {
+ if (ain0 == AD4111_VINCOM_INPUT || ain1 == AD4111_VINCOM_INPUT)
+ return dev_err_probe(dev, -EINVAL,
+ "VINCOM not supported for %s\n", st->info->name);
+
+ return dev_err_probe(dev, -EINVAL,
+ "Input pin number out of range for pair (%d %d).\n",
+ ain0, ain1);
+ }
+
+ if (AD4111_IS_VINCOM_MISMATCH(ain0, ain1) ||
+ AD4111_IS_VINCOM_MISMATCH(ain1, ain0))
+ return dev_err_probe(dev, -EINVAL,
+ "VINCOM must be paired with inputs having divider.\n");
+
+ if (!special_input0 && !special_input1 &&
+ ((ain0 >= st->info->num_voltage_in_div) !=
+ (ain1 >= st->info->num_voltage_in_div)))
+ return dev_err_probe(dev, -EINVAL,
+ "Both inputs must either have a voltage divider or not have: (%d %d).\n",
+ ain0, ain1);
+
+ return 0;
+}
+
+static int ad7173_validate_reference(struct ad7173_state *st, int ref_sel)
+{
+ struct device *dev = &st->sd.spi->dev;
+ int ret;
+
+ if (ref_sel == AD7173_SETUP_REF_SEL_INT_REF && !st->info->has_int_ref)
+ return dev_err_probe(dev, -EINVAL,
+ "Internal reference is not available on current model.\n");
+
+ if (ref_sel == AD7173_SETUP_REF_SEL_EXT_REF2 && !st->info->has_ref2)
+ return dev_err_probe(dev, -EINVAL,
+ "External reference 2 is not available on current model.\n");
+
+ ret = ad7173_get_ref_voltage_milli(st, ref_sel);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot use reference %u\n",
+ ref_sel);
+
+ return 0;
+}
+
static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
{
struct ad7173_channel *chans_st_arr, *chan_st_priv;
struct ad7173_state *st = iio_priv(indio_dev);
struct device *dev = indio_dev->dev.parent;
struct iio_chan_spec *chan_arr, *chan;
- unsigned int ain[2], chan_index = 0;
- int ref_sel, ret;
+ unsigned int ain[AD7173_NO_AINS_PER_CHANNEL], chan_index = 0;
+ int ref_sel, ret, num_channels;
+
+ num_channels = device_get_child_node_count(dev);
+
+ if (st->info->has_temp)
+ num_channels++;
+
+ if (num_channels == 0)
+ return dev_err_probe(dev, -ENODATA, "No channels specified\n");
+
+ if (num_channels > st->info->num_channels)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many channels specified. Maximum is %d, not including temperature channel if supported.\n",
+ st->info->num_channels);
+
+ indio_dev->num_channels = num_channels;
+ st->num_channels = num_channels;
chan_arr = devm_kcalloc(dev, sizeof(*indio_dev->channels),
st->num_channels, GFP_KERNEL);
@@ -932,7 +1226,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
AD7173_CH_ADDRESS(chan_arr[chan_index].channel,
chan_arr[chan_index].channel2);
chan_st_priv->cfg.bipolar = false;
- chan_st_priv->cfg.input_buf = true;
+ chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan_st_priv->cfg.ref_sel = AD7173_SETUP_REF_SEL_INT_REF;
st->adc_mode |= AD7173_ADC_MODE_REF_EN;
@@ -940,18 +1234,41 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
}
device_for_each_child_node_scoped(dev, child) {
+ bool is_current_chan = false;
+
chan = &chan_arr[chan_index];
+ *chan = ad7173_channel_template;
chan_st_priv = &chans_st_arr[chan_index];
ret = fwnode_property_read_u32_array(child, "diff-channels",
ain, ARRAY_SIZE(ain));
- if (ret)
- return ret;
+ if (ret) {
+ ret = fwnode_property_read_u32(child, "single-channel",
+ ain);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Channel must define one of diff-channels or single-channel.\n");
- if (ain[0] >= st->info->num_inputs ||
- ain[1] >= st->info->num_inputs)
- return dev_err_probe(dev, -EINVAL,
- "Input pin number out of range for pair (%d %d).\n",
- ain[0], ain[1]);
+ is_current_chan = fwnode_property_read_bool(child, "adi,current-channel");
+ } else {
+ chan->differential = true;
+ }
+
+ if (is_current_chan) {
+ ret = ad4111_validate_current_ain(st, ain);
+ if (ret)
+ return ret;
+ } else {
+ if (!chan->differential) {
+ ret = fwnode_property_read_u32(child,
+ "common-mode-channel", ain + 1);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "common-mode-channel must be defined for single-ended channels.\n");
+ }
+ ret = ad7173_validate_voltage_ain_inputs(st, ain[0], ain[1]);
+ if (ret)
+ return ret;
+ }
ret = fwnode_property_match_property_string(child,
"adi,reference-select",
@@ -962,40 +1279,36 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
else
ref_sel = ret;
- if (ref_sel == AD7173_SETUP_REF_SEL_INT_REF &&
- !st->info->has_int_ref)
- return dev_err_probe(dev, -EINVAL,
- "Internal reference is not available on current model.\n");
-
- if (ref_sel == AD7173_SETUP_REF_SEL_EXT_REF2 && !st->info->has_ref2)
- return dev_err_probe(dev, -EINVAL,
- "External reference 2 is not available on current model.\n");
-
- ret = ad7173_get_ref_voltage_milli(st, ref_sel);
- if (ret < 0)
- return dev_err_probe(dev, ret,
- "Cannot use reference %u\n", ref_sel);
+ ret = ad7173_validate_reference(st, ref_sel);
+ if (ret)
+ return ret;
if (ref_sel == AD7173_SETUP_REF_SEL_INT_REF)
st->adc_mode |= AD7173_ADC_MODE_REF_EN;
chan_st_priv->cfg.ref_sel = ref_sel;
- *chan = ad7173_channel_template;
chan->address = chan_index;
chan->scan_index = chan_index;
chan->channel = ain[0];
- chan->channel2 = ain[1];
- chan->differential = true;
-
- chan_st_priv->ain = AD7173_CH_ADDRESS(ain[0], ain[1]);
chan_st_priv->chan_reg = chan_index;
- chan_st_priv->cfg.input_buf = true;
+ chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan_st_priv->cfg.odr = 0;
chan_st_priv->cfg.bipolar = fwnode_property_read_bool(child, "bipolar");
if (chan_st_priv->cfg.bipolar)
chan->info_mask_separate |= BIT(IIO_CHAN_INFO_OFFSET);
+ if (is_current_chan) {
+ chan->type = IIO_CURRENT;
+ chan->differential = false;
+ chan->channel2 = 0;
+ chan_st_priv->ain = ad4111_current_channel_config[ain[0]];
+ } else {
+ chan_st_priv->cfg.input_buf = st->info->has_input_buf;
+ chan->channel2 = ain[1];
+ chan_st_priv->ain = AD7173_CH_ADDRESS(ain[0], ain[1]);
+ }
+
chan_index++;
}
return 0;
@@ -1005,7 +1318,6 @@ static int ad7173_fw_parse_device_config(struct iio_dev *indio_dev)
{
struct ad7173_state *st = iio_priv(indio_dev);
struct device *dev = indio_dev->dev.parent;
- unsigned int num_channels;
int ret;
st->regulators[0].supply = ad7173_ref_sel_str[AD7173_SETUP_REF_SEL_EXT_REF];
@@ -1064,16 +1376,6 @@ static int ad7173_fw_parse_device_config(struct iio_dev *indio_dev)
ad7173_sigma_delta_info.irq_line = ret;
- num_channels = device_get_child_node_count(dev);
-
- if (st->info->has_temp)
- num_channels++;
-
- if (num_channels == 0)
- return dev_err_probe(dev, -ENODATA, "No channels specified\n");
- indio_dev->num_channels = num_channels;
- st->num_channels = num_channels;
-
return ad7173_fw_parse_channel_config(indio_dev);
}
@@ -1133,32 +1435,35 @@ static int ad7173_probe(struct spi_device *spi)
}
static const struct of_device_id ad7173_of_match[] = {
- { .compatible = "adi,ad7172-2",
- .data = &ad7173_device_info[ID_AD7172_2]},
- { .compatible = "adi,ad7172-4",
- .data = &ad7173_device_info[ID_AD7172_4]},
- { .compatible = "adi,ad7173-8",
- .data = &ad7173_device_info[ID_AD7173_8]},
- { .compatible = "adi,ad7175-2",
- .data = &ad7173_device_info[ID_AD7175_2]},
- { .compatible = "adi,ad7175-8",
- .data = &ad7173_device_info[ID_AD7175_8]},
- { .compatible = "adi,ad7176-2",
- .data = &ad7173_device_info[ID_AD7176_2]},
- { .compatible = "adi,ad7177-2",
- .data = &ad7173_device_info[ID_AD7177_2]},
+ { .compatible = "ad4111", .data = &ad4111_device_info },
+ { .compatible = "ad4112", .data = &ad4112_device_info },
+ { .compatible = "ad4114", .data = &ad4114_device_info },
+ { .compatible = "ad4115", .data = &ad4115_device_info },
+ { .compatible = "ad4116", .data = &ad4116_device_info },
+ { .compatible = "adi,ad7172-2", .data = &ad7172_2_device_info },
+ { .compatible = "adi,ad7172-4", .data = &ad7172_4_device_info },
+ { .compatible = "adi,ad7173-8", .data = &ad7173_8_device_info },
+ { .compatible = "adi,ad7175-2", .data = &ad7175_2_device_info },
+ { .compatible = "adi,ad7175-8", .data = &ad7175_8_device_info },
+ { .compatible = "adi,ad7176-2", .data = &ad7176_2_device_info },
+ { .compatible = "adi,ad7177-2", .data = &ad7177_2_device_info },
{ }
};
MODULE_DEVICE_TABLE(of, ad7173_of_match);
static const struct spi_device_id ad7173_id_table[] = {
- { "ad7172-2", (kernel_ulong_t)&ad7173_device_info[ID_AD7172_2]},
- { "ad7172-4", (kernel_ulong_t)&ad7173_device_info[ID_AD7172_4]},
- { "ad7173-8", (kernel_ulong_t)&ad7173_device_info[ID_AD7173_8]},
- { "ad7175-2", (kernel_ulong_t)&ad7173_device_info[ID_AD7175_2]},
- { "ad7175-8", (kernel_ulong_t)&ad7173_device_info[ID_AD7175_8]},
- { "ad7176-2", (kernel_ulong_t)&ad7173_device_info[ID_AD7176_2]},
- { "ad7177-2", (kernel_ulong_t)&ad7173_device_info[ID_AD7177_2]},
+ { "ad4111", (kernel_ulong_t)&ad4111_device_info },
+ { "ad4112", (kernel_ulong_t)&ad4112_device_info },
+ { "ad4114", (kernel_ulong_t)&ad4114_device_info },
+ { "ad4115", (kernel_ulong_t)&ad4115_device_info },
+ { "ad4116", (kernel_ulong_t)&ad4116_device_info },
+ { "ad7172-2", (kernel_ulong_t)&ad7172_2_device_info },
+ { "ad7172-4", (kernel_ulong_t)&ad7172_4_device_info },
+ { "ad7173-8", (kernel_ulong_t)&ad7173_8_device_info },
+ { "ad7175-2", (kernel_ulong_t)&ad7175_2_device_info },
+ { "ad7175-8", (kernel_ulong_t)&ad7175_8_device_info },
+ { "ad7176-2", (kernel_ulong_t)&ad7176_2_device_info },
+ { "ad7177-2", (kernel_ulong_t)&ad7177_2_device_info },
{ }
};
MODULE_DEVICE_TABLE(spi, ad7173_id_table);
@@ -1176,5 +1481,5 @@ module_spi_driver(ad7173_driver);
MODULE_IMPORT_NS(IIO_AD_SIGMA_DELTA);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafo.de>");
MODULE_AUTHOR("Dumitru Ceclan <dumitru.ceclan@analog.com>");
-MODULE_DESCRIPTION("Analog Devices AD7172/AD7173/AD7175/AD7176 ADC driver");
+MODULE_DESCRIPTION("Analog Devices AD7173 and similar ADC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 7bcc7e2aa2a2..334ab90991d4 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * AD7190 AD7192 AD7193 AD7195 SPI ADC driver
+ * AD7192 and similar SPI ADC driver
*
* Copyright 2011-2015 Analog Devices Inc.
*/
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
+#include <linux/units.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -128,10 +129,24 @@
#define AD7193_CH_AIN8 0x480 /* AIN7 - AINCOM */
#define AD7193_CH_AINCOM 0x600 /* AINCOM - AINCOM */
+#define AD7194_CH_POS(x) (((x) - 1) << 4)
+#define AD7194_CH_NEG(x) ((x) - 1)
+
+/* 10th bit corresponds to CON18(Pseudo) */
+#define AD7194_CH(p) (BIT(10) | AD7194_CH_POS(p))
+
+#define AD7194_DIFF_CH(p, n) (AD7194_CH_POS(p) | AD7194_CH_NEG(n))
+#define AD7194_CH_TEMP 0x100
+#define AD7194_CH_BASE_NR 2
+#define AD7194_CH_AIN_START 1
+#define AD7194_CH_AIN_NR 16
+#define AD7194_CH_MAX_NR 272
+
/* ID Register Bit Designations (AD7192_REG_ID) */
#define CHIPID_AD7190 0x4
#define CHIPID_AD7192 0x0
#define CHIPID_AD7193 0x2
+#define CHIPID_AD7194 0x3
#define CHIPID_AD7195 0x6
#define AD7192_ID_MASK GENMASK(3, 0)
@@ -169,6 +184,7 @@ enum {
ID_AD7190,
ID_AD7192,
ID_AD7193,
+ ID_AD7194,
ID_AD7195,
};
@@ -177,19 +193,21 @@ struct ad7192_chip_info {
const char *name;
const struct iio_chan_spec *channels;
u8 num_channels;
+ const struct ad_sigma_delta_info *sigma_delta_info;
const struct iio_info *info;
+ int (*parse_channels)(struct iio_dev *indio_dev);
};
struct ad7192_state {
const struct ad7192_chip_info *chip_info;
- struct regulator *avdd;
- struct regulator *vref;
struct clk *mclk;
u16 int_vref_mv;
+ u32 aincom_mv;
u32 fclk;
u32 mode;
u32 conf;
u32 scale_avail[8][2];
+ u32 filter_freq_avail[4][2];
u32 oversampling_ratio_avail[4];
u8 gpocon;
u8 clock_sel;
@@ -343,6 +361,18 @@ static const struct ad_sigma_delta_info ad7192_sigma_delta_info = {
.irq_flags = IRQF_TRIGGER_FALLING,
};
+static const struct ad_sigma_delta_info ad7194_sigma_delta_info = {
+ .set_channel = ad7192_set_channel,
+ .append_status = ad7192_append_status,
+ .disable_all = ad7192_disable_all,
+ .set_mode = ad7192_set_mode,
+ .has_registers = true,
+ .addr_shift = 3,
+ .read_mask = BIT(6),
+ .status_ch_mask = GENMASK(3, 0),
+ .irq_flags = IRQF_TRIGGER_FALLING,
+};
+
static const struct ad_sd_calib_data ad7192_calib_arr[8] = {
{AD7192_MODE_CAL_INT_ZERO, AD7192_CH_AIN1},
{AD7192_MODE_CAL_INT_FULL, AD7192_CH_AIN1},
@@ -473,6 +503,16 @@ static int ad7192_setup(struct iio_dev *indio_dev, struct device *dev)
st->oversampling_ratio_avail[2] = 8;
st->oversampling_ratio_avail[3] = 16;
+ st->filter_freq_avail[0][0] = 600;
+ st->filter_freq_avail[1][0] = 800;
+ st->filter_freq_avail[2][0] = 2300;
+ st->filter_freq_avail[3][0] = 2720;
+
+ st->filter_freq_avail[0][1] = 1000;
+ st->filter_freq_avail[1][1] = 1000;
+ st->filter_freq_avail[2][1] = 1000;
+ st->filter_freq_avail[3][1] = 1000;
+
return 0;
}
@@ -586,48 +626,24 @@ static int ad7192_get_f_adc(struct ad7192_state *st)
f_order * FIELD_GET(AD7192_MODE_RATE_MASK, st->mode));
}
-static void ad7192_get_available_filter_freq(struct ad7192_state *st,
- int *freq)
+static void ad7192_update_filter_freq_avail(struct ad7192_state *st)
{
unsigned int fadc;
/* Formulas for filter at page 25 of the datasheet */
fadc = ad7192_compute_f_adc(st, false, true);
- freq[0] = DIV_ROUND_CLOSEST(fadc * 240, 1024);
+ st->filter_freq_avail[0][0] = DIV_ROUND_CLOSEST(fadc * 240, 1024);
fadc = ad7192_compute_f_adc(st, true, true);
- freq[1] = DIV_ROUND_CLOSEST(fadc * 240, 1024);
+ st->filter_freq_avail[1][0] = DIV_ROUND_CLOSEST(fadc * 240, 1024);
fadc = ad7192_compute_f_adc(st, false, false);
- freq[2] = DIV_ROUND_CLOSEST(fadc * 230, 1024);
+ st->filter_freq_avail[2][0] = DIV_ROUND_CLOSEST(fadc * 230, 1024);
fadc = ad7192_compute_f_adc(st, true, false);
- freq[3] = DIV_ROUND_CLOSEST(fadc * 272, 1024);
+ st->filter_freq_avail[3][0] = DIV_ROUND_CLOSEST(fadc * 272, 1024);
}
-static ssize_t ad7192_show_filter_avail(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
- unsigned int freq_avail[4], i;
- size_t len = 0;
-
- ad7192_get_available_filter_freq(st, freq_avail);
-
- for (i = 0; i < ARRAY_SIZE(freq_avail); i++)
- len += sysfs_emit_at(buf, len, "%d.%03d ", freq_avail[i] / 1000,
- freq_avail[i] % 1000);
-
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static IIO_DEVICE_ATTR(filter_low_pass_3db_frequency_available,
- 0444, ad7192_show_filter_avail, NULL, 0);
-
static IIO_DEVICE_ATTR(bridge_switch_en, 0644,
ad7192_show_bridge_switch, ad7192_set,
AD7192_REG_GPOCON);
@@ -637,7 +653,6 @@ static IIO_DEVICE_ATTR(ac_excitation_en, 0644,
AD7192_REG_CONF);
static struct attribute *ad7192_attributes[] = {
- &iio_dev_attr_filter_low_pass_3db_frequency_available.dev_attr.attr,
&iio_dev_attr_bridge_switch_en.dev_attr.attr,
NULL
};
@@ -647,7 +662,6 @@ static const struct attribute_group ad7192_attribute_group = {
};
static struct attribute *ad7195_attributes[] = {
- &iio_dev_attr_filter_low_pass_3db_frequency_available.dev_attr.attr,
&iio_dev_attr_bridge_switch_en.dev_attr.attr,
&iio_dev_attr_ac_excitation_en.dev_attr.attr,
NULL
@@ -665,17 +679,15 @@ static unsigned int ad7192_get_temp_scale(bool unipolar)
static int ad7192_set_3db_filter_freq(struct ad7192_state *st,
int val, int val2)
{
- int freq_avail[4], i, ret, freq;
+ int i, ret, freq;
unsigned int diff_new, diff_old;
int idx = 0;
diff_old = U32_MAX;
freq = val * 1000 + val2;
- ad7192_get_available_filter_freq(st, freq_avail);
-
- for (i = 0; i < ARRAY_SIZE(freq_avail); i++) {
- diff_new = abs(freq - freq_avail[i]);
+ for (i = 0; i < ARRAY_SIZE(st->filter_freq_avail); i++) {
+ diff_new = abs(freq - st->filter_freq_avail[i][0]);
if (diff_new < diff_old) {
diff_old = diff_new;
idx = i;
@@ -759,10 +771,24 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
*val = -(1 << (chan->scan_type.realbits - 1));
else
*val = 0;
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ /*
+ * Only applies to pseudo-differential inputs.
+ * AINCOM voltage has to be converted to "raw" units.
+ */
+ if (st->aincom_mv && !chan->differential)
+ *val += DIV_ROUND_CLOSEST_ULL((u64)st->aincom_mv * NANO,
+ st->scale_avail[gain][1]);
+ return IIO_VAL_INT;
/* Kelvin to Celsius */
- if (chan->type == IIO_TEMP)
+ case IIO_TEMP:
*val -= 273 * ad7192_get_temp_scale(unipolar);
- return IIO_VAL_INT;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
case IIO_CHAN_INFO_SAMP_FREQ:
*val = DIV_ROUND_CLOSEST(ad7192_get_f_adc(st), 1024);
return IIO_VAL_INT;
@@ -792,10 +818,11 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
+ mutex_lock(&st->lock);
+
switch (mask) {
case IIO_CHAN_INFO_SCALE:
ret = -EINVAL;
- mutex_lock(&st->lock);
for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
if (val2 == st->scale_avail[i][1]) {
ret = 0;
@@ -809,7 +836,6 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
ad7192_calibrate_all(st);
break;
}
- mutex_unlock(&st->lock);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
if (!val) {
@@ -826,13 +852,13 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
st->mode &= ~AD7192_MODE_RATE_MASK;
st->mode |= FIELD_PREP(AD7192_MODE_RATE_MASK, div);
ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
+ ad7192_update_filter_freq_avail(st);
break;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
ret = ad7192_set_3db_filter_freq(st, val, val2 / 1000);
break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
ret = -EINVAL;
- mutex_lock(&st->lock);
for (i = 0; i < ARRAY_SIZE(st->oversampling_ratio_avail); i++)
if (val == st->oversampling_ratio_avail[i]) {
ret = 0;
@@ -845,12 +871,14 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
3, st->mode);
break;
}
- mutex_unlock(&st->lock);
+ ad7192_update_filter_freq_avail(st);
break;
default:
ret = -EINVAL;
}
+ mutex_unlock(&st->lock);
+
iio_device_release_direct_mode(indio_dev);
return ret;
@@ -889,6 +917,12 @@ static int ad7192_read_avail(struct iio_dev *indio_dev,
*length = ARRAY_SIZE(st->scale_avail) * 2;
return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *vals = (int *)st->filter_freq_avail;
+ *type = IIO_VAL_FRACTIONAL;
+ *length = ARRAY_SIZE(st->filter_freq_avail) * 2;
+
+ return IIO_AVAIL_LIST;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
*vals = (int *)st->oversampling_ratio_avail;
*type = IIO_VAL_INT;
@@ -930,6 +964,14 @@ static const struct iio_info ad7192_info = {
.update_scan_mode = ad7192_update_scan_mode,
};
+static const struct iio_info ad7194_info = {
+ .read_raw = ad7192_read_raw,
+ .write_raw = ad7192_write_raw,
+ .write_raw_get_fmt = ad7192_write_raw_get_fmt,
+ .read_avail = ad7192_read_avail,
+ .validate_trigger = ad_sd_validate_trigger,
+};
+
static const struct iio_info ad7195_info = {
.read_raw = ad7192_read_raw,
.write_raw = ad7192_write_raw,
@@ -956,7 +998,9 @@ static const struct iio_info ad7195_info = {
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \
(_mask_all), \
.info_mask_shared_by_type_available = (_mask_type_av), \
- .info_mask_shared_by_all_available = (_mask_all_av), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \
+ (_mask_all_av), \
.ext_info = (_ext_info), \
.scan_index = (_si), \
.scan_type = { \
@@ -1019,12 +1063,95 @@ static const struct iio_chan_spec ad7193_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(14),
};
+static bool ad7194_validate_ain_channel(struct device *dev, u32 ain)
+{
+ return in_range(ain, AD7194_CH_AIN_START, AD7194_CH_AIN_NR);
+}
+
+static int ad7194_parse_channels(struct iio_dev *indio_dev)
+{
+ struct device *dev = indio_dev->dev.parent;
+ struct iio_chan_spec *ad7194_channels;
+ const struct iio_chan_spec ad7194_chan = AD7193_CHANNEL(0, 0, 0);
+ const struct iio_chan_spec ad7194_chan_diff = AD7193_DIFF_CHANNEL(0, 0, 0, 0);
+ const struct iio_chan_spec ad7194_chan_temp = AD719x_TEMP_CHANNEL(0, 0);
+ const struct iio_chan_spec ad7194_chan_timestamp = IIO_CHAN_SOFT_TIMESTAMP(0);
+ unsigned int num_channels, index = 0;
+ u32 ain[2];
+ int ret;
+
+ num_channels = device_get_child_node_count(dev);
+ if (num_channels > AD7194_CH_MAX_NR)
+ return dev_err_probe(dev, -EINVAL, "Too many channels: %u\n",
+ num_channels);
+
+ num_channels += AD7194_CH_BASE_NR;
+
+ ad7194_channels = devm_kcalloc(dev, num_channels,
+ sizeof(*ad7194_channels), GFP_KERNEL);
+ if (!ad7194_channels)
+ return -ENOMEM;
+
+ indio_dev->channels = ad7194_channels;
+ indio_dev->num_channels = num_channels;
+
+ device_for_each_child_node_scoped(dev, child) {
+ ret = fwnode_property_read_u32_array(child, "diff-channels",
+ ain, ARRAY_SIZE(ain));
+ if (ret == 0) {
+ if (!ad7194_validate_ain_channel(dev, ain[0]))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid AIN channel: %u\n",
+ ain[0]);
+
+ if (!ad7194_validate_ain_channel(dev, ain[1]))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid AIN channel: %u\n",
+ ain[1]);
+
+ *ad7194_channels = ad7194_chan_diff;
+ ad7194_channels->scan_index = index++;
+ ad7194_channels->channel = ain[0];
+ ad7194_channels->channel2 = ain[1];
+ ad7194_channels->address = AD7194_DIFF_CH(ain[0], ain[1]);
+ } else {
+ ret = fwnode_property_read_u32(child, "single-channel",
+ &ain[0]);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Missing channel property\n");
+
+ if (!ad7194_validate_ain_channel(dev, ain[0]))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid AIN channel: %u\n",
+ ain[0]);
+
+ *ad7194_channels = ad7194_chan;
+ ad7194_channels->scan_index = index++;
+ ad7194_channels->channel = ain[0];
+ ad7194_channels->address = AD7194_CH(ain[0]);
+ }
+ ad7194_channels++;
+ }
+
+ *ad7194_channels = ad7194_chan_temp;
+ ad7194_channels->scan_index = index++;
+ ad7194_channels->address = AD7194_CH_TEMP;
+ ad7194_channels++;
+
+ *ad7194_channels = ad7194_chan_timestamp;
+ ad7194_channels->scan_index = index;
+
+ return 0;
+}
+
static const struct ad7192_chip_info ad7192_chip_info_tbl[] = {
[ID_AD7190] = {
.chip_id = CHIPID_AD7190,
.name = "ad7190",
.channels = ad7192_channels,
.num_channels = ARRAY_SIZE(ad7192_channels),
+ .sigma_delta_info = &ad7192_sigma_delta_info,
.info = &ad7192_info,
},
[ID_AD7192] = {
@@ -1032,6 +1159,7 @@ static const struct ad7192_chip_info ad7192_chip_info_tbl[] = {
.name = "ad7192",
.channels = ad7192_channels,
.num_channels = ARRAY_SIZE(ad7192_channels),
+ .sigma_delta_info = &ad7192_sigma_delta_info,
.info = &ad7192_info,
},
[ID_AD7193] = {
@@ -1039,34 +1167,37 @@ static const struct ad7192_chip_info ad7192_chip_info_tbl[] = {
.name = "ad7193",
.channels = ad7193_channels,
.num_channels = ARRAY_SIZE(ad7193_channels),
+ .sigma_delta_info = &ad7192_sigma_delta_info,
.info = &ad7192_info,
},
+ [ID_AD7194] = {
+ .chip_id = CHIPID_AD7194,
+ .name = "ad7194",
+ .info = &ad7194_info,
+ .sigma_delta_info = &ad7194_sigma_delta_info,
+ .parse_channels = ad7194_parse_channels,
+ },
[ID_AD7195] = {
.chip_id = CHIPID_AD7195,
.name = "ad7195",
.channels = ad7192_channels,
.num_channels = ARRAY_SIZE(ad7192_channels),
+ .sigma_delta_info = &ad7192_sigma_delta_info,
.info = &ad7195_info,
},
};
-static void ad7192_reg_disable(void *reg)
-{
- regulator_disable(reg);
-}
-
static int ad7192_probe(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
struct ad7192_state *st;
struct iio_dev *indio_dev;
- int ret;
+ int ret, avdd_mv;
- if (!spi->irq) {
- dev_err(&spi->dev, "no IRQ?\n");
- return -ENODEV;
- }
+ if (!spi->irq)
+ return dev_err_probe(dev, -ENODEV, "Failed to get IRQ\n");
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
@@ -1074,69 +1205,79 @@ static int ad7192_probe(struct spi_device *spi)
mutex_init(&st->lock);
- st->avdd = devm_regulator_get(&spi->dev, "avdd");
- if (IS_ERR(st->avdd))
- return PTR_ERR(st->avdd);
-
- ret = regulator_enable(st->avdd);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable specified AVdd supply\n");
- return ret;
+ /*
+ * Regulator aincom is optional to maintain compatibility with older DT.
+ * Newer firmware should provide a zero volt fixed supply if wired to
+ * ground.
+ */
+ ret = devm_regulator_get_enable_read_voltage(dev, "aincom");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "Failed to get AINCOM voltage\n");
+
+ st->aincom_mv = ret == -ENODEV ? 0 : ret / MILLI;
+
+ /* AVDD can optionally be used as reference voltage */
+ ret = devm_regulator_get_enable_read_voltage(dev, "avdd");
+ if (ret == -ENODEV || ret == -EINVAL) {
+ int ret2;
+
+ /*
+ * We get -EINVAL if avdd is a supply with unknown voltage. We
+ * still need to enable it since it is also a power supply.
+ */
+ ret2 = devm_regulator_get_enable(dev, "avdd");
+ if (ret2)
+ return dev_err_probe(dev, ret2,
+ "Failed to enable AVDD supply\n");
+ } else if (ret < 0) {
+ return dev_err_probe(dev, ret, "Failed to get AVDD voltage\n");
}
- ret = devm_add_action_or_reset(&spi->dev, ad7192_reg_disable, st->avdd);
- if (ret)
- return ret;
+ avdd_mv = ret == -ENODEV || ret == -EINVAL ? 0 : ret / MILLI;
- ret = devm_regulator_get_enable(&spi->dev, "dvdd");
+ ret = devm_regulator_get_enable(dev, "dvdd");
if (ret)
- return dev_err_probe(&spi->dev, ret, "Failed to enable specified DVdd supply\n");
-
- st->vref = devm_regulator_get_optional(&spi->dev, "vref");
- if (IS_ERR(st->vref)) {
- if (PTR_ERR(st->vref) != -ENODEV)
- return PTR_ERR(st->vref);
-
- ret = regulator_get_voltage(st->avdd);
- if (ret < 0)
- return dev_err_probe(&spi->dev, ret,
- "Device tree error, AVdd voltage undefined\n");
- } else {
- ret = regulator_enable(st->vref);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable specified Vref supply\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ad7192_reg_disable, st->vref);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(st->vref);
- if (ret < 0)
- return dev_err_probe(&spi->dev, ret,
- "Device tree error, Vref voltage undefined\n");
+ return dev_err_probe(dev, ret, "Failed to enable specified DVdd supply\n");
+
+ /*
+ * This is either REFIN1 or REFIN2 depending on adi,refin2-pins-enable.
+ * If this supply is not present, fall back to AVDD as reference.
+ */
+ ret = devm_regulator_get_enable_read_voltage(dev, "vref");
+ if (ret == -ENODEV) {
+ if (avdd_mv == 0)
+ return dev_err_probe(dev, -ENODEV,
+ "No reference voltage available\n");
+ } else if (ret < 0) {
+ return ret;
}
- st->int_vref_mv = ret / 1000;
+
+ st->int_vref_mv = ret == -ENODEV ? avdd_mv : ret / MILLI;
st->chip_info = spi_get_device_match_data(spi);
indio_dev->name = st->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = st->chip_info->channels;
- indio_dev->num_channels = st->chip_info->num_channels;
indio_dev->info = st->chip_info->info;
+ if (st->chip_info->parse_channels) {
+ ret = st->chip_info->parse_channels(indio_dev);
+ if (ret)
+ return ret;
+ } else {
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
+ }
- ret = ad_sd_init(&st->sd, indio_dev, spi, &ad7192_sigma_delta_info);
+ ret = ad_sd_init(&st->sd, indio_dev, spi, st->chip_info->sigma_delta_info);
if (ret)
return ret;
- ret = devm_ad_sd_setup_buffer_and_trigger(&spi->dev, indio_dev);
+ ret = devm_ad_sd_setup_buffer_and_trigger(dev, indio_dev);
if (ret)
return ret;
st->fclk = AD7192_INT_FREQ_MHZ;
- st->mclk = devm_clk_get_optional_enabled(&spi->dev, "mclk");
+ st->mclk = devm_clk_get_optional_enabled(dev, "mclk");
if (IS_ERR(st->mclk))
return PTR_ERR(st->mclk);
@@ -1145,24 +1286,23 @@ static int ad7192_probe(struct spi_device *spi)
if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
st->clock_sel == AD7192_CLK_EXT_MCLK2) {
st->fclk = clk_get_rate(st->mclk);
- if (!ad7192_valid_external_frequency(st->fclk)) {
- dev_err(&spi->dev,
- "External clock frequency out of bounds\n");
- return -EINVAL;
- }
+ if (!ad7192_valid_external_frequency(st->fclk))
+ return dev_err_probe(dev, -EINVAL,
+ "External clock frequency out of bounds\n");
}
- ret = ad7192_setup(indio_dev, &spi->dev);
+ ret = ad7192_setup(indio_dev, dev);
if (ret)
return ret;
- return devm_iio_device_register(&spi->dev, indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
static const struct of_device_id ad7192_of_match[] = {
{ .compatible = "adi,ad7190", .data = &ad7192_chip_info_tbl[ID_AD7190] },
{ .compatible = "adi,ad7192", .data = &ad7192_chip_info_tbl[ID_AD7192] },
{ .compatible = "adi,ad7193", .data = &ad7192_chip_info_tbl[ID_AD7193] },
+ { .compatible = "adi,ad7194", .data = &ad7192_chip_info_tbl[ID_AD7194] },
{ .compatible = "adi,ad7195", .data = &ad7192_chip_info_tbl[ID_AD7195] },
{}
};
@@ -1172,6 +1312,7 @@ static const struct spi_device_id ad7192_ids[] = {
{ "ad7190", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7190] },
{ "ad7192", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7192] },
{ "ad7193", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7193] },
+ { "ad7194", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7194] },
{ "ad7195", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7195] },
{}
};
@@ -1188,6 +1329,6 @@ static struct spi_driver ad7192_driver = {
module_spi_driver(ad7192_driver);
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
-MODULE_DESCRIPTION("Analog Devices AD7190, AD7192, AD7193, AD7195 ADC");
+MODULE_DESCRIPTION("Analog Devices AD7192 and similar ADC");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(IIO_AD_SIGMA_DELTA);
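
The ad7192 conversion above leans on devm_regulator_get_enable_read_voltage(), which returns the supply voltage in microvolts on success and -ENODEV when the supply is not described at all. A minimal sketch of the optional-supply fallback pattern used throughout these hunks; the function name and fallback parameter are assumptions for illustration:

#include <linux/regulator/consumer.h>
#include <linux/units.h>

/*
 * Illustrative only: return the reference voltage in mV, falling back to a
 * caller-supplied default when the optional supply is absent.
 */
static int example_get_vref_mv(struct device *dev, int fallback_mv)
{
	int ret;

	ret = devm_regulator_get_enable_read_voltage(dev, "vref");
	if (ret == -ENODEV)
		return fallback_mv;	/* supply not described in firmware */
	if (ret < 0)
		return ret;		/* real error, propagate */

	return ret / MILLI;		/* uV -> mV */
}
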
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 353a97f9c086..bdac020045b4 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -23,9 +23,10 @@
#include <linux/platform_data/ad7266.h>
+#define AD7266_INTERNAL_REF_MV 2500
+
struct ad7266_state {
struct spi_device *spi;
- struct regulator *reg;
unsigned long vref_mv;
struct spi_transfer single_xfer[3];
@@ -157,6 +158,8 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
ret = ad7266_read_single(st, val, chan->address);
iio_device_release_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
*val = (*val >> 2) & 0xfff;
if (chan->scan_type.sign == 's')
*val = sign_extend32(*val,
@@ -377,11 +380,6 @@ static const char * const ad7266_gpio_labels[] = {
"ad0", "ad1", "ad2",
};
-static void ad7266_reg_disable(void *reg)
-{
- regulator_disable(reg);
-}
-
static int ad7266_probe(struct spi_device *spi)
{
struct ad7266_platform_data *pdata = spi->dev.platform_data;
@@ -396,28 +394,11 @@ static int ad7266_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
- st->reg = devm_regulator_get_optional(&spi->dev, "vref");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, ad7266_reg_disable, st->reg);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(st->reg);
- if (ret < 0)
- return ret;
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
+ return ret;
- st->vref_mv = ret / 1000;
- } else {
- /* Any other error indicates that the regulator does exist */
- if (PTR_ERR(st->reg) != -ENODEV)
- return PTR_ERR(st->reg);
- /* Use internal reference */
- st->vref_mv = 2500;
- }
+ st->vref_mv = ret == -ENODEV ? AD7266_INTERNAL_REF_MV : ret / 1000;
if (pdata) {
st->fixed_addr = pdata->fixed_addr;
diff --git a/drivers/iio/adc/ad7291.c b/drivers/iio/adc/ad7291.c
index 14d02b085d3b..b59b2a51623c 100644
--- a/drivers/iio/adc/ad7291.c
+++ b/drivers/iio/adc/ad7291.c
@@ -536,7 +536,7 @@ static int ad7291_probe(struct i2c_client *client)
}
static const struct i2c_device_id ad7291_id[] = {
- { "ad7291", 0 },
+ { "ad7291" },
{}
};
diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
index 6aadd14f459d..ede80f380911 100644
--- a/drivers/iio/adc/ad7292.c
+++ b/drivers/iio/adc/ad7292.c
@@ -17,6 +17,8 @@
#define ADI_VENDOR_ID 0x0018
+#define AD7292_INTERNAL_REF_MV 1250
+
/* AD7292 registers definition */
#define AD7292_REG_VENDOR_ID 0x00
#define AD7292_REG_CONF_BANK 0x05
@@ -79,7 +81,6 @@ static const struct iio_chan_spec ad7292_channels_diff[] = {
struct ad7292_state {
struct spi_device *spi;
- struct regulator *reg;
unsigned short vref_mv;
__be16 d16 __aligned(IIO_DMA_MINALIGN);
@@ -250,13 +251,6 @@ static const struct iio_info ad7292_info = {
.read_raw = ad7292_read_raw,
};
-static void ad7292_regulator_disable(void *data)
-{
- struct ad7292_state *st = data;
-
- regulator_disable(st->reg);
-}
-
static int ad7292_probe(struct spi_device *spi)
{
struct ad7292_state *st;
@@ -277,29 +271,11 @@ static int ad7292_probe(struct spi_device *spi)
return -EINVAL;
}
- st->reg = devm_regulator_get_optional(&spi->dev, "vref");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret) {
- dev_err(&spi->dev,
- "Failed to enable external vref supply\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev,
- ad7292_regulator_disable, st);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(st->reg);
- if (ret < 0)
- return ret;
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
+ return ret;
- st->vref_mv = ret / 1000;
- } else {
- /* Use the internal voltage reference. */
- st->vref_mv = 1250;
- }
+ st->vref_mv = ret == -ENODEV ? AD7292_INTERNAL_REF_MV : ret / 1000;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c
new file mode 100644
index 000000000000..7568cd0a2b32
--- /dev/null
+++ b/drivers/iio/adc/ad7380.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices AD738x Simultaneous Sampling SAR ADCs
+ *
+ * Copyright 2017 Analog Devices Inc.
+ * Copyright 2024 BayLibre, SAS
+ *
+ * Datasheets of supported parts:
+ * ad7380/1 : https://www.analog.com/media/en/technical-documentation/data-sheets/AD7380-7381.pdf
+ * ad7383/4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7383-7384.pdf
+ * ad7380-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7380-4.pdf
+ * ad7381-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7381-4.pdf
+ * ad7383/4-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7383-4-ad7384-4.pdf
+ */
+
+#include <linux/align.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define MAX_NUM_CHANNELS 4
+/* 2.5V internal reference voltage */
+#define AD7380_INTERNAL_REF_MV 2500
+
+/* reading and writing registers is more reliable at lower than max speed */
+#define AD7380_REG_WR_SPEED_HZ 10000000
+
+#define AD7380_REG_WR BIT(15)
+#define AD7380_REG_REGADDR GENMASK(14, 12)
+#define AD7380_REG_DATA GENMASK(11, 0)
+
+#define AD7380_REG_ADDR_NOP 0x0
+#define AD7380_REG_ADDR_CONFIG1 0x1
+#define AD7380_REG_ADDR_CONFIG2 0x2
+#define AD7380_REG_ADDR_ALERT 0x3
+#define AD7380_REG_ADDR_ALERT_LOW_TH 0x4
+#define AD7380_REG_ADDR_ALERT_HIGH_TH 0x5
+
+#define AD7380_CONFIG1_OS_MODE BIT(9)
+#define AD7380_CONFIG1_OSR GENMASK(8, 6)
+#define AD7380_CONFIG1_CRC_W BIT(5)
+#define AD7380_CONFIG1_CRC_R BIT(4)
+#define AD7380_CONFIG1_ALERTEN BIT(3)
+#define AD7380_CONFIG1_RES BIT(2)
+#define AD7380_CONFIG1_REFSEL BIT(1)
+#define AD7380_CONFIG1_PMODE BIT(0)
+
+#define AD7380_CONFIG2_SDO2 GENMASK(9, 8)
+#define AD7380_CONFIG2_SDO BIT(8)
+#define AD7380_CONFIG2_RESET GENMASK(7, 0)
+
+#define AD7380_CONFIG2_RESET_SOFT 0x3C
+#define AD7380_CONFIG2_RESET_HARD 0xFF
+
+#define AD7380_ALERT_LOW_TH GENMASK(11, 0)
+#define AD7380_ALERT_HIGH_TH GENMASK(11, 0)
+
+#define T_CONVERT_NS 190 /* conversion time */
+#define T_CONVERT_0_NS 10 /* 1st conversion start time (oversampling) */
+#define T_CONVERT_X_NS 500 /* xth conversion start time (oversampling) */
+
+struct ad7380_timing_specs {
+ const unsigned int t_csh_ns; /* CS minimum high time */
+};
+
+struct ad7380_chip_info {
+ const char *name;
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ const char * const *vcm_supplies;
+ unsigned int num_vcm_supplies;
+ const unsigned long *available_scan_masks;
+ const struct ad7380_timing_specs *timing_specs;
+};
+
+enum {
+ AD7380_SCAN_TYPE_NORMAL,
+ AD7380_SCAN_TYPE_RESOLUTION_BOOST,
+};
+
+/* Extended scan types for 14-bit chips. */
+static const struct iio_scan_type ad7380_scan_type_14[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 's',
+ .realbits = 14,
+ .storagebits = 16,
+ .endianness = IIO_CPU
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU
+ },
+};
+
+/* Extended scan types for 16-bit chips. */
+static const struct iio_scan_type ad7380_scan_type_16[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 's',
+ .realbits = 18,
+ .storagebits = 32,
+ .endianness = IIO_CPU
+ },
+};
+
+#define AD7380_CHANNEL(index, bits, diff) { \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ ((diff) ? 0 : BIT(IIO_CHAN_INFO_OFFSET)), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .indexed = 1, \
+ .differential = (diff), \
+ .channel = (diff) ? (2 * (index)) : (index), \
+ .channel2 = (diff) ? (2 * (index) + 1) : 0, \
+ .scan_index = (index), \
+ .has_ext_scan_type = 1, \
+ .ext_scan_type = ad7380_scan_type_##bits, \
+ .num_ext_scan_type = ARRAY_SIZE(ad7380_scan_type_##bits),\
+}
+
+#define DEFINE_AD7380_2_CHANNEL(name, bits, diff) \
+static const struct iio_chan_spec name[] = { \
+ AD7380_CHANNEL(0, bits, diff), \
+ AD7380_CHANNEL(1, bits, diff), \
+ IIO_CHAN_SOFT_TIMESTAMP(2), \
+}
+
+#define DEFINE_AD7380_4_CHANNEL(name, bits, diff) \
+static const struct iio_chan_spec name[] = { \
+ AD7380_CHANNEL(0, bits, diff), \
+ AD7380_CHANNEL(1, bits, diff), \
+ AD7380_CHANNEL(2, bits, diff), \
+ AD7380_CHANNEL(3, bits, diff), \
+ IIO_CHAN_SOFT_TIMESTAMP(4), \
+}
+
+/* fully differential */
+DEFINE_AD7380_2_CHANNEL(ad7380_channels, 16, 1);
+DEFINE_AD7380_2_CHANNEL(ad7381_channels, 14, 1);
+DEFINE_AD7380_4_CHANNEL(ad7380_4_channels, 16, 1);
+DEFINE_AD7380_4_CHANNEL(ad7381_4_channels, 14, 1);
+/* pseudo differential */
+DEFINE_AD7380_2_CHANNEL(ad7383_channels, 16, 0);
+DEFINE_AD7380_2_CHANNEL(ad7384_channels, 14, 0);
+DEFINE_AD7380_4_CHANNEL(ad7383_4_channels, 16, 0);
+DEFINE_AD7380_4_CHANNEL(ad7384_4_channels, 14, 0);
+
+static const char * const ad7380_2_channel_vcm_supplies[] = {
+ "aina", "ainb",
+};
+
+static const char * const ad7380_4_channel_vcm_supplies[] = {
+ "aina", "ainb", "ainc", "aind",
+};
+
+/* Since this is simultaneous sampling, we don't allow individual channels. */
+static const unsigned long ad7380_2_channel_scan_masks[] = {
+ GENMASK(1, 0),
+ 0
+};
+
+static const unsigned long ad7380_4_channel_scan_masks[] = {
+ GENMASK(3, 0),
+ 0
+};
+
+static const struct ad7380_timing_specs ad7380_timing = {
+ .t_csh_ns = 10,
+};
+
+static const struct ad7380_timing_specs ad7380_4_timing = {
+ .t_csh_ns = 20,
+};
+
+/*
+ * Available oversampling ratios. The indices correspond with the bit value
+ * expected by the chip. The available ratios depend on the averaging mode;
+ * only normal averaging is supported for now.
+ */
+static const int ad7380_oversampling_ratios[] = {
+ 1, 2, 4, 8, 16, 32,
+};
+
+static const struct ad7380_chip_info ad7380_chip_info = {
+ .name = "ad7380",
+ .channels = ad7380_channels,
+ .num_channels = ARRAY_SIZE(ad7380_channels),
+ .available_scan_masks = ad7380_2_channel_scan_masks,
+ .timing_specs = &ad7380_timing,
+};
+
+static const struct ad7380_chip_info ad7381_chip_info = {
+ .name = "ad7381",
+ .channels = ad7381_channels,
+ .num_channels = ARRAY_SIZE(ad7381_channels),
+ .available_scan_masks = ad7380_2_channel_scan_masks,
+ .timing_specs = &ad7380_timing,
+};
+
+static const struct ad7380_chip_info ad7383_chip_info = {
+ .name = "ad7383",
+ .channels = ad7383_channels,
+ .num_channels = ARRAY_SIZE(ad7383_channels),
+ .vcm_supplies = ad7380_2_channel_vcm_supplies,
+ .num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
+ .available_scan_masks = ad7380_2_channel_scan_masks,
+ .timing_specs = &ad7380_timing,
+};
+
+static const struct ad7380_chip_info ad7384_chip_info = {
+ .name = "ad7384",
+ .channels = ad7384_channels,
+ .num_channels = ARRAY_SIZE(ad7384_channels),
+ .vcm_supplies = ad7380_2_channel_vcm_supplies,
+ .num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
+ .available_scan_masks = ad7380_2_channel_scan_masks,
+ .timing_specs = &ad7380_timing,
+};
+
+static const struct ad7380_chip_info ad7380_4_chip_info = {
+ .name = "ad7380-4",
+ .channels = ad7380_4_channels,
+ .num_channels = ARRAY_SIZE(ad7380_4_channels),
+ .available_scan_masks = ad7380_4_channel_scan_masks,
+ .timing_specs = &ad7380_4_timing,
+};
+
+static const struct ad7380_chip_info ad7381_4_chip_info = {
+ .name = "ad7381-4",
+ .channels = ad7381_4_channels,
+ .num_channels = ARRAY_SIZE(ad7381_4_channels),
+ .available_scan_masks = ad7380_4_channel_scan_masks,
+ .timing_specs = &ad7380_4_timing,
+};
+
+static const struct ad7380_chip_info ad7383_4_chip_info = {
+ .name = "ad7383-4",
+ .channels = ad7383_4_channels,
+ .num_channels = ARRAY_SIZE(ad7383_4_channels),
+ .vcm_supplies = ad7380_4_channel_vcm_supplies,
+ .num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
+ .available_scan_masks = ad7380_4_channel_scan_masks,
+ .timing_specs = &ad7380_4_timing,
+};
+
+static const struct ad7380_chip_info ad7384_4_chip_info = {
+ .name = "ad7384-4",
+ .channels = ad7384_4_channels,
+ .num_channels = ARRAY_SIZE(ad7384_4_channels),
+ .vcm_supplies = ad7380_4_channel_vcm_supplies,
+ .num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
+ .available_scan_masks = ad7380_4_channel_scan_masks,
+ .timing_specs = &ad7380_4_timing,
+};
+
+struct ad7380_state {
+ const struct ad7380_chip_info *chip_info;
+ struct spi_device *spi;
+ struct regmap *regmap;
+ unsigned int oversampling_ratio;
+ bool resolution_boost_enabled;
+ unsigned int vref_mv;
+ unsigned int vcm_mv[MAX_NUM_CHANNELS];
+ /* xfers, message and buffer for reading sample data */
+ struct spi_transfer xfer[2];
+ struct spi_message msg;
+ /*
+ * DMA (thus cache coherency maintenance) requires the transfer buffers
+ * to live in their own cache lines.
+ *
+ * Make the buffer large enough for MAX_NUM_CHANNELS 32-bit samples and
+ * one 64-bit aligned 64-bit timestamp.
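+ * With MAX_NUM_CHANNELS = 4 that works out to ALIGN(16, 8) + 8 = 24
+ * bytes.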
+ */
+ u8 scan_data[ALIGN(MAX_NUM_CHANNELS * sizeof(u32), sizeof(s64))
+ + sizeof(s64)] __aligned(IIO_DMA_MINALIGN);
+ /* buffers for reading/writing registers */
+ u16 tx;
+ u16 rx;
+};
+
+static int ad7380_regmap_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct ad7380_state *st = context;
+ struct spi_transfer xfer = {
+ .speed_hz = AD7380_REG_WR_SPEED_HZ,
+ .bits_per_word = 16,
+ .len = 2,
+ .tx_buf = &st->tx,
+ };
+
+ st->tx = FIELD_PREP(AD7380_REG_WR, 1) |
+ FIELD_PREP(AD7380_REG_REGADDR, reg) |
+ FIELD_PREP(AD7380_REG_DATA, val);
+
+ return spi_sync_transfer(st->spi, &xfer, 1);
+}
+
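+/*
+ * Register reads take two transfers: the first clocks in the read command,
+ * then CS is de-asserted for at least t_csh before the second transfer
+ * clocks out the register contents.
+ */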
+static int ad7380_regmap_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct ad7380_state *st = context;
+ struct spi_transfer xfers[] = {
+ {
+ .speed_hz = AD7380_REG_WR_SPEED_HZ,
+ .bits_per_word = 16,
+ .len = 2,
+ .tx_buf = &st->tx,
+ .cs_change = 1,
+ .cs_change_delay = {
+ .value = st->chip_info->timing_specs->t_csh_ns,
+ .unit = SPI_DELAY_UNIT_NSECS,
+ },
+ }, {
+ .speed_hz = AD7380_REG_WR_SPEED_HZ,
+ .bits_per_word = 16,
+ .len = 2,
+ .rx_buf = &st->rx,
+ },
+ };
+ int ret;
+
+ st->tx = FIELD_PREP(AD7380_REG_WR, 0) |
+ FIELD_PREP(AD7380_REG_REGADDR, reg) |
+ FIELD_PREP(AD7380_REG_DATA, 0);
+
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
+ if (ret < 0)
+ return ret;
+
+ *val = FIELD_GET(AD7380_REG_DATA, st->rx);
+
+ return 0;
+}
+
+static const struct regmap_config ad7380_regmap_config = {
+ .reg_bits = 3,
+ .val_bits = 12,
+ .reg_read = ad7380_regmap_reg_read,
+ .reg_write = ad7380_regmap_reg_write,
+ .max_register = AD7380_REG_ADDR_ALERT_HIGH_TH,
+ .can_sleep = true,
+};
+
+static int ad7380_debugfs_reg_access(struct iio_dev *indio_dev, u32 reg,
+ u32 writeval, u32 *readval)
+{
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ struct ad7380_state *st = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+ else
+ return regmap_write(st->regmap, reg, writeval);
+ }
+ unreachable();
+}
+
+/**
+ * ad7380_update_xfers - update the SPI transfers based on the current scan type
+ * @st: device instance specific state
+ * @scan_type: current scan type
+ */
+static void ad7380_update_xfers(struct ad7380_state *st,
+ const struct iio_scan_type *scan_type)
+{
+ /*
+ * First xfer only triggers conversion and has to be long enough for
+ * all conversions to complete, which can be multiple conversions in the
+ * case of oversampling. Technically T_CONVERT_X_NS is lower for some
+ * chips, but we use the maximum value for simplicity for now.
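+ * For example, with an oversampling ratio of 8 the delay is
+ * 10 + 500 * 7 = 3510 ns.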
+ */
+ if (st->oversampling_ratio > 1)
+ st->xfer[0].delay.value = T_CONVERT_0_NS + T_CONVERT_X_NS *
+ (st->oversampling_ratio - 1);
+ else
+ st->xfer[0].delay.value = T_CONVERT_NS;
+
+ st->xfer[0].delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ /*
+ * Second xfer reads all channels. Data size depends on whether resolution
+ * boost is enabled.
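+ * Note that num_channels includes the soft timestamp channel, which is
+ * not read from the chip, hence the "- 1" below.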
+ */
+ st->xfer[1].bits_per_word = scan_type->realbits;
+ st->xfer[1].len = BITS_TO_BYTES(scan_type->storagebits) *
+ (st->chip_info->num_channels - 1);
+}
+
+static int ad7380_triggered_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ const struct iio_scan_type *scan_type;
+
+ /*
+ * Currently, we always read all channels at the same time. The scan_type
+ * is the same for all channels, so we just pass the first channel.
+ */
+ scan_type = iio_get_current_scan_type(indio_dev, &indio_dev->channels[0]);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ ad7380_update_xfers(st, scan_type);
+
+ return spi_optimize_message(st->spi, &st->msg);
+}
+
+static int ad7380_triggered_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+
+ spi_unoptimize_message(&st->msg);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops ad7380_buffer_setup_ops = {
+ .preenable = ad7380_triggered_buffer_preenable,
+ .postdisable = ad7380_triggered_buffer_postdisable,
+};
+
+static irqreturn_t ad7380_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad7380_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = spi_sync(st->spi, &st->msg);
+ if (ret)
+ goto out;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->scan_data,
+ pf->timestamp);
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int ad7380_read_direct(struct ad7380_state *st, unsigned int scan_index,
+ const struct iio_scan_type *scan_type, int *val)
+{
+ int ret;
+
+ ad7380_update_xfers(st, scan_type);
+
+ ret = spi_sync(st->spi, &st->msg);
+ if (ret < 0)
+ return ret;
+
+ if (scan_type->storagebits > 16)
+ *val = sign_extend32(*(u32 *)(st->scan_data + 4 * scan_index),
+ scan_type->realbits - 1);
+ else
+ *val = sign_extend32(*(u16 *)(st->scan_data + 2 * scan_index),
+ scan_type->realbits - 1);
+
+ return IIO_VAL_INT;
+}
+
+static int ad7380_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ const struct iio_scan_type *scan_type;
+
+ scan_type = iio_get_current_scan_type(indio_dev, chan);
+
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ return ad7380_read_direct(st, chan->scan_index,
+ scan_type, val);
+ }
+ unreachable();
+ case IIO_CHAN_INFO_SCALE:
+ /*
+ * According to the datasheet, the LSB size is:
+ * * (2 × VREF) / 2^N, for differential chips
+ * * VREF / 2^N, for pseudo-differential chips
+ * where N is the ADC resolution (i.e. realbits)
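+ * e.g. a 16-bit differential part with the internal 2.5V reference has
+ * an LSB of (2 * 2500 mV) / 2^16, roughly 0.076 mV.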
+ */
+ *val = st->vref_mv;
+ *val2 = scan_type->realbits - chan->differential;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OFFSET:
+ /*
+ * According to the IIO ABI, the offset is applied before the scale,
+ * so offset = vcm_mv / scale
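+ * = vcm_mv * 2^realbits / vref_mv, expressed in ADC codes.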
+ */
+ *val = st->vcm_mv[chan->channel] * (1 << scan_type->realbits)
+ / st->vref_mv;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = st->oversampling_ratio;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7380_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = ad7380_oversampling_ratios;
+ *length = ARRAY_SIZE(ad7380_oversampling_ratios);
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * ad7380_osr_to_regval - convert ratio to OSR register value
+ * @ratio: ratio to check
+ *
+ * Check if ratio is present in the list of available ratios and return the
+ * corresponding value that needs to be written to the register to select that
+ * ratio.
+ *
+ * Returns: register value (0 to 5) or -EINVAL if there is no exact match
+ */
+static int ad7380_osr_to_regval(int ratio)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ad7380_oversampling_ratios); i++) {
+ if (ratio == ad7380_oversampling_ratios[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int ad7380_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ int ret, osr, boost;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ osr = ad7380_osr_to_regval(val);
+ if (osr < 0)
+ return osr;
+
+ /* always enable resolution boost when oversampling is enabled */
+ boost = osr > 0 ? 1 : 0;
+
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_OSR | AD7380_CONFIG1_RES,
+ FIELD_PREP(AD7380_CONFIG1_OSR, osr) |
+ FIELD_PREP(AD7380_CONFIG1_RES, boost));
+
+ if (ret)
+ return ret;
+
+ st->oversampling_ratio = val;
+ st->resolution_boost_enabled = boost;
+
+ /*
+ * Perform a soft reset. This will flush the oversampling
+ * block and FIFO but will maintain the content of the
+ * configurable registers.
+ */
+ return regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG2,
+ AD7380_CONFIG2_RESET,
+ FIELD_PREP(AD7380_CONFIG2_RESET,
+ AD7380_CONFIG2_RESET_SOFT));
+ }
+ unreachable();
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7380_get_current_scan_type(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+
+ return st->resolution_boost_enabled ? AD7380_SCAN_TYPE_RESOLUTION_BOOST
+ : AD7380_SCAN_TYPE_NORMAL;
+}
+
+static const struct iio_info ad7380_info = {
+ .read_raw = &ad7380_read_raw,
+ .read_avail = &ad7380_read_avail,
+ .write_raw = &ad7380_write_raw,
+ .get_current_scan_type = &ad7380_get_current_scan_type,
+ .debugfs_reg_access = &ad7380_debugfs_reg_access,
+};
+
+static int ad7380_init(struct ad7380_state *st, struct regulator *vref)
+{
+ int ret;
+
+ /* perform hard reset */
+ ret = regmap_update_bits(st->regmap, AD7380_REG_ADDR_CONFIG2,
+ AD7380_CONFIG2_RESET,
+ FIELD_PREP(AD7380_CONFIG2_RESET,
+ AD7380_CONFIG2_RESET_HARD));
+ if (ret < 0)
+ return ret;
+
+ /* select internal or external reference voltage */
+ ret = regmap_update_bits(st->regmap, AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_REFSEL,
+ FIELD_PREP(AD7380_CONFIG1_REFSEL,
+ vref ? 1 : 0));
+ if (ret < 0)
+ return ret;
+
+ /* This is the default value after reset. */
+ st->oversampling_ratio = 1;
+
+ /* SPI 1-wire mode */
+ return regmap_update_bits(st->regmap, AD7380_REG_ADDR_CONFIG2,
+ AD7380_CONFIG2_SDO,
+ FIELD_PREP(AD7380_CONFIG2_SDO, 1));
+}
+
+static void ad7380_regulator_disable(void *p)
+{
+ regulator_disable(p);
+}
+
+static int ad7380_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct ad7380_state *st;
+ struct regulator *vref;
+ int ret, i;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+ st->chip_info = spi_get_device_match_data(spi);
+ if (!st->chip_info)
+ return dev_err_probe(&spi->dev, -EINVAL, "missing match data\n");
+
+ vref = devm_regulator_get_optional(&spi->dev, "refio");
+ if (IS_ERR(vref)) {
+ if (PTR_ERR(vref) != -ENODEV)
+ return dev_err_probe(&spi->dev, PTR_ERR(vref),
+ "Failed to get refio regulator\n");
+
+ vref = NULL;
+ }
+
+ /*
+ * If there is no REFIO supply, we are using the internal 2.5V
+ * reference; otherwise REFIO provides the reference voltage.
+ */
+ if (vref) {
+ ret = regulator_enable(vref);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev,
+ ad7380_regulator_disable, vref);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(vref);
+ if (ret < 0)
+ return ret;
+
+ st->vref_mv = ret / 1000;
+ } else {
+ st->vref_mv = AD7380_INTERNAL_REF_MV;
+ }
+
+ if (st->chip_info->num_vcm_supplies > ARRAY_SIZE(st->vcm_mv))
+ return dev_err_probe(&spi->dev, -EINVAL,
+ "invalid number of VCM supplies\n");
+
+ /*
+ * pseudo-differential chips have common mode supplies for the negative
+ * input pin.
+ */
+ for (i = 0; i < st->chip_info->num_vcm_supplies; i++) {
+ struct regulator *vcm;
+
+ vcm = devm_regulator_get(&spi->dev,
+ st->chip_info->vcm_supplies[i]);
+ if (IS_ERR(vcm))
+ return dev_err_probe(&spi->dev, PTR_ERR(vcm),
+ "Failed to get %s regulator\n",
+ st->chip_info->vcm_supplies[i]);
+
+ ret = regulator_enable(vcm);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev,
+ ad7380_regulator_disable, vcm);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(vcm);
+ if (ret < 0)
+ return ret;
+
+ st->vcm_mv[i] = ret / 1000;
+ }
+
+ st->regmap = devm_regmap_init(&spi->dev, NULL, st, &ad7380_regmap_config);
+ if (IS_ERR(st->regmap))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->regmap),
+ "failed to allocate register map\n");
+
+ /*
+ * Set up a low latency read for getting sample data. Used for both
+ * direct reads and the triggered buffer. Additional fields will be set up in
+ * ad7380_update_xfers() based on the current state of the driver at the
+ * time of the read.
+ */
+
+ /* toggle CS (no data xfer) to trigger a conversion */
+ st->xfer[0].cs_change = 1;
+ st->xfer[0].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns;
+ st->xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ /* then do a second xfer to read the data */
+ st->xfer[1].rx_buf = st->scan_data;
+
+ spi_message_init_with_transfers(&st->msg, st->xfer, ARRAY_SIZE(st->xfer));
+
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
+ indio_dev->name = st->chip_info->name;
+ indio_dev->info = &ad7380_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->available_scan_masks = st->chip_info->available_scan_masks;
+
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ iio_pollfunc_store_time,
+ ad7380_trigger_handler,
+ &ad7380_buffer_setup_ops);
+ if (ret)
+ return ret;
+
+ ret = ad7380_init(st, vref);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct of_device_id ad7380_of_match_table[] = {
+ { .compatible = "adi,ad7380", .data = &ad7380_chip_info },
+ { .compatible = "adi,ad7381", .data = &ad7381_chip_info },
+ { .compatible = "adi,ad7383", .data = &ad7383_chip_info },
+ { .compatible = "adi,ad7384", .data = &ad7384_chip_info },
+ { .compatible = "adi,ad7380-4", .data = &ad7380_4_chip_info },
+ { .compatible = "adi,ad7381-4", .data = &ad7381_4_chip_info },
+ { .compatible = "adi,ad7383-4", .data = &ad7383_4_chip_info },
+ { .compatible = "adi,ad7384-4", .data = &ad7384_4_chip_info },
+ { }
+};
+
+static const struct spi_device_id ad7380_id_table[] = {
+ { "ad7380", (kernel_ulong_t)&ad7380_chip_info },
+ { "ad7381", (kernel_ulong_t)&ad7381_chip_info },
+ { "ad7383", (kernel_ulong_t)&ad7383_chip_info },
+ { "ad7384", (kernel_ulong_t)&ad7384_chip_info },
+ { "ad7380-4", (kernel_ulong_t)&ad7380_4_chip_info },
+ { "ad7381-4", (kernel_ulong_t)&ad7381_4_chip_info },
+ { "ad7383-4", (kernel_ulong_t)&ad7383_4_chip_info },
+ { "ad7384-4", (kernel_ulong_t)&ad7384_4_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad7380_id_table);
+
+static struct spi_driver ad7380_driver = {
+ .driver = {
+ .name = "ad7380",
+ .of_match_table = ad7380_of_match_table,
+ },
+ .probe = ad7380_probe,
+ .id_table = ad7380_id_table,
+};
+module_spi_driver(ad7380_driver);
+
+MODULE_AUTHOR("Stefan Popa <stefan.popa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD738x ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index 1928d9ae5bcf..3a417595294f 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -174,17 +174,14 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = ad7606_scan_direct(indio_dev, chan->address);
- iio_device_release_direct_mode(indio_dev);
-
- if (ret < 0)
- return ret;
- *val = (short)ret;
- return IIO_VAL_INT;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ ret = ad7606_scan_direct(indio_dev, chan->address);
+ if (ret < 0)
+ return ret;
+ *val = (short) ret;
+ return IIO_VAL_INT;
+ }
+ unreachable();
case IIO_CHAN_INFO_SCALE:
if (st->sw_mode_en)
ch = chan->address;
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 5f8cb9aaac70..d4ad7e0b515a 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -152,7 +152,6 @@ struct ad7793_chip_info {
struct ad7793_state {
const struct ad7793_chip_info *chip_info;
- struct regulator *reg;
u16 int_vref_mv;
u16 mode;
u16 conf;
@@ -769,11 +768,6 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
},
};
-static void ad7793_reg_disable(void *reg)
-{
- regulator_disable(reg);
-}
-
static int ad7793_probe(struct spi_device *spi)
{
const struct ad7793_platform_data *pdata = spi->dev.platform_data;
@@ -800,23 +794,11 @@ static int ad7793_probe(struct spi_device *spi)
ad_sd_init(&st->sd, indio_dev, spi, &ad7793_sigma_delta_info);
if (pdata->refsel != AD7793_REFSEL_INTERNAL) {
- st->reg = devm_regulator_get(&spi->dev, "refin");
- if (IS_ERR(st->reg))
- return PTR_ERR(st->reg);
-
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, ad7793_reg_disable, st->reg);
- if (ret)
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "refin");
+ if (ret < 0)
return ret;
- vref_mv = regulator_get_voltage(st->reg);
- if (vref_mv < 0)
- return vref_mv;
-
- vref_mv /= 1000;
+ vref_mv = ret / 1000;
} else {
vref_mv = 1170; /* Build-in ref */
}
diff --git a/drivers/iio/adc/ad7944.c b/drivers/iio/adc/ad7944.c
index 4602ab5ed2a6..0f36138a7144 100644
--- a/drivers/iio/adc/ad7944.c
+++ b/drivers/iio/adc/ad7944.c
@@ -134,18 +134,12 @@ AD7944_DEFINE_CHIP_INFO(ad7985, ad7944, 16, 0);
/* fully differential */
AD7944_DEFINE_CHIP_INFO(ad7986, ad7986, 18, 1);
-static void ad7944_unoptimize_msg(void *msg)
-{
- spi_unoptimize_message(msg);
-}
-
static int ad7944_3wire_cs_mode_init_msg(struct device *dev, struct ad7944_adc *adc,
const struct iio_chan_spec *chan)
{
unsigned int t_conv_ns = adc->always_turbo ? adc->timing_spec->turbo_conv_ns
: adc->timing_spec->conv_ns;
struct spi_transfer *xfers = adc->xfers;
- int ret;
/*
* NB: can get better performance from some SPI controllers if we use
@@ -175,11 +169,7 @@ static int ad7944_3wire_cs_mode_init_msg(struct device *dev, struct ad7944_adc *
spi_message_init_with_transfers(&adc->msg, xfers, 3);
- ret = spi_optimize_message(adc->spi, &adc->msg);
- if (ret)
- return ret;
-
- return devm_add_action_or_reset(dev, ad7944_unoptimize_msg, &adc->msg);
+ return devm_spi_optimize_message(dev, adc->spi, &adc->msg);
}
static int ad7944_4wire_mode_init_msg(struct device *dev, struct ad7944_adc *adc,
@@ -188,7 +178,6 @@ static int ad7944_4wire_mode_init_msg(struct device *dev, struct ad7944_adc *adc
unsigned int t_conv_ns = adc->always_turbo ? adc->timing_spec->turbo_conv_ns
: adc->timing_spec->conv_ns;
struct spi_transfer *xfers = adc->xfers;
- int ret;
/*
* NB: can get better performance from some SPI controllers if we use
@@ -209,11 +198,7 @@ static int ad7944_4wire_mode_init_msg(struct device *dev, struct ad7944_adc *adc
spi_message_init_with_transfers(&adc->msg, xfers, 2);
- ret = spi_optimize_message(adc->spi, &adc->msg);
- if (ret)
- return ret;
-
- return devm_add_action_or_reset(dev, ad7944_unoptimize_msg, &adc->msg);
+ return devm_spi_optimize_message(dev, adc->spi, &adc->msg);
}
static int ad7944_chain_mode_init_msg(struct device *dev, struct ad7944_adc *adc,
@@ -221,7 +206,6 @@ static int ad7944_chain_mode_init_msg(struct device *dev, struct ad7944_adc *adc
u32 n_chain_dev)
{
struct spi_transfer *xfers = adc->xfers;
- int ret;
/*
* NB: SCLK has to be low before we toggle CS to avoid triggering the
@@ -249,17 +233,12 @@ static int ad7944_chain_mode_init_msg(struct device *dev, struct ad7944_adc *adc
spi_message_init_with_transfers(&adc->msg, xfers, 2);
- ret = spi_optimize_message(adc->spi, &adc->msg);
- if (ret)
- return ret;
-
- return devm_add_action_or_reset(dev, ad7944_unoptimize_msg, &adc->msg);
+ return devm_spi_optimize_message(dev, adc->spi, &adc->msg);
}
/**
* ad7944_convert_and_acquire - Perform a single conversion and acquisition
* @adc: The ADC device structure
- * @chan: The channel specification
* Return: 0 on success, a negative error code on failure
*
* Perform a conversion and acquisition of a single sample using the
@@ -268,8 +247,7 @@ static int ad7944_chain_mode_init_msg(struct device *dev, struct ad7944_adc *adc
* Upon successful return adc->sample.raw will contain the conversion result
* (or adc->chain_mode_buf if the device is using chain mode).
*/
-static int ad7944_convert_and_acquire(struct ad7944_adc *adc,
- const struct iio_chan_spec *chan)
+static int ad7944_convert_and_acquire(struct ad7944_adc *adc)
{
int ret;
@@ -291,7 +269,7 @@ static int ad7944_single_conversion(struct ad7944_adc *adc,
{
int ret;
- ret = ad7944_convert_and_acquire(adc, chan);
+ ret = ad7944_convert_and_acquire(adc);
if (ret)
return ret;
@@ -361,7 +339,7 @@ static irqreturn_t ad7944_trigger_handler(int irq, void *p)
struct ad7944_adc *adc = iio_priv(indio_dev);
int ret;
- ret = ad7944_convert_and_acquire(adc, &indio_dev->channels[0]);
+ ret = ad7944_convert_and_acquire(adc);
if (ret)
goto out;
@@ -466,23 +444,17 @@ static const char * const ad7944_power_supplies[] = {
"avdd", "dvdd", "bvdd", "vio"
};
-static void ad7944_ref_disable(void *ref)
-{
- regulator_disable(ref);
-}
-
static int ad7944_probe(struct spi_device *spi)
{
const struct ad7944_chip_info *chip_info;
struct device *dev = &spi->dev;
struct iio_dev *indio_dev;
struct ad7944_adc *adc;
- bool have_refin = false;
- struct regulator *ref;
+ bool have_refin;
struct iio_chan_spec *chain_chan;
unsigned long *chain_scan_masks;
u32 n_chain_dev;
- int ret;
+ int ret, ref_mv;
indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
if (!indio_dev)
@@ -533,47 +505,23 @@ static int ad7944_probe(struct spi_device *spi)
* - external reference: REF is connected, REFIN is not connected
*/
- ref = devm_regulator_get_optional(dev, "ref");
- if (IS_ERR(ref)) {
- if (PTR_ERR(ref) != -ENODEV)
- return dev_err_probe(dev, PTR_ERR(ref),
- "failed to get REF supply\n");
+ ret = devm_regulator_get_enable_read_voltage(dev, "ref");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to get REF voltage\n");
- ref = NULL;
- }
+ ref_mv = ret == -ENODEV ? 0 : ret / 1000;
ret = devm_regulator_get_enable_optional(dev, "refin");
- if (ret == 0)
- have_refin = true;
- else if (ret != -ENODEV)
- return dev_err_probe(dev, ret,
- "failed to get and enable REFIN supply\n");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to get REFIN voltage\n");
+
+ have_refin = ret != -ENODEV;
- if (have_refin && ref)
+ if (have_refin && ref_mv)
return dev_err_probe(dev, -EINVAL,
"cannot have both refin and ref supplies\n");
- if (ref) {
- ret = regulator_enable(ref);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to enable REF supply\n");
-
- ret = devm_add_action_or_reset(dev, ad7944_ref_disable, ref);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(ref);
- if (ret < 0)
- return dev_err_probe(dev, ret,
- "failed to get REF voltage\n");
-
- /* external reference */
- adc->ref_mv = ret / 1000;
- } else {
- /* internal reference */
- adc->ref_mv = AD7944_INTERNAL_REF_MV;
- }
+ adc->ref_mv = ref_mv ?: AD7944_INTERNAL_REF_MV;
adc->cnv = devm_gpiod_get_optional(dev, "cnv", GPIOD_OUT_LOW);
if (IS_ERR(adc->cnv))
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index e85b763b9ffc..41c1b519c573 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -107,27 +107,27 @@
#define AD9647_MAX_TEST_POINTS 32
struct ad9467_chip_info {
- const char *name;
- unsigned int id;
- const struct iio_chan_spec *channels;
- unsigned int num_channels;
- const unsigned int (*scale_table)[2];
- int num_scales;
- unsigned long max_rate;
- unsigned int default_output_mode;
- unsigned int vref_mask;
- unsigned int num_lanes;
+ const char *name;
+ unsigned int id;
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ const unsigned int (*scale_table)[2];
+ int num_scales;
+ unsigned long max_rate;
+ unsigned int default_output_mode;
+ unsigned int vref_mask;
+ unsigned int num_lanes;
/* data clock output */
- bool has_dco;
+ bool has_dco;
};
struct ad9467_state {
- const struct ad9467_chip_info *info;
- struct iio_backend *back;
- struct spi_device *spi;
- struct clk *clk;
- unsigned int output_mode;
- unsigned int (*scales)[2];
+ const struct ad9467_chip_info *info;
+ struct iio_backend *back;
+ struct spi_device *spi;
+ struct clk *clk;
+ unsigned int output_mode;
+ unsigned int (*scales)[2];
/*
* Times 2 because we may also invert the signal polarity and run the
* calibration again. For some reference on the test points (ad9265) see:
@@ -138,12 +138,13 @@ struct ad9467_state {
* at the io delay control section.
*/
DECLARE_BITMAP(calib_map, AD9647_MAX_TEST_POINTS * 2);
- struct gpio_desc *pwrdown_gpio;
+ struct gpio_desc *pwrdown_gpio;
/* ensure consistent state obtained on multiple related accesses */
- struct mutex lock;
+ struct mutex lock;
+ u8 buf[3] __aligned(IIO_DMA_MINALIGN);
};
-static int ad9467_spi_read(struct spi_device *spi, unsigned int reg)
+static int ad9467_spi_read(struct ad9467_state *st, unsigned int reg)
{
unsigned char tbuf[2], rbuf[1];
int ret;
@@ -151,7 +152,7 @@ static int ad9467_spi_read(struct spi_device *spi, unsigned int reg)
tbuf[0] = 0x80 | (reg >> 8);
tbuf[1] = reg & 0xFF;
- ret = spi_write_then_read(spi,
+ ret = spi_write_then_read(st->spi,
tbuf, ARRAY_SIZE(tbuf),
rbuf, ARRAY_SIZE(rbuf));
@@ -161,35 +162,32 @@ static int ad9467_spi_read(struct spi_device *spi, unsigned int reg)
return rbuf[0];
}
-static int ad9467_spi_write(struct spi_device *spi, unsigned int reg,
+static int ad9467_spi_write(struct ad9467_state *st, unsigned int reg,
unsigned int val)
{
- unsigned char buf[3];
-
- buf[0] = reg >> 8;
- buf[1] = reg & 0xFF;
- buf[2] = val;
+ st->buf[0] = reg >> 8;
+ st->buf[1] = reg & 0xFF;
+ st->buf[2] = val;
- return spi_write(spi, buf, ARRAY_SIZE(buf));
+ return spi_write(st->spi, st->buf, ARRAY_SIZE(st->buf));
}
static int ad9467_reg_access(struct iio_dev *indio_dev, unsigned int reg,
unsigned int writeval, unsigned int *readval)
{
struct ad9467_state *st = iio_priv(indio_dev);
- struct spi_device *spi = st->spi;
int ret;
if (!readval) {
guard(mutex)(&st->lock);
- ret = ad9467_spi_write(spi, reg, writeval);
+ ret = ad9467_spi_write(st, reg, writeval);
if (ret)
return ret;
- return ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
+ return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER,
AN877_ADC_TRANSFER_SYNC);
}
- ret = ad9467_spi_read(spi, reg);
+ ret = ad9467_spi_read(st, reg);
if (ret < 0)
return ret;
*readval = ret;
@@ -243,11 +241,11 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index,
}
static const struct iio_chan_spec ad9434_channels[] = {
- AD9467_CHAN(0, 0, 12, 'S'),
+ AD9467_CHAN(0, 0, 12, 's'),
};
static const struct iio_chan_spec ad9467_channels[] = {
- AD9467_CHAN(0, 0, 16, 'S'),
+ AD9467_CHAN(0, 0, 16, 's'),
};
static const struct ad9467_chip_info ad9467_chip_tbl = {
@@ -295,7 +293,7 @@ static int ad9467_get_scale(struct ad9467_state *st, int *val, int *val2)
unsigned int i, vref_val;
int ret;
- ret = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
+ ret = ad9467_spi_read(st, AN877_ADC_REG_VREF);
if (ret < 0)
return ret;
@@ -330,31 +328,31 @@ static int ad9467_set_scale(struct ad9467_state *st, int val, int val2)
continue;
guard(mutex)(&st->lock);
- ret = ad9467_spi_write(st->spi, AN877_ADC_REG_VREF,
+ ret = ad9467_spi_write(st, AN877_ADC_REG_VREF,
info->scale_table[i][1]);
if (ret < 0)
return ret;
- return ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
+ return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER,
AN877_ADC_TRANSFER_SYNC);
}
return -EINVAL;
}
-static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
+static int ad9467_outputmode_set(struct ad9467_state *st, unsigned int mode)
{
int ret;
- ret = ad9467_spi_write(spi, AN877_ADC_REG_OUTPUT_MODE, mode);
+ ret = ad9467_spi_write(st, AN877_ADC_REG_OUTPUT_MODE, mode);
if (ret < 0)
return ret;
- return ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
+ return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER,
AN877_ADC_TRANSFER_SYNC);
}
-static int ad9647_calibrate_prepare(const struct ad9467_state *st)
+static int ad9647_calibrate_prepare(struct ad9467_state *st)
{
struct iio_backend_data_fmt data = {
.enable = false,
@@ -362,17 +360,17 @@ static int ad9647_calibrate_prepare(const struct ad9467_state *st)
unsigned int c;
int ret;
- ret = ad9467_spi_write(st->spi, AN877_ADC_REG_TEST_IO,
+ ret = ad9467_spi_write(st, AN877_ADC_REG_TEST_IO,
AN877_ADC_TESTMODE_PN9_SEQ);
if (ret)
return ret;
- ret = ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
+ ret = ad9467_spi_write(st, AN877_ADC_REG_TRANSFER,
AN877_ADC_TRANSFER_SYNC);
if (ret)
return ret;
- ret = ad9467_outputmode_set(st->spi, st->info->default_output_mode);
+ ret = ad9467_outputmode_set(st, st->info->default_output_mode);
if (ret)
return ret;
@@ -390,7 +388,7 @@ static int ad9647_calibrate_prepare(const struct ad9467_state *st)
return iio_backend_chan_enable(st->back, 0);
}
-static int ad9647_calibrate_polarity_set(const struct ad9467_state *st,
+static int ad9647_calibrate_polarity_set(struct ad9467_state *st,
bool invert)
{
enum iio_backend_sample_trigger trigger;
@@ -401,7 +399,7 @@ static int ad9647_calibrate_polarity_set(const struct ad9467_state *st,
if (invert)
phase |= AN877_ADC_INVERT_DCO_CLK;
- return ad9467_spi_write(st->spi, AN877_ADC_REG_OUTPUT_PHASE,
+ return ad9467_spi_write(st, AN877_ADC_REG_OUTPUT_PHASE,
phase);
}
@@ -437,19 +435,18 @@ static unsigned int ad9467_find_optimal_point(const unsigned long *calib_map,
return cnt;
}
-static int ad9467_calibrate_apply(const struct ad9467_state *st,
- unsigned int val)
+static int ad9467_calibrate_apply(struct ad9467_state *st, unsigned int val)
{
unsigned int lane;
int ret;
if (st->info->has_dco) {
- ret = ad9467_spi_write(st->spi, AN877_ADC_REG_OUTPUT_DELAY,
+ ret = ad9467_spi_write(st, AN877_ADC_REG_OUTPUT_DELAY,
val);
if (ret)
return ret;
- return ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
+ return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER,
AN877_ADC_TRANSFER_SYNC);
}
@@ -462,7 +459,7 @@ static int ad9467_calibrate_apply(const struct ad9467_state *st,
return 0;
}
-static int ad9647_calibrate_stop(const struct ad9467_state *st)
+static int ad9647_calibrate_stop(struct ad9467_state *st)
{
struct iio_backend_data_fmt data = {
.sign_extend = true,
@@ -487,16 +484,16 @@ static int ad9647_calibrate_stop(const struct ad9467_state *st)
}
mode = st->info->default_output_mode | AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
- ret = ad9467_outputmode_set(st->spi, mode);
+ ret = ad9467_outputmode_set(st, mode);
if (ret)
return ret;
- ret = ad9467_spi_write(st->spi, AN877_ADC_REG_TEST_IO,
+ ret = ad9467_spi_write(st, AN877_ADC_REG_TEST_IO,
AN877_ADC_TESTMODE_OFF);
if (ret)
return ret;
- return ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
+ return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER,
AN877_ADC_TRANSFER_SYNC);
}
@@ -846,7 +843,7 @@ static int ad9467_probe(struct spi_device *spi)
if (ret)
return ret;
- id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID);
+ id = ad9467_spi_read(st, AN877_ADC_REG_CHIP_ID);
if (id != st->info->id) {
dev_err(&spi->dev, "Mismatch CHIP_ID, got 0x%X, expected 0x%X\n",
id, st->info->id);
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index a2b87f6b7a07..8c062b0d26e3 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -321,6 +321,7 @@ out:
sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+ ad_sigma_delta_disable_one(sigma_delta, chan->address);
sigma_delta->bus_locked = false;
spi_bus_unlock(sigma_delta->spi->controller);
iio_device_release_direct_mode(indio_dev);
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index 0cf0d81358fd..21ce7564e83d 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -42,6 +42,9 @@
#define ADI_AXI_ADC_REG_CTRL 0x0044
#define ADI_AXI_ADC_CTRL_DDR_EDGESEL_MASK BIT(1)
+#define ADI_AXI_ADC_REG_DRP_STATUS 0x0074
+#define ADI_AXI_ADC_DRP_LOCKED BIT(17)
+
/* ADC Channel controls */
#define ADI_AXI_REG_CHAN_CTRL(c) (0x0400 + (c) * 0x40)
@@ -83,14 +86,26 @@ struct adi_axi_adc_state {
static int axi_adc_enable(struct iio_backend *back)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ unsigned int __val;
int ret;
+ guard(mutex)(&st->lock);
ret = regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
ADI_AXI_REG_RSTN_MMCM_RSTN);
if (ret)
return ret;
- fsleep(10000);
+ /*
+ * Make sure the DRP (Dynamic Reconfiguration Port) is locked. Not all
+ * designs actually use it, but those that don't still report the lock
+ * bit as set, so we can always poll it and keep the code generic.
+ */
+ ret = regmap_read_poll_timeout(st->regmap, ADI_AXI_ADC_REG_DRP_STATUS,
+ __val, __val & ADI_AXI_ADC_DRP_LOCKED,
+ 100, 1000);
+ if (ret)
+ return ret;
+
return regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
}
@@ -99,6 +114,7 @@ static void axi_adc_disable(struct iio_backend *back)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ guard(mutex)(&st->lock);
regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
}
@@ -292,7 +308,8 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
st->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&axi_adc_regmap_config);
if (IS_ERR(st->regmap))
- return PTR_ERR(st->regmap);
+ return dev_err_probe(&pdev->dev, PTR_ERR(st->regmap),
+ "failed to init register map\n");
expected_ver = device_get_match_data(&pdev->dev);
if (!expected_ver)
@@ -300,7 +317,8 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
- return PTR_ERR(clk);
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "failed to get clock\n");
/*
* Force disable the core. Up to the frontend to enable us. And we can
@@ -328,7 +346,8 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
ret = devm_iio_backend_register(&pdev->dev, &adi_axi_adc_generic, st);
if (ret)
- return ret;
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register iio backend\n");
dev_info(&pdev->dev, "AXI ADC IP core (%d.%.2d.%c) probed\n",
ADI_AXI_PCORE_VER_MAJOR(ver),
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index 998e8bcc06e1..090416c0d622 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -108,7 +108,6 @@ struct adc_gain {
struct aspeed_adc_data {
struct device *dev;
const struct aspeed_adc_model_data *model_data;
- struct regulator *regulator;
void __iomem *base;
spinlock_t clk_lock;
struct clk_hw *fixed_div_clk;
@@ -404,13 +403,6 @@ static void aspeed_adc_power_down(void *data)
priv_data->base + ASPEED_REG_ENGINE_CONTROL);
}
-static void aspeed_adc_reg_disable(void *data)
-{
- struct regulator *reg = data;
-
- regulator_disable(reg);
-}
-
static int aspeed_adc_vref_config(struct iio_dev *indio_dev)
{
struct aspeed_adc_data *data = iio_priv(indio_dev);
@@ -423,18 +415,14 @@ static int aspeed_adc_vref_config(struct iio_dev *indio_dev)
}
adc_engine_control_reg_val =
readl(data->base + ASPEED_REG_ENGINE_CONTROL);
- data->regulator = devm_regulator_get_optional(data->dev, "vref");
- if (!IS_ERR(data->regulator)) {
- ret = regulator_enable(data->regulator);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(
- data->dev, aspeed_adc_reg_disable, data->regulator);
- if (ret)
- return ret;
- data->vref_mv = regulator_get_voltage(data->regulator);
- /* Conversion from uV to mV */
- data->vref_mv /= 1000;
+
+ ret = devm_regulator_get_enable_read_voltage(data->dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
+ return ret;
+
+ if (ret != -ENODEV) {
+ data->vref_mv = ret / 1000;
+
if ((data->vref_mv >= 1550) && (data->vref_mv <= 2700))
writel(adc_engine_control_reg_val |
FIELD_PREP(
@@ -453,8 +441,6 @@ static int aspeed_adc_vref_config(struct iio_dev *indio_dev)
return -EOPNOTSUPP;
}
} else {
- if (PTR_ERR(data->regulator) != -ENODEV)
- return PTR_ERR(data->regulator);
data->vref_mv = 2500000;
of_property_read_u32(data->dev->of_node,
"aspeed,int-vref-microvolt",
diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
index d6c51b0f48e3..b487e577befb 100644
--- a/drivers/iio/adc/axp20x_adc.c
+++ b/drivers/iio/adc/axp20x_adc.c
@@ -22,11 +22,19 @@
#include <linux/iio/machine.h>
#include <linux/mfd/axp20x.h>
+#define AXP192_ADC_EN1_MASK GENMASK(7, 0)
+#define AXP192_ADC_EN2_MASK (GENMASK(3, 0) | BIT(7))
+
#define AXP20X_ADC_EN1_MASK GENMASK(7, 0)
#define AXP20X_ADC_EN2_MASK (GENMASK(3, 2) | BIT(7))
#define AXP22X_ADC_EN1_MASK (GENMASK(7, 5) | BIT(0))
+#define AXP192_GPIO30_IN_RANGE_GPIO0 BIT(0)
+#define AXP192_GPIO30_IN_RANGE_GPIO1 BIT(1)
+#define AXP192_GPIO30_IN_RANGE_GPIO2 BIT(2)
+#define AXP192_GPIO30_IN_RANGE_GPIO3 BIT(3)
+
#define AXP20X_GPIO10_IN_RANGE_GPIO0 BIT(0)
#define AXP20X_GPIO10_IN_RANGE_GPIO1 BIT(1)
@@ -71,6 +79,25 @@ struct axp20x_adc_iio {
const struct axp_data *data;
};
+enum axp192_adc_channel_v {
+ AXP192_ACIN_V = 0,
+ AXP192_VBUS_V,
+ AXP192_TS_IN,
+ AXP192_GPIO0_V,
+ AXP192_GPIO1_V,
+ AXP192_GPIO2_V,
+ AXP192_GPIO3_V,
+ AXP192_IPSOUT_V,
+ AXP192_BATT_V,
+};
+
+enum axp192_adc_channel_i {
+ AXP192_ACIN_I = 0,
+ AXP192_VBUS_I,
+ AXP192_BATT_CHRG_I,
+ AXP192_BATT_DISCHRG_I,
+};
+
enum axp20x_adc_channel_v {
AXP20X_ACIN_V = 0,
AXP20X_VBUS_V,
@@ -158,6 +185,43 @@ static struct iio_map axp22x_maps[] = {
* The only exception is for the battery. batt_v will be in_voltage6_raw and
* charge current in_current6_raw and discharge current will be in_current7_raw.
*/
+static const struct iio_chan_spec axp192_adc_channels[] = {
+ AXP20X_ADC_CHANNEL(AXP192_ACIN_V, "acin_v", IIO_VOLTAGE,
+ AXP20X_ACIN_V_ADC_H),
+ AXP20X_ADC_CHANNEL(AXP192_ACIN_I, "acin_i", IIO_CURRENT,
+ AXP20X_ACIN_I_ADC_H),
+ AXP20X_ADC_CHANNEL(AXP192_VBUS_V, "vbus_v", IIO_VOLTAGE,
+ AXP20X_VBUS_V_ADC_H),
+ AXP20X_ADC_CHANNEL(AXP192_VBUS_I, "vbus_i", IIO_CURRENT,
+ AXP20X_VBUS_I_ADC_H),
+ {
+ .type = IIO_TEMP,
+ .address = AXP20X_TEMP_ADC_H,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .datasheet_name = "pmic_temp",
+ },
+ AXP20X_ADC_CHANNEL_OFFSET(AXP192_GPIO0_V, "gpio0_v", IIO_VOLTAGE,
+ AXP20X_GPIO0_V_ADC_H),
+ AXP20X_ADC_CHANNEL_OFFSET(AXP192_GPIO1_V, "gpio1_v", IIO_VOLTAGE,
+ AXP20X_GPIO1_V_ADC_H),
+ AXP20X_ADC_CHANNEL_OFFSET(AXP192_GPIO2_V, "gpio2_v", IIO_VOLTAGE,
+ AXP192_GPIO2_V_ADC_H),
+ AXP20X_ADC_CHANNEL_OFFSET(AXP192_GPIO3_V, "gpio3_v", IIO_VOLTAGE,
+ AXP192_GPIO3_V_ADC_H),
+ AXP20X_ADC_CHANNEL(AXP192_IPSOUT_V, "ipsout_v", IIO_VOLTAGE,
+ AXP20X_IPSOUT_V_HIGH_H),
+ AXP20X_ADC_CHANNEL(AXP192_BATT_V, "batt_v", IIO_VOLTAGE,
+ AXP20X_BATT_V_H),
+ AXP20X_ADC_CHANNEL(AXP192_BATT_CHRG_I, "batt_chrg_i", IIO_CURRENT,
+ AXP20X_BATT_CHRG_I_H),
+ AXP20X_ADC_CHANNEL(AXP192_BATT_DISCHRG_I, "batt_dischrg_i", IIO_CURRENT,
+ AXP20X_BATT_DISCHRG_I_H),
+ AXP20X_ADC_CHANNEL(AXP192_TS_IN, "ts_v", IIO_VOLTAGE,
+ AXP20X_TS_IN_H),
+};
+
static const struct iio_chan_spec axp20x_adc_channels[] = {
AXP20X_ADC_CHANNEL(AXP20X_ACIN_V, "acin_v", IIO_VOLTAGE,
AXP20X_ACIN_V_ADC_H),
@@ -231,6 +295,27 @@ static const struct iio_chan_spec axp813_adc_channels[] = {
AXP288_TS_ADC_H),
};
+static int axp192_adc_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct axp20x_adc_iio *info = iio_priv(indio_dev);
+ int ret, size;
+
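+ /*
+ * Battery charge and discharge current are read as 13-bit values, all
+ * other channels as 12-bit values.
+ */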
+ if (chan->type == IIO_CURRENT &&
+ (chan->channel == AXP192_BATT_CHRG_I ||
+ chan->channel == AXP192_BATT_DISCHRG_I))
+ size = 13;
+ else
+ size = 12;
+
+ ret = axp20x_read_variable_width(info->regmap, chan->address, size);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return IIO_VAL_INT;
+}
+
static int axp20x_adc_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val)
{
@@ -283,6 +368,44 @@ static int axp813_adc_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
}
+static int axp192_adc_scale_voltage(int channel, int *val, int *val2)
+{
+ switch (channel) {
+ case AXP192_ACIN_V:
+ case AXP192_VBUS_V:
+ *val = 1;
+ *val2 = 700000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ case AXP192_GPIO0_V:
+ case AXP192_GPIO1_V:
+ case AXP192_GPIO2_V:
+ case AXP192_GPIO3_V:
+ *val = 0;
+ *val2 = 500000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ case AXP192_BATT_V:
+ *val = 1;
+ *val2 = 100000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ case AXP192_IPSOUT_V:
+ *val = 1;
+ *val2 = 400000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ case AXP192_TS_IN:
+ /* 0.8 mV per LSB */
+ *val = 0;
+ *val2 = 800000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static int axp20x_adc_scale_voltage(int channel, int *val, int *val2)
{
switch (channel) {
@@ -386,6 +509,29 @@ static int axp20x_adc_scale_current(int channel, int *val, int *val2)
}
}
+static int axp192_adc_scale(struct iio_chan_spec const *chan, int *val,
+ int *val2)
+{
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ return axp192_adc_scale_voltage(chan->channel, val, val2);
+
+ case IIO_CURRENT:
+ /*
+ * AXP192 current channels are identical to the AXP20x,
+ * therefore we can re-use the scaling function.
+ */
+ return axp20x_adc_scale_current(chan->channel, val, val2);
+
+ case IIO_TEMP:
+ *val = 100;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static int axp20x_adc_scale(struct iio_chan_spec const *chan, int *val,
int *val2)
{
@@ -445,6 +591,42 @@ static int axp813_adc_scale(struct iio_chan_spec const *chan, int *val,
}
}
+static int axp192_adc_offset_voltage(struct iio_dev *indio_dev, int channel,
+ int *val)
+{
+ struct axp20x_adc_iio *info = iio_priv(indio_dev);
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, AXP192_GPIO30_IN_RANGE, &regval);
+ if (ret < 0)
+ return ret;
+
+ switch (channel) {
+ case AXP192_GPIO0_V:
+ regval = FIELD_GET(AXP192_GPIO30_IN_RANGE_GPIO0, regval);
+ break;
+
+ case AXP192_GPIO1_V:
+ regval = FIELD_GET(AXP192_GPIO30_IN_RANGE_GPIO1, regval);
+ break;
+
+ case AXP192_GPIO2_V:
+ regval = FIELD_GET(AXP192_GPIO30_IN_RANGE_GPIO2, regval);
+ break;
+
+ case AXP192_GPIO3_V:
+ regval = FIELD_GET(AXP192_GPIO30_IN_RANGE_GPIO3, regval);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ *val = regval ? 700000 : 0;
+ return IIO_VAL_INT;
+}
+
static int axp20x_adc_offset_voltage(struct iio_dev *indio_dev, int channel,
int *val)
{
@@ -473,6 +655,22 @@ static int axp20x_adc_offset_voltage(struct iio_dev *indio_dev, int channel,
return IIO_VAL_INT;
}
+static int axp192_adc_offset(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
+{
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ return axp192_adc_offset_voltage(indio_dev, chan->channel, val);
+
+ case IIO_TEMP:
+ *val = -1447;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static int axp20x_adc_offset(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val)
{
@@ -489,6 +687,25 @@ static int axp20x_adc_offset(struct iio_dev *indio_dev,
}
}
+static int axp192_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_OFFSET:
+ return axp192_adc_offset(indio_dev, chan, val);
+
+ case IIO_CHAN_INFO_SCALE:
+ return axp192_adc_scale(chan, val, val2);
+
+ case IIO_CHAN_INFO_RAW:
+ return axp192_adc_raw(indio_dev, chan, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
static int axp20x_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
@@ -549,6 +766,51 @@ static int axp813_read_raw(struct iio_dev *indio_dev,
}
}
+static int axp192_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2,
+ long mask)
+{
+ struct axp20x_adc_iio *info = iio_priv(indio_dev);
+ unsigned int regmask, regval;
+
+ /*
+ * The AXP192 PMIC allows the user to independently choose between 0V and
+ * 0.7V offsets for GPIO0-3 when in ADC mode.
+ */
+ if (mask != IIO_CHAN_INFO_OFFSET)
+ return -EINVAL;
+
+ if (val != 0 && val != 700000)
+ return -EINVAL;
+
+ switch (chan->channel) {
+ case AXP192_GPIO0_V:
+ regmask = AXP192_GPIO30_IN_RANGE_GPIO0;
+ regval = FIELD_PREP(AXP192_GPIO30_IN_RANGE_GPIO0, !!val);
+ break;
+
+ case AXP192_GPIO1_V:
+ regmask = AXP192_GPIO30_IN_RANGE_GPIO1;
+ regval = FIELD_PREP(AXP192_GPIO30_IN_RANGE_GPIO1, !!val);
+ break;
+
+ case AXP192_GPIO2_V:
+ regmask = AXP192_GPIO30_IN_RANGE_GPIO2;
+ regval = FIELD_PREP(AXP192_GPIO30_IN_RANGE_GPIO2, !!val);
+ break;
+
+ case AXP192_GPIO3_V:
+ regmask = AXP192_GPIO30_IN_RANGE_GPIO3;
+ regval = FIELD_PREP(AXP192_GPIO30_IN_RANGE_GPIO3, !!val);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(info->regmap, AXP192_GPIO30_IN_RANGE, regmask, regval);
+}
+
static int axp20x_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2,
long mask)
@@ -584,6 +846,11 @@ static int axp20x_write_raw(struct iio_dev *indio_dev,
return regmap_update_bits(info->regmap, AXP20X_GPIO10_IN_RANGE, regmask, regval);
}
+static const struct iio_info axp192_adc_iio_info = {
+ .read_raw = axp192_read_raw,
+ .write_raw = axp192_write_raw,
+};
+
static const struct iio_info axp20x_adc_iio_info = {
.read_raw = axp20x_read_raw,
.write_raw = axp20x_write_raw,
@@ -629,6 +896,16 @@ struct axp_data {
struct iio_map *maps;
};
+static const struct axp_data axp192_data = {
+ .iio_info = &axp192_adc_iio_info,
+ .num_channels = ARRAY_SIZE(axp192_adc_channels),
+ .channels = axp192_adc_channels,
+ .adc_en1_mask = AXP192_ADC_EN1_MASK,
+ .adc_en2_mask = AXP192_ADC_EN2_MASK,
+ .adc_rate = axp20x_adc_rate,
+ .maps = axp20x_maps,
+};
+
static const struct axp_data axp20x_data = {
.iio_info = &axp20x_adc_iio_info,
.num_channels = ARRAY_SIZE(axp20x_adc_channels),
@@ -658,6 +935,7 @@ static const struct axp_data axp813_data = {
};
static const struct of_device_id axp20x_adc_of_match[] = {
+ { .compatible = "x-powers,axp192-adc", .data = (void *)&axp192_data, },
{ .compatible = "x-powers,axp209-adc", .data = (void *)&axp20x_data, },
{ .compatible = "x-powers,axp221-adc", .data = (void *)&axp22x_data, },
{ .compatible = "x-powers,axp813-adc", .data = (void *)&axp813_data, },
@@ -666,6 +944,7 @@ static const struct of_device_id axp20x_adc_of_match[] = {
MODULE_DEVICE_TABLE(of, axp20x_adc_of_match);
static const struct platform_device_id axp20x_adc_id_match[] = {
+ { .name = "axp192-adc", .driver_data = (kernel_ulong_t)&axp192_data, },
{ .name = "axp20x-adc", .driver_data = (kernel_ulong_t)&axp20x_data, },
{ .name = "axp22x-adc", .driver_data = (kernel_ulong_t)&axp22x_data, },
{ .name = "axp813-adc", .driver_data = (kernel_ulong_t)&axp813_data, },
@@ -712,9 +991,8 @@ static int axp20x_probe(struct platform_device *pdev)
regmap_write(info->regmap, AXP20X_ADC_EN1, info->data->adc_en1_mask);
if (info->data->adc_en2_mask)
- regmap_update_bits(info->regmap, AXP20X_ADC_EN2,
- info->data->adc_en2_mask,
- info->data->adc_en2_mask);
+ regmap_set_bits(info->regmap, AXP20X_ADC_EN2,
+ info->data->adc_en2_mask);
/* Configure ADCs rate */
info->data->adc_rate(info, 100);
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 49fff1cabd0d..f135cf2362df 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -247,8 +247,8 @@ static int axp288_adc_initialize(struct axp288_adc_info *info)
return ret;
/* Turn on the ADC for all channels except TS, leave TS as is */
- return regmap_update_bits(info->regmap, AXP20X_ADC_EN1,
- AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK);
+ return regmap_set_bits(info->regmap, AXP20X_ADC_EN1,
+ AXP288_ADC_EN_MASK);
}
static const struct iio_info axp288_adc_iio_info = {
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 5bc514bd5ebc..6bc149c51414 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -357,8 +357,8 @@ static int iproc_adc_enable(struct iio_dev *indio_dev)
int ret;
/* Set i_amux = 3b'000, select channel 0 */
- ret = regmap_update_bits(adc_priv->regmap, IPROC_ANALOG_CONTROL,
- IPROC_ADC_CHANNEL_SEL_MASK, 0);
+ ret = regmap_clear_bits(adc_priv->regmap, IPROC_ANALOG_CONTROL,
+ IPROC_ADC_CHANNEL_SEL_MASK);
if (ret) {
dev_err(&indio_dev->dev,
"failed to write IPROC_ANALOG_CONTROL %d\n", ret);
@@ -543,8 +543,8 @@ static int iproc_adc_probe(struct platform_device *pdev)
if (adc_priv->irqno < 0)
return adc_priv->irqno;
- ret = regmap_update_bits(adc_priv->regmap, IPROC_REGCTL2,
- IPROC_ADC_AUXIN_SCAN_ENA, 0);
+ ret = regmap_clear_bits(adc_priv->regmap, IPROC_REGCTL2,
+ IPROC_ADC_AUXIN_SCAN_ENA);
if (ret) {
dev_err(&pdev->dev, "failed to write IPROC_REGCTL2 %d\n", ret);
return ret;
diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c
index a4e7c7eff5ac..4cdddc6e36e9 100644
--- a/drivers/iio/adc/berlin2-adc.c
+++ b/drivers/iio/adc/berlin2-adc.c
@@ -129,8 +129,8 @@ static int berlin2_adc_read(struct iio_dev *indio_dev, int channel)
msecs_to_jiffies(1000));
/* Disable the interrupts */
- regmap_update_bits(priv->regmap, BERLIN2_SM_ADC_STATUS,
- BERLIN2_SM_ADC_STATUS_INT_EN(channel), 0);
+ regmap_clear_bits(priv->regmap, BERLIN2_SM_ADC_STATUS,
+ BERLIN2_SM_ADC_STATUS_INT_EN(channel));
if (ret == 0)
ret = -ETIMEDOUT;
@@ -139,8 +139,8 @@ static int berlin2_adc_read(struct iio_dev *indio_dev, int channel)
return ret;
}
- regmap_update_bits(priv->regmap, BERLIN2_SM_CTRL,
- BERLIN2_SM_CTRL_ADC_START, 0);
+ regmap_clear_bits(priv->regmap, BERLIN2_SM_CTRL,
+ BERLIN2_SM_CTRL_ADC_START);
data = priv->data;
priv->data_available = false;
@@ -180,8 +180,8 @@ static int berlin2_adc_tsen_read(struct iio_dev *indio_dev)
msecs_to_jiffies(1000));
/* Disable interrupts */
- regmap_update_bits(priv->regmap, BERLIN2_SM_TSEN_STATUS,
- BERLIN2_SM_TSEN_STATUS_INT_EN, 0);
+ regmap_clear_bits(priv->regmap, BERLIN2_SM_TSEN_STATUS,
+ BERLIN2_SM_TSEN_STATUS_INT_EN);
if (ret == 0)
ret = -ETIMEDOUT;
@@ -190,8 +190,8 @@ static int berlin2_adc_tsen_read(struct iio_dev *indio_dev)
return ret;
}
- regmap_update_bits(priv->regmap, BERLIN2_SM_TSEN_CTRL,
- BERLIN2_SM_TSEN_CTRL_START, 0);
+ regmap_clear_bits(priv->regmap, BERLIN2_SM_TSEN_CTRL,
+ BERLIN2_SM_TSEN_CTRL_START);
data = priv->data;
priv->data_available = false;
@@ -284,8 +284,7 @@ static const struct iio_info berlin2_adc_info = {
static void berlin2_adc_powerdown(void *regmap)
{
- regmap_update_bits(regmap, BERLIN2_SM_CTRL,
- BERLIN2_SM_CTRL_ADC_POWER, 0);
+ regmap_clear_bits(regmap, BERLIN2_SM_CTRL, BERLIN2_SM_CTRL_ADC_POWER);
}
@@ -339,9 +338,8 @@ static int berlin2_adc_probe(struct platform_device *pdev)
indio_dev->num_channels = ARRAY_SIZE(berlin2_adc_channels);
/* Power up the ADC */
- regmap_update_bits(priv->regmap, BERLIN2_SM_CTRL,
- BERLIN2_SM_CTRL_ADC_POWER,
- BERLIN2_SM_CTRL_ADC_POWER);
+ regmap_set_bits(priv->regmap, BERLIN2_SM_CTRL,
+ BERLIN2_SM_CTRL_ADC_POWER);
ret = devm_add_action_or_reset(&pdev->dev, berlin2_adc_powerdown,
priv->regmap);
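Registering berlin2_adc_powerdown() through devm_add_action_or_reset() means the power-down runs automatically, both when a later probe step fails and at driver unbind, in reverse registration order. A minimal sketch of the pairing, reusing the names from the hunk above:

        /* Power up, then register the matching power-down as a devm action. */
        regmap_set_bits(priv->regmap, BERLIN2_SM_CTRL, BERLIN2_SM_CTRL_ADC_POWER);
        ret = devm_add_action_or_reset(&pdev->dev, berlin2_adc_powerdown,
                                       priv->regmap);
        if (ret)
                return ret;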
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
index b6c4ef70484e..c218acf6c9c6 100644
--- a/drivers/iio/adc/cpcap-adc.c
+++ b/drivers/iio/adc/cpcap-adc.c
@@ -385,9 +385,8 @@ static irqreturn_t cpcap_adc_irq_thread(int irq, void *data)
struct cpcap_adc *ddata = iio_priv(indio_dev);
int error;
- error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
- CPCAP_BIT_ADTRIG_DIS,
- CPCAP_BIT_ADTRIG_DIS);
+ error = regmap_set_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ADTRIG_DIS);
if (error)
return IRQ_NONE;
@@ -424,23 +423,19 @@ static void cpcap_adc_setup_calibrate(struct cpcap_adc *ddata,
if (error)
return;
- error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
- CPCAP_BIT_ATOX_PS_FACTOR |
- CPCAP_BIT_ADC_PS_FACTOR1 |
- CPCAP_BIT_ADC_PS_FACTOR0,
- 0);
+ error = regmap_clear_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ATOX_PS_FACTOR |
+ CPCAP_BIT_ADC_PS_FACTOR1 |
+ CPCAP_BIT_ADC_PS_FACTOR0);
if (error)
return;
- error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
- CPCAP_BIT_ADTRIG_DIS,
- CPCAP_BIT_ADTRIG_DIS);
+ error = regmap_set_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ADTRIG_DIS);
if (error)
return;
- error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
- CPCAP_BIT_ASC,
- CPCAP_BIT_ASC);
+ error = regmap_set_bits(ddata->reg, CPCAP_REG_ADCC2, CPCAP_BIT_ASC);
if (error)
return;
@@ -455,8 +450,8 @@ static void cpcap_adc_setup_calibrate(struct cpcap_adc *ddata,
dev_err(ddata->dev,
"Timeout waiting for calibration to complete\n");
- error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC1,
- CPCAP_BIT_CAL_MODE, 0);
+ error = regmap_clear_bits(ddata->reg, CPCAP_REG_ADCC1,
+ CPCAP_BIT_CAL_MODE);
if (error)
return;
}
@@ -602,26 +597,23 @@ static void cpcap_adc_setup_bank(struct cpcap_adc *ddata,
return;
if (req->timing == CPCAP_ADC_TIMING_IMM) {
- error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
- CPCAP_BIT_ADTRIG_DIS,
- CPCAP_BIT_ADTRIG_DIS);
+ error = regmap_set_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ADTRIG_DIS);
if (error)
return;
- error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
- CPCAP_BIT_ASC,
- CPCAP_BIT_ASC);
+ error = regmap_set_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ASC);
if (error)
return;
} else {
- error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
- CPCAP_BIT_ADTRIG_ONESHOT,
- CPCAP_BIT_ADTRIG_ONESHOT);
+ error = regmap_set_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ADTRIG_ONESHOT);
if (error)
return;
- error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
- CPCAP_BIT_ADTRIG_DIS, 0);
+ error = regmap_clear_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ADTRIG_DIS);
if (error)
return;
}
diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c
index b680690631db..b3f037510e35 100644
--- a/drivers/iio/adc/fsl-imx25-gcq.c
+++ b/drivers/iio/adc/fsl-imx25-gcq.c
@@ -87,13 +87,13 @@ static irqreturn_t mx25_gcq_irq(int irq, void *data)
regmap_read(priv->regs, MX25_ADCQ_SR, &stats);
if (stats & MX25_ADCQ_SR_EOQ) {
- regmap_update_bits(priv->regs, MX25_ADCQ_MR,
- MX25_ADCQ_MR_EOQ_IRQ, MX25_ADCQ_MR_EOQ_IRQ);
+ regmap_set_bits(priv->regs, MX25_ADCQ_MR,
+ MX25_ADCQ_MR_EOQ_IRQ);
complete(&priv->completed);
}
/* Disable conversion queue run */
- regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_FQS, 0);
+ regmap_clear_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_FQS);
/* Acknowledge all possible irqs */
regmap_write(priv->regs, MX25_ADCQ_SR, MX25_ADCQ_SR_FRR |
@@ -115,11 +115,10 @@ static int mx25_gcq_get_raw_value(struct device *dev,
regmap_write(priv->regs, MX25_ADCQ_ITEM_7_0,
MX25_ADCQ_ITEM(0, chan->channel));
- regmap_update_bits(priv->regs, MX25_ADCQ_MR, MX25_ADCQ_MR_EOQ_IRQ, 0);
+ regmap_clear_bits(priv->regs, MX25_ADCQ_MR, MX25_ADCQ_MR_EOQ_IRQ);
/* Trigger queue for one run */
- regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_FQS,
- MX25_ADCQ_CR_FQS);
+ regmap_set_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_FQS);
time_left = wait_for_completion_interruptible_timeout(
&priv->completed, MX25_GCQ_TIMEOUT);
@@ -272,9 +271,8 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
MX25_ADCQ_CFG_REFN_MASK,
refp | refn);
}
- regmap_update_bits(priv->regs, MX25_ADCQ_CR,
- MX25_ADCQ_CR_FRST | MX25_ADCQ_CR_QRST,
- MX25_ADCQ_CR_FRST | MX25_ADCQ_CR_QRST);
+ regmap_set_bits(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_FRST | MX25_ADCQ_CR_QRST);
regmap_write(priv->regs, MX25_ADCQ_CR,
MX25_ADCQ_CR_PDMSK | MX25_ADCQ_CR_QSM_FQS);
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index fef97c1d226a..b3372ccff7d5 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -80,7 +80,6 @@ struct hx711_data {
struct device *dev;
struct gpio_desc *gpiod_pd_sck;
struct gpio_desc *gpiod_dout;
- struct regulator *reg_avdd;
int gain_set; /* gain set on device */
int gain_chan_a; /* gain for channel A */
struct mutex lock;
@@ -465,10 +464,8 @@ static int hx711_probe(struct platform_device *pdev)
int i;
indio_dev = devm_iio_device_alloc(dev, sizeof(struct hx711_data));
- if (!indio_dev) {
- dev_err(dev, "failed to allocate IIO device\n");
- return -ENOMEM;
- }
+ if (!indio_dev)
+ return dev_err_probe(dev, -ENOMEM, "failed to allocate IIO device\n");
hx711_data = iio_priv(indio_dev);
hx711_data->dev = dev;
@@ -480,28 +477,20 @@ static int hx711_probe(struct platform_device *pdev)
* in the driver it is an output
*/
hx711_data->gpiod_pd_sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
- if (IS_ERR(hx711_data->gpiod_pd_sck)) {
- dev_err(dev, "failed to get sck-gpiod: err=%ld\n",
- PTR_ERR(hx711_data->gpiod_pd_sck));
- return PTR_ERR(hx711_data->gpiod_pd_sck);
- }
+ if (IS_ERR(hx711_data->gpiod_pd_sck))
+ return dev_err_probe(dev, PTR_ERR(hx711_data->gpiod_pd_sck),
+ "failed to get sck-gpiod\n");
/*
* DOUT stands for serial data output of HX711
* for the driver it is an input
*/
hx711_data->gpiod_dout = devm_gpiod_get(dev, "dout", GPIOD_IN);
- if (IS_ERR(hx711_data->gpiod_dout)) {
- dev_err(dev, "failed to get dout-gpiod: err=%ld\n",
- PTR_ERR(hx711_data->gpiod_dout));
- return PTR_ERR(hx711_data->gpiod_dout);
- }
+ if (IS_ERR(hx711_data->gpiod_dout))
+ return dev_err_probe(dev, PTR_ERR(hx711_data->gpiod_dout),
+ "failed to get dout-gpiod\n");
- hx711_data->reg_avdd = devm_regulator_get(dev, "avdd");
- if (IS_ERR(hx711_data->reg_avdd))
- return PTR_ERR(hx711_data->reg_avdd);
-
- ret = regulator_enable(hx711_data->reg_avdd);
+ ret = devm_regulator_get_enable_read_voltage(dev, "avdd");
if (ret < 0)
return ret;
@@ -517,9 +506,6 @@ static int hx711_probe(struct platform_device *pdev)
* approximately to fit into a 32 bit number:
* 1 LSB = (AVDD * 100) / GAIN / 1678 [10^-9 mV]
*/
- ret = regulator_get_voltage(hx711_data->reg_avdd);
- if (ret < 0)
- goto error_regulator;
/* we need 10^-9 mV */
ret *= 100;
@@ -547,51 +533,24 @@ static int hx711_probe(struct platform_device *pdev)
hx711_data->data_ready_delay_ns =
1000000000 / hx711_data->clock_frequency;
- platform_set_drvdata(pdev, indio_dev);
-
indio_dev->name = "hx711";
indio_dev->info = &hx711_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = hx711_chan_spec;
indio_dev->num_channels = ARRAY_SIZE(hx711_chan_spec);
- ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
- hx711_trigger, NULL);
- if (ret < 0) {
- dev_err(dev, "setup of iio triggered buffer failed\n");
- goto error_regulator;
- }
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ hx711_trigger, NULL);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "setup of iio triggered buffer failed\n");
- ret = iio_device_register(indio_dev);
- if (ret < 0) {
- dev_err(dev, "Couldn't register the device\n");
- goto error_buffer;
- }
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Couldn't register the device\n");
return 0;
-
-error_buffer:
- iio_triggered_buffer_cleanup(indio_dev);
-
-error_regulator:
- regulator_disable(hx711_data->reg_avdd);
-
- return ret;
-}
-
-static void hx711_remove(struct platform_device *pdev)
-{
- struct hx711_data *hx711_data;
- struct iio_dev *indio_dev;
-
- indio_dev = platform_get_drvdata(pdev);
- hx711_data = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- iio_triggered_buffer_cleanup(indio_dev);
-
- regulator_disable(hx711_data->reg_avdd);
}
static const struct of_device_id of_hx711_match[] = {
@@ -603,7 +562,6 @@ MODULE_DEVICE_TABLE(of, of_hx711_match);
static struct platform_driver hx711_driver = {
.probe = hx711_probe,
- .remove_new = hx711_remove,
.driver = {
.name = "hx711-gpio",
.of_match_table = of_hx711_match,
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 9e52207352fb..727e390bd979 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -1046,8 +1046,7 @@ static void ina2xx_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
/* Powerdown */
- ret = regmap_update_bits(chip->regmap, INA2XX_CONFIG,
- INA2XX_MODE_MASK, 0);
+ ret = regmap_clear_bits(chip->regmap, INA2XX_CONFIG, INA2XX_MODE_MASK);
if (ret)
dev_warn(&client->dev, "Failed to power down device (%pe)\n",
ERR_PTR(ret));
diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
index a7325dbbb99a..af70ca760797 100644
--- a/drivers/iio/adc/ingenic-adc.c
+++ b/drivers/iio/adc/ingenic-adc.c
@@ -920,4 +920,5 @@ static struct platform_driver ingenic_adc_driver = {
.probe = ingenic_adc_probe,
};
module_platform_driver(ingenic_adc_driver);
+MODULE_DESCRIPTION("ADC driver for the Ingenic JZ47xx SoCs");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/intel_mrfld_adc.c b/drivers/iio/adc/intel_mrfld_adc.c
index c7f40ae6e608..0590a126f321 100644
--- a/drivers/iio/adc/intel_mrfld_adc.c
+++ b/drivers/iio/adc/intel_mrfld_adc.c
@@ -81,8 +81,8 @@ static int mrfld_adc_single_conv(struct iio_dev *indio_dev,
reinit_completion(&adc->completion);
- regmap_update_bits(regmap, BCOVE_MADCIRQ, BCOVE_ADCIRQ_ALL, 0);
- regmap_update_bits(regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_ADC, 0);
+ regmap_clear_bits(regmap, BCOVE_MADCIRQ, BCOVE_ADCIRQ_ALL);
+ regmap_clear_bits(regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_ADC);
ret = regmap_read_poll_timeout(regmap, BCOVE_GPADCREQ, req,
!(req & BCOVE_GPADCREQ_BUSY),
diff --git a/drivers/iio/adc/ltc2309.c b/drivers/iio/adc/ltc2309.c
index 8b3a89c1b840..5f0d947d0615 100644
--- a/drivers/iio/adc/ltc2309.c
+++ b/drivers/iio/adc/ltc2309.c
@@ -16,6 +16,7 @@
#include <linux/regulator/consumer.h>
#define LTC2309_ADC_RESOLUTION 12
+#define LTC2309_INTERNAL_REF_MV 4096
#define LTC2309_DIN_CH_MASK GENMASK(7, 4)
#define LTC2309_DIN_SDN BIT(7)
@@ -29,14 +30,12 @@
* struct ltc2309 - internal device data structure
* @dev: Device reference
* @client: I2C reference
- * @vref: External reference source
* @lock: Lock to serialize data access
* @vref_mv: Internal voltage reference
*/
struct ltc2309 {
struct device *dev;
struct i2c_client *client;
- struct regulator *vref;
struct mutex lock; /* serialize data access */
int vref_mv;
};
@@ -104,7 +103,7 @@ static int ltc2309_read_raw_channel(struct ltc2309 *ltc2309,
unsigned long address, int *val)
{
int ret;
- u16 buf;
+ __be16 buf;
u8 din;
din = FIELD_PREP(LTC2309_DIN_CH_MASK, address & 0x0f) |
@@ -157,11 +156,6 @@ static const struct iio_info ltc2309_info = {
.read_raw = ltc2309_read_raw,
};
-static void ltc2309_regulator_disable(void *regulator)
-{
- regulator_disable(regulator);
-}
-
static int ltc2309_probe(struct i2c_client *client)
{
struct iio_dev *indio_dev;
@@ -175,7 +169,6 @@ static int ltc2309_probe(struct i2c_client *client)
ltc2309 = iio_priv(indio_dev);
ltc2309->dev = &indio_dev->dev;
ltc2309->client = client;
- ltc2309->vref_mv = 4096; /* Default to the internal ref */
indio_dev->name = "ltc2309";
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -183,36 +176,12 @@ static int ltc2309_probe(struct i2c_client *client)
indio_dev->num_channels = ARRAY_SIZE(ltc2309_channels);
indio_dev->info = &ltc2309_info;
- ltc2309->vref = devm_regulator_get_optional(&client->dev, "vref");
- if (IS_ERR(ltc2309->vref)) {
- ret = PTR_ERR(ltc2309->vref);
- if (ret == -ENODEV)
- ltc2309->vref = NULL;
- else
- return ret;
- }
+ ret = devm_regulator_get_enable_read_voltage(&client->dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(ltc2309->dev, ret,
+ "failed to get vref voltage\n");
- if (ltc2309->vref) {
- ret = regulator_enable(ltc2309->vref);
- if (ret)
- return dev_err_probe(ltc2309->dev, ret,
- "failed to enable vref\n");
-
- ret = devm_add_action_or_reset(ltc2309->dev,
- ltc2309_regulator_disable,
- ltc2309->vref);
- if (ret) {
- return dev_err_probe(ltc2309->dev, ret,
- "failed to add regulator_disable action: %d\n",
- ret);
- }
-
- ret = regulator_get_voltage(ltc2309->vref);
- if (ret < 0)
- return ret;
-
- ltc2309->vref_mv = ret / 1000;
- }
+ ltc2309->vref_mv = ret == -ENODEV ? LTC2309_INTERNAL_REF_MV : ret / 1000;
mutex_init(&ltc2309->lock);
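The ltc2309 conversion (and the max1363 one below) relies on devm_regulator_get_enable_read_voltage() returning the supply voltage in microvolts, or -ENODEV when the optional supply is absent. A minimal probe fragment of that fallback pattern, with INTERNAL_REF_MV as a made-up placeholder:

        ret = devm_regulator_get_enable_read_voltage(dev, "vref");
        if (ret < 0 && ret != -ENODEV)
                return dev_err_probe(dev, ret, "failed to get vref voltage\n");

        /* ret is in microvolts on success; fall back to the internal reference. */
        vref_mv = (ret == -ENODEV) ? INTERNAL_REF_MV : ret / 1000;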
diff --git a/drivers/iio/adc/ltc2485.c b/drivers/iio/adc/ltc2485.c
index 859e4314cfa2..060651dd4130 100644
--- a/drivers/iio/adc/ltc2485.c
+++ b/drivers/iio/adc/ltc2485.c
@@ -124,7 +124,7 @@ static int ltc2485_probe(struct i2c_client *client)
}
static const struct i2c_device_id ltc2485_id[] = {
- { "ltc2485", 0 },
+ { "ltc2485" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ltc2485_id);
diff --git a/drivers/iio/adc/max11205.c b/drivers/iio/adc/max11205.c
index 65fc32971ba5..9d8bc0b154dd 100644
--- a/drivers/iio/adc/max11205.c
+++ b/drivers/iio/adc/max11205.c
@@ -116,10 +116,7 @@ static int max11205_probe(struct spi_device *spi)
ad_sd_init(&st->sd, indio_dev, spi, &max11205_sigma_delta_info);
- st->chip_info = device_get_match_data(&spi->dev);
- if (!st->chip_info)
- st->chip_info =
- (const struct max11205_chip_info *)spi_get_device_id(spi)->driver_data;
+ st->chip_info = spi_get_device_match_data(spi);
indio_dev->name = st->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
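spi_get_device_match_data() folds the two lookups the removed code did by hand into a single call; conceptually (a sketch, not the exact implementation) it amounts to:

        /* Prefer firmware (OF/ACPI) match data, then the legacy spi_device_id table. */
        data = device_get_match_data(&spi->dev);
        if (!data)
                data = (const void *)spi_get_device_id(spi)->driver_data;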
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 8b5bc96cb9fb..bf4b6dc53fd2 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1561,18 +1561,12 @@ static const struct of_device_id max1363_of_match[] = {
};
MODULE_DEVICE_TABLE(of, max1363_of_match);
-static void max1363_reg_disable(void *reg)
-{
- regulator_disable(reg);
-}
-
static int max1363_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
int ret;
struct max1363_state *st;
struct iio_dev *indio_dev;
- struct regulator *vref;
indio_dev = devm_iio_device_alloc(&client->dev,
sizeof(struct max1363_state));
@@ -1589,26 +1583,12 @@ static int max1363_probe(struct i2c_client *client)
st->chip_info = i2c_get_match_data(client);
st->client = client;
- st->vref_uv = st->chip_info->int_vref_mv * 1000;
- vref = devm_regulator_get_optional(&client->dev, "vref");
- if (!IS_ERR(vref)) {
- int vref_uv;
-
- ret = regulator_enable(vref);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&client->dev, max1363_reg_disable, vref);
- if (ret)
- return ret;
+ ret = devm_regulator_get_enable_read_voltage(&client->dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
+ return ret;
- st->vref = vref;
- vref_uv = regulator_get_voltage(vref);
- if (vref_uv <= 0)
- return -EINVAL;
- st->vref_uv = vref_uv;
- }
+ st->vref_uv = ret == -ENODEV ? st->chip_info->int_vref_mv * 1000 : ret;
if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
st->send = i2c_master_send;
diff --git a/drivers/iio/adc/mcp3564.c b/drivers/iio/adc/mcp3564.c
index e2ae13f1e842..d83bed0e63d2 100644
--- a/drivers/iio/adc/mcp3564.c
+++ b/drivers/iio/adc/mcp3564.c
@@ -1114,7 +1114,6 @@ static int mcp3564_config(struct iio_dev *indio_dev)
{
struct mcp3564_state *adc = iio_priv(indio_dev);
struct device *dev = &adc->spi->dev;
- const struct spi_device_id *dev_id;
u8 tmp_reg;
u16 tmp_u16;
enum mcp3564_ids ids;
@@ -1212,11 +1211,6 @@ static int mcp3564_config(struct iio_dev *indio_dev)
* try using fallback compatible in device tree to deal with some newer part number.
*/
adc->chip_info = spi_get_device_match_data(adc->spi);
- if (!adc->chip_info) {
- dev_id = spi_get_device_id(adc->spi);
- adc->chip_info = (const struct mcp3564_chip_info *)dev_id->driver_data;
- }
-
adc->have_vref = adc->chip_info->have_vref;
} else {
adc->chip_info = &mcp3564_chip_infos_tbl[ids];
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 13b473d8c6c7..e16b0e28974e 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -546,35 +546,31 @@ static void meson_sar_adc_start_sample_engine(struct iio_dev *indio_dev)
reinit_completion(&priv->done);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
- MESON_SAR_ADC_REG0_FIFO_IRQ_EN,
- MESON_SAR_ADC_REG0_FIFO_IRQ_EN);
+ regmap_set_bits(priv->regmap, MESON_SAR_ADC_REG0,
+ MESON_SAR_ADC_REG0_FIFO_IRQ_EN);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
- MESON_SAR_ADC_REG0_SAMPLE_ENGINE_ENABLE,
- MESON_SAR_ADC_REG0_SAMPLE_ENGINE_ENABLE);
+ regmap_set_bits(priv->regmap, MESON_SAR_ADC_REG0,
+ MESON_SAR_ADC_REG0_SAMPLE_ENGINE_ENABLE);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
- MESON_SAR_ADC_REG0_SAMPLING_START,
- MESON_SAR_ADC_REG0_SAMPLING_START);
+ regmap_set_bits(priv->regmap, MESON_SAR_ADC_REG0,
+ MESON_SAR_ADC_REG0_SAMPLING_START);
}
static void meson_sar_adc_stop_sample_engine(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
- MESON_SAR_ADC_REG0_FIFO_IRQ_EN, 0);
+ regmap_clear_bits(priv->regmap, MESON_SAR_ADC_REG0,
+ MESON_SAR_ADC_REG0_FIFO_IRQ_EN);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
- MESON_SAR_ADC_REG0_SAMPLING_STOP,
- MESON_SAR_ADC_REG0_SAMPLING_STOP);
+ regmap_set_bits(priv->regmap, MESON_SAR_ADC_REG0,
+ MESON_SAR_ADC_REG0_SAMPLING_STOP);
/* wait until all modules are stopped */
meson_sar_adc_wait_busy_clear(indio_dev);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
- MESON_SAR_ADC_REG0_SAMPLE_ENGINE_ENABLE, 0);
+ regmap_clear_bits(priv->regmap, MESON_SAR_ADC_REG0,
+ MESON_SAR_ADC_REG0_SAMPLE_ENGINE_ENABLE);
}
static int meson_sar_adc_lock(struct iio_dev *indio_dev)
@@ -586,9 +582,8 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev)
if (priv->param->has_bl30_integration) {
/* prevent BL30 from using the SAR ADC while we are using it */
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY,
- MESON_SAR_ADC_DELAY_KERNEL_BUSY,
- MESON_SAR_ADC_DELAY_KERNEL_BUSY);
+ regmap_set_bits(priv->regmap, MESON_SAR_ADC_DELAY,
+ MESON_SAR_ADC_DELAY_KERNEL_BUSY);
udelay(1);
@@ -614,8 +609,8 @@ static void meson_sar_adc_unlock(struct iio_dev *indio_dev)
if (priv->param->has_bl30_integration)
/* allow BL30 to use the SAR ADC again */
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY,
- MESON_SAR_ADC_DELAY_KERNEL_BUSY, 0);
+ regmap_clear_bits(priv->regmap, MESON_SAR_ADC_DELAY,
+ MESON_SAR_ADC_DELAY_KERNEL_BUSY);
mutex_unlock(&priv->lock);
}
@@ -869,17 +864,16 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
* disable this bit as seems to be only relevant for Meson6 (based
* on the vendor driver), which we don't support at the moment.
*/
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
- MESON_SAR_ADC_REG0_ADC_TEMP_SEN_SEL, 0);
+ regmap_clear_bits(priv->regmap, MESON_SAR_ADC_REG0,
+ MESON_SAR_ADC_REG0_ADC_TEMP_SEN_SEL);
/* disable all channels by default */
regmap_write(priv->regmap, MESON_SAR_ADC_CHAN_LIST, 0x0);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
- MESON_SAR_ADC_REG3_CTRL_SAMPLING_CLOCK_PHASE, 0);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
- MESON_SAR_ADC_REG3_CNTL_USE_SC_DLY,
- MESON_SAR_ADC_REG3_CNTL_USE_SC_DLY);
+ regmap_clear_bits(priv->regmap, MESON_SAR_ADC_REG3,
+ MESON_SAR_ADC_REG3_CTRL_SAMPLING_CLOCK_PHASE);
+ regmap_set_bits(priv->regmap, MESON_SAR_ADC_REG3,
+ MESON_SAR_ADC_REG3_CNTL_USE_SC_DLY);
/* delay between two samples = (10+1) * 1uS */
regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY,
@@ -914,21 +908,17 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
MESON_SAR_ADC_CHAN_10_SW_CHAN1_MUX_SEL_MASK,
regval);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
- MESON_SAR_ADC_CHAN_10_SW_CHAN0_XP_DRIVE_SW,
- MESON_SAR_ADC_CHAN_10_SW_CHAN0_XP_DRIVE_SW);
+ regmap_set_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
+ MESON_SAR_ADC_CHAN_10_SW_CHAN0_XP_DRIVE_SW);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
- MESON_SAR_ADC_CHAN_10_SW_CHAN0_YP_DRIVE_SW,
- MESON_SAR_ADC_CHAN_10_SW_CHAN0_YP_DRIVE_SW);
+ regmap_set_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
+ MESON_SAR_ADC_CHAN_10_SW_CHAN0_YP_DRIVE_SW);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
- MESON_SAR_ADC_CHAN_10_SW_CHAN1_XP_DRIVE_SW,
- MESON_SAR_ADC_CHAN_10_SW_CHAN1_XP_DRIVE_SW);
+ regmap_set_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
+ MESON_SAR_ADC_CHAN_10_SW_CHAN1_XP_DRIVE_SW);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
- MESON_SAR_ADC_CHAN_10_SW_CHAN1_YP_DRIVE_SW,
- MESON_SAR_ADC_CHAN_10_SW_CHAN1_YP_DRIVE_SW);
+ regmap_set_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
+ MESON_SAR_ADC_CHAN_10_SW_CHAN1_YP_DRIVE_SW);
/*
* set up the input channel muxes in MESON_SAR_ADC_AUX_SW
@@ -944,12 +934,10 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
regmap_write(priv->regmap, MESON_SAR_ADC_AUX_SW, regval);
if (priv->temperature_sensor_calibrated) {
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELTA_10,
- MESON_SAR_ADC_DELTA_10_TS_REVE1,
- MESON_SAR_ADC_DELTA_10_TS_REVE1);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELTA_10,
- MESON_SAR_ADC_DELTA_10_TS_REVE0,
- MESON_SAR_ADC_DELTA_10_TS_REVE0);
+ regmap_set_bits(priv->regmap, MESON_SAR_ADC_DELTA_10,
+ MESON_SAR_ADC_DELTA_10_TS_REVE1);
+ regmap_set_bits(priv->regmap, MESON_SAR_ADC_DELTA_10,
+ MESON_SAR_ADC_DELTA_10_TS_REVE0);
/*
* set bits [3:0] of the TSC (temperature sensor coefficient)
@@ -976,10 +964,10 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
regval);
}
} else {
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELTA_10,
- MESON_SAR_ADC_DELTA_10_TS_REVE1, 0);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELTA_10,
- MESON_SAR_ADC_DELTA_10_TS_REVE0, 0);
+ regmap_clear_bits(priv->regmap, MESON_SAR_ADC_DELTA_10,
+ MESON_SAR_ADC_DELTA_10_TS_REVE1);
+ regmap_clear_bits(priv->regmap, MESON_SAR_ADC_DELTA_10,
+ MESON_SAR_ADC_DELTA_10_TS_REVE0);
}
regval = FIELD_PREP(MESON_SAR_ADC_REG3_CTRL_CONT_RING_COUNTER_EN,
@@ -1062,9 +1050,8 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
meson_sar_adc_set_bandgap(indio_dev, true);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
- MESON_SAR_ADC_REG3_ADC_EN,
- MESON_SAR_ADC_REG3_ADC_EN);
+ regmap_set_bits(priv->regmap, MESON_SAR_ADC_REG3,
+ MESON_SAR_ADC_REG3_ADC_EN);
udelay(5);
@@ -1079,8 +1066,8 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
return 0;
err_adc_clk:
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
- MESON_SAR_ADC_REG3_ADC_EN, 0);
+ regmap_clear_bits(priv->regmap, MESON_SAR_ADC_REG3,
+ MESON_SAR_ADC_REG3_ADC_EN);
meson_sar_adc_set_bandgap(indio_dev, false);
regulator_disable(priv->vref);
err_vref:
@@ -1104,8 +1091,8 @@ static void meson_sar_adc_hw_disable(struct iio_dev *indio_dev)
clk_disable_unprepare(priv->adc_clk);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
- MESON_SAR_ADC_REG3_ADC_EN, 0);
+ regmap_clear_bits(priv->regmap, MESON_SAR_ADC_REG3,
+ MESON_SAR_ADC_REG3_ADC_EN);
meson_sar_adc_set_bandgap(indio_dev, false);
diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c
index 7c66c2cd5be2..5f672765d4a2 100644
--- a/drivers/iio/adc/mp2629_adc.c
+++ b/drivers/iio/adc/mp2629_adc.c
@@ -131,9 +131,8 @@ static int mp2629_adc_probe(struct platform_device *pdev)
info->dev = dev;
platform_set_drvdata(pdev, indio_dev);
- ret = regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
- MP2629_ADC_START | MP2629_ADC_CONTINUOUS,
- MP2629_ADC_START | MP2629_ADC_CONTINUOUS);
+ ret = regmap_set_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_START | MP2629_ADC_CONTINUOUS);
if (ret) {
dev_err(dev, "adc enable fail: %d\n", ret);
return ret;
@@ -163,10 +162,9 @@ fail_map_unregister:
iio_map_array_unregister(indio_dev);
fail_disable:
- regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
- MP2629_ADC_CONTINUOUS, 0);
- regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
- MP2629_ADC_START, 0);
+ regmap_clear_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_CONTINUOUS);
+ regmap_clear_bits(info->regmap, MP2629_REG_ADC_CTRL, MP2629_ADC_START);
return ret;
}
@@ -180,10 +178,9 @@ static void mp2629_adc_remove(struct platform_device *pdev)
iio_map_array_unregister(indio_dev);
- regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
- MP2629_ADC_CONTINUOUS, 0);
- regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
- MP2629_ADC_START, 0);
+ regmap_clear_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_CONTINUOUS);
+ regmap_clear_bits(info->regmap, MP2629_REG_ADC_CTRL, MP2629_ADC_START);
}
static const struct of_device_id mp2629_adc_of_match[] = {
diff --git a/drivers/iio/adc/mt6359-auxadc.c b/drivers/iio/adc/mt6359-auxadc.c
new file mode 100644
index 000000000000..a4970cfb49a5
--- /dev/null
+++ b/drivers/iio/adc/mt6359-auxadc.c
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MediaTek MT6359 PMIC AUXADC IIO driver
+ *
+ * Copyright (c) 2021 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <linux/iio/iio.h>
+
+#include <linux/mfd/mt6397/core.h>
+
+#include <dt-bindings/iio/adc/mediatek,mt6357-auxadc.h>
+#include <dt-bindings/iio/adc/mediatek,mt6358-auxadc.h>
+#include <dt-bindings/iio/adc/mediatek,mt6359-auxadc.h>
+
+#define AUXADC_AVG_TIME_US 10
+#define AUXADC_POLL_DELAY_US 100
+#define AUXADC_TIMEOUT_US 32000
+#define AUXADC_VOLT_FULL 1800
+#define IMP_STOP_DELAY_US 150
+#define IMP_POLL_DELAY_US 1000
+
+/* For PMIC_RG_RESET_VAL and MT6358_IMP0_CLEAR, the bits' specific purpose is unknown. */
+#define PMIC_RG_RESET_VAL (BIT(0) | BIT(3))
+#define PMIC_AUXADC_RDY_BIT BIT(15)
+#define MT6357_IMP_ADC_NUM 30
+#define MT6358_IMP_ADC_NUM 28
+
+#define MT6358_DCM_CK_SW_EN GENMASK(1, 0)
+#define MT6358_IMP0_CLEAR (BIT(14) | BIT(7))
+#define MT6358_IMP0_IRQ_RDY BIT(8)
+#define MT6358_IMP1_AUTOREPEAT_EN BIT(15)
+
+#define MT6359_IMP0_CONV_EN BIT(0)
+#define MT6359_IMP1_IRQ_RDY BIT(15)
+
+enum mtk_pmic_auxadc_regs {
+ PMIC_AUXADC_ADC0,
+ PMIC_AUXADC_DCM_CON,
+ PMIC_AUXADC_IMP0,
+ PMIC_AUXADC_IMP1,
+ PMIC_AUXADC_IMP3,
+ PMIC_AUXADC_RQST0,
+ PMIC_AUXADC_RQST1,
+ PMIC_HK_TOP_WKEY,
+ PMIC_HK_TOP_RST_CON0,
+ PMIC_FGADC_R_CON0,
+ PMIC_AUXADC_REGS_MAX
+};
+
+enum mtk_pmic_auxadc_channels {
+ PMIC_AUXADC_CHAN_BATADC,
+ PMIC_AUXADC_CHAN_ISENSE,
+ PMIC_AUXADC_CHAN_VCDT,
+ PMIC_AUXADC_CHAN_BAT_TEMP,
+ PMIC_AUXADC_CHAN_BATID,
+ PMIC_AUXADC_CHAN_CHIP_TEMP,
+ PMIC_AUXADC_CHAN_VCORE_TEMP,
+ PMIC_AUXADC_CHAN_VPROC_TEMP,
+ PMIC_AUXADC_CHAN_VGPU_TEMP,
+ PMIC_AUXADC_CHAN_ACCDET,
+ PMIC_AUXADC_CHAN_VDCXO,
+ PMIC_AUXADC_CHAN_TSX_TEMP,
+ PMIC_AUXADC_CHAN_HPOFS_CAL,
+ PMIC_AUXADC_CHAN_DCXO_TEMP,
+ PMIC_AUXADC_CHAN_VBIF,
+ PMIC_AUXADC_CHAN_IBAT,
+ PMIC_AUXADC_CHAN_VBAT,
+ PMIC_AUXADC_CHAN_MAX
+};
+
+/**
+ * struct mt6359_auxadc - Main driver structure
+ * @dev: Device pointer
+ * @regmap: Regmap from SoC PMIC Wrapper
+ * @chip_info: PMIC specific chip info
+ * @lock: Mutex to serialize AUXADC reading vs configuration
+ * @timed_out: Signals whether the last read timed out
+ */
+struct mt6359_auxadc {
+ struct device *dev;
+ struct regmap *regmap;
+ const struct mtk_pmic_auxadc_info *chip_info;
+ struct mutex lock;
+ bool timed_out;
+};
+
+/**
+ * struct mtk_pmic_auxadc_chan - PMIC AUXADC channel data
+ * @req_idx: Request register number
+ * @req_mask: Bitmask to activate a channel
+ * @num_samples: Number of AUXADC samples for averaging
+ * @r_ratio: Resistance ratio fractional
+ */
+struct mtk_pmic_auxadc_chan {
+ u8 req_idx;
+ u16 req_mask;
+ u16 num_samples;
+ struct u8_fract r_ratio;
+};
+
+/**
+ * struct mtk_pmic_auxadc_info - PMIC specific chip info
+ * @model_name: PMIC model name
+ * @channels: IIO specification of ADC channels
+ * @num_channels: Number of ADC channels
+ * @desc: PMIC AUXADC channel data
+ * @regs: List of PMIC specific registers
+ * @sec_unlock_key: Security unlock key for HK_TOP writes
+ * @imp_adc_num: ADC channel for battery impedance readings
+ * @read_imp: Callback to read impedance channels
+ */
+struct mtk_pmic_auxadc_info {
+ const char *model_name;
+ const struct iio_chan_spec *channels;
+ u8 num_channels;
+ const struct mtk_pmic_auxadc_chan *desc;
+ const u16 *regs;
+ u16 sec_unlock_key;
+ u8 imp_adc_num;
+ int (*read_imp)(struct mt6359_auxadc *adc_dev, int *vbat, int *ibat);
+};
+
+#define MTK_PMIC_ADC_CHAN(_ch_idx, _req_idx, _req_bit, _samples, _rnum, _rdiv) \
+ [PMIC_AUXADC_CHAN_##_ch_idx] = { \
+ .req_idx = _req_idx, \
+ .req_mask = BIT(_req_bit), \
+ .num_samples = _samples, \
+ .r_ratio = { _rnum, _rdiv } \
+ }
+
+#define MTK_PMIC_IIO_CHAN(_model, _name, _ch_idx, _adc_idx, _nbits, _ch_type) \
+{ \
+ .type = _ch_type, \
+ .channel = _model##_AUXADC_##_ch_idx, \
+ .address = _adc_idx, \
+ .scan_index = PMIC_AUXADC_CHAN_##_ch_idx, \
+ .datasheet_name = __stringify(_name), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = _nbits, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU \
+ }, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) \
+}
+
+static const struct iio_chan_spec mt6357_auxadc_channels[] = {
+ MTK_PMIC_IIO_CHAN(MT6357, bat_adc, BATADC, 0, 15, IIO_RESISTANCE),
+ MTK_PMIC_IIO_CHAN(MT6357, isense, ISENSE, 1, 12, IIO_CURRENT),
+ MTK_PMIC_IIO_CHAN(MT6357, cdt_v, VCDT, 2, 12, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6357, batt_temp, BAT_TEMP, 3, 12, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6357, chip_temp, CHIP_TEMP, 4, 12, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6357, acc_det, ACCDET, 5, 12, IIO_RESISTANCE),
+ MTK_PMIC_IIO_CHAN(MT6357, dcxo_v, VDCXO, 6, 12, IIO_VOLTAGE),
+ MTK_PMIC_IIO_CHAN(MT6357, tsx_temp, TSX_TEMP, 7, 15, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6357, hp_ofs_cal, HPOFS_CAL, 9, 15, IIO_RESISTANCE),
+ MTK_PMIC_IIO_CHAN(MT6357, dcxo_temp, DCXO_TEMP, 36, 15, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6357, vcore_temp, VCORE_TEMP, 40, 12, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6357, vproc_temp, VPROC_TEMP, 41, 12, IIO_TEMP),
+
+ /* Battery impedance channels */
+ MTK_PMIC_IIO_CHAN(MT6357, batt_v, VBAT, 0, 15, IIO_VOLTAGE),
+};
+
+static const struct mtk_pmic_auxadc_chan mt6357_auxadc_ch_desc[] = {
+ MTK_PMIC_ADC_CHAN(BATADC, PMIC_AUXADC_RQST0, 0, 128, 3, 1),
+ MTK_PMIC_ADC_CHAN(ISENSE, PMIC_AUXADC_RQST0, 0, 128, 3, 1),
+ MTK_PMIC_ADC_CHAN(VCDT, PMIC_AUXADC_RQST0, 0, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(BAT_TEMP, PMIC_AUXADC_RQST0, 3, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(CHIP_TEMP, PMIC_AUXADC_RQST0, 4, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(ACCDET, PMIC_AUXADC_RQST0, 5, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(TSX_TEMP, PMIC_AUXADC_RQST0, 7, 128, 1, 1),
+ MTK_PMIC_ADC_CHAN(HPOFS_CAL, PMIC_AUXADC_RQST0, 9, 256, 1, 1),
+ MTK_PMIC_ADC_CHAN(DCXO_TEMP, PMIC_AUXADC_RQST0, 10, 16, 1, 1),
+ MTK_PMIC_ADC_CHAN(VBIF, PMIC_AUXADC_RQST0, 11, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(VCORE_TEMP, PMIC_AUXADC_RQST1, 5, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(VPROC_TEMP, PMIC_AUXADC_RQST1, 6, 8, 1, 1),
+
+ /* Battery impedance channels */
+ MTK_PMIC_ADC_CHAN(VBAT, 0, 0, 128, 3, 1),
+};
+
+static const u16 mt6357_auxadc_regs[] = {
+ [PMIC_HK_TOP_RST_CON0] = 0x0f90,
+ [PMIC_AUXADC_DCM_CON] = 0x122e,
+ [PMIC_AUXADC_ADC0] = 0x1088,
+ [PMIC_AUXADC_IMP0] = 0x119c,
+ [PMIC_AUXADC_IMP1] = 0x119e,
+ [PMIC_AUXADC_RQST0] = 0x110e,
+ [PMIC_AUXADC_RQST1] = 0x1114,
+};
+
+static const struct iio_chan_spec mt6358_auxadc_channels[] = {
+ MTK_PMIC_IIO_CHAN(MT6358, bat_adc, BATADC, 0, 15, IIO_RESISTANCE),
+ MTK_PMIC_IIO_CHAN(MT6358, cdt_v, VCDT, 2, 12, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6358, batt_temp, BAT_TEMP, 3, 12, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6358, chip_temp, CHIP_TEMP, 4, 12, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6358, acc_det, ACCDET, 5, 12, IIO_RESISTANCE),
+ MTK_PMIC_IIO_CHAN(MT6358, dcxo_v, VDCXO, 6, 12, IIO_VOLTAGE),
+ MTK_PMIC_IIO_CHAN(MT6358, tsx_temp, TSX_TEMP, 7, 15, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6358, hp_ofs_cal, HPOFS_CAL, 9, 15, IIO_RESISTANCE),
+ MTK_PMIC_IIO_CHAN(MT6358, dcxo_temp, DCXO_TEMP, 10, 15, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6358, bif_v, VBIF, 11, 12, IIO_VOLTAGE),
+ MTK_PMIC_IIO_CHAN(MT6358, vcore_temp, VCORE_TEMP, 38, 12, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6358, vproc_temp, VPROC_TEMP, 39, 12, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6358, vgpu_temp, VGPU_TEMP, 40, 12, IIO_TEMP),
+
+ /* Battery impedance channels */
+ MTK_PMIC_IIO_CHAN(MT6358, batt_v, VBAT, 0, 15, IIO_VOLTAGE),
+};
+
+static const struct mtk_pmic_auxadc_chan mt6358_auxadc_ch_desc[] = {
+ MTK_PMIC_ADC_CHAN(BATADC, PMIC_AUXADC_RQST0, 0, 128, 3, 1),
+ MTK_PMIC_ADC_CHAN(VCDT, PMIC_AUXADC_RQST0, 0, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(BAT_TEMP, PMIC_AUXADC_RQST0, 3, 8, 2, 1),
+ MTK_PMIC_ADC_CHAN(CHIP_TEMP, PMIC_AUXADC_RQST0, 4, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(ACCDET, PMIC_AUXADC_RQST0, 5, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(VDCXO, PMIC_AUXADC_RQST0, 6, 8, 3, 2),
+ MTK_PMIC_ADC_CHAN(TSX_TEMP, PMIC_AUXADC_RQST0, 7, 128, 1, 1),
+ MTK_PMIC_ADC_CHAN(HPOFS_CAL, PMIC_AUXADC_RQST0, 9, 256, 1, 1),
+ MTK_PMIC_ADC_CHAN(DCXO_TEMP, PMIC_AUXADC_RQST0, 10, 16, 1, 1),
+ MTK_PMIC_ADC_CHAN(VBIF, PMIC_AUXADC_RQST0, 11, 8, 2, 1),
+ MTK_PMIC_ADC_CHAN(VCORE_TEMP, PMIC_AUXADC_RQST1, 8, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(VPROC_TEMP, PMIC_AUXADC_RQST1, 9, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(VGPU_TEMP, PMIC_AUXADC_RQST1, 10, 8, 1, 1),
+
+ /* Battery impedance channels */
+ MTK_PMIC_ADC_CHAN(VBAT, 0, 0, 128, 7, 2),
+};
+
+static const u16 mt6358_auxadc_regs[] = {
+ [PMIC_HK_TOP_RST_CON0] = 0x0f90,
+ [PMIC_AUXADC_DCM_CON] = 0x1260,
+ [PMIC_AUXADC_ADC0] = 0x1088,
+ [PMIC_AUXADC_IMP0] = 0x1208,
+ [PMIC_AUXADC_IMP1] = 0x120a,
+ [PMIC_AUXADC_RQST0] = 0x1108,
+ [PMIC_AUXADC_RQST1] = 0x110a,
+};
+
+static const struct iio_chan_spec mt6359_auxadc_channels[] = {
+ MTK_PMIC_IIO_CHAN(MT6359, bat_adc, BATADC, 0, 15, IIO_RESISTANCE),
+ MTK_PMIC_IIO_CHAN(MT6359, batt_temp, BAT_TEMP, 3, 12, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6359, chip_temp, CHIP_TEMP, 4, 12, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6359, acc_det, ACCDET, 5, 12, IIO_RESISTANCE),
+ MTK_PMIC_IIO_CHAN(MT6359, dcxo_v, VDCXO, 6, 12, IIO_VOLTAGE),
+ MTK_PMIC_IIO_CHAN(MT6359, tsx_temp, TSX_TEMP, 7, 15, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6359, hp_ofs_cal, HPOFS_CAL, 9, 15, IIO_RESISTANCE),
+ MTK_PMIC_IIO_CHAN(MT6359, dcxo_temp, DCXO_TEMP, 10, 15, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6359, bif_v, VBIF, 11, 12, IIO_VOLTAGE),
+ MTK_PMIC_IIO_CHAN(MT6359, vcore_temp, VCORE_TEMP, 30, 12, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6359, vproc_temp, VPROC_TEMP, 31, 12, IIO_TEMP),
+ MTK_PMIC_IIO_CHAN(MT6359, vgpu_temp, VGPU_TEMP, 32, 12, IIO_TEMP),
+
+ /* Battery impedance channels */
+ MTK_PMIC_IIO_CHAN(MT6359, batt_v, VBAT, 0, 15, IIO_VOLTAGE),
+ MTK_PMIC_IIO_CHAN(MT6359, batt_i, IBAT, 0, 15, IIO_CURRENT),
+};
+
+static const struct mtk_pmic_auxadc_chan mt6359_auxadc_ch_desc[] = {
+ MTK_PMIC_ADC_CHAN(BATADC, PMIC_AUXADC_RQST0, 0, 128, 7, 2),
+ MTK_PMIC_ADC_CHAN(BAT_TEMP, PMIC_AUXADC_RQST0, 3, 8, 5, 2),
+ MTK_PMIC_ADC_CHAN(CHIP_TEMP, PMIC_AUXADC_RQST0, 4, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(ACCDET, PMIC_AUXADC_RQST0, 5, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(VDCXO, PMIC_AUXADC_RQST0, 6, 8, 3, 2),
+ MTK_PMIC_ADC_CHAN(TSX_TEMP, PMIC_AUXADC_RQST0, 7, 128, 1, 1),
+ MTK_PMIC_ADC_CHAN(HPOFS_CAL, PMIC_AUXADC_RQST0, 9, 256, 1, 1),
+ MTK_PMIC_ADC_CHAN(DCXO_TEMP, PMIC_AUXADC_RQST0, 10, 16, 1, 1),
+ MTK_PMIC_ADC_CHAN(VBIF, PMIC_AUXADC_RQST0, 11, 8, 5, 2),
+ MTK_PMIC_ADC_CHAN(VCORE_TEMP, PMIC_AUXADC_RQST1, 8, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(VPROC_TEMP, PMIC_AUXADC_RQST1, 9, 8, 1, 1),
+ MTK_PMIC_ADC_CHAN(VGPU_TEMP, PMIC_AUXADC_RQST1, 10, 8, 1, 1),
+
+ /* Battery impedance channels */
+ MTK_PMIC_ADC_CHAN(VBAT, 0, 0, 128, 7, 2),
+ MTK_PMIC_ADC_CHAN(IBAT, 0, 0, 128, 7, 2),
+};
+
+static const u16 mt6359_auxadc_regs[] = {
+ [PMIC_FGADC_R_CON0] = 0x0d88,
+ [PMIC_HK_TOP_WKEY] = 0x0fb4,
+ [PMIC_HK_TOP_RST_CON0] = 0x0f90,
+ [PMIC_AUXADC_RQST0] = 0x1108,
+ [PMIC_AUXADC_RQST1] = 0x110a,
+ [PMIC_AUXADC_ADC0] = 0x1088,
+ [PMIC_AUXADC_IMP0] = 0x1208,
+ [PMIC_AUXADC_IMP1] = 0x120a,
+ [PMIC_AUXADC_IMP3] = 0x120e,
+};
+
+static void mt6358_stop_imp_conv(struct mt6359_auxadc *adc_dev)
+{
+ const struct mtk_pmic_auxadc_info *cinfo = adc_dev->chip_info;
+ struct regmap *regmap = adc_dev->regmap;
+
+ regmap_set_bits(regmap, cinfo->regs[PMIC_AUXADC_IMP0], MT6358_IMP0_CLEAR);
+ regmap_clear_bits(regmap, cinfo->regs[PMIC_AUXADC_IMP0], MT6358_IMP0_CLEAR);
+ regmap_clear_bits(regmap, cinfo->regs[PMIC_AUXADC_IMP1], MT6358_IMP1_AUTOREPEAT_EN);
+ regmap_clear_bits(regmap, cinfo->regs[PMIC_AUXADC_DCM_CON], MT6358_DCM_CK_SW_EN);
+}
+
+static int mt6358_start_imp_conv(struct mt6359_auxadc *adc_dev)
+{
+ const struct mtk_pmic_auxadc_info *cinfo = adc_dev->chip_info;
+ struct regmap *regmap = adc_dev->regmap;
+ u32 val;
+ int ret;
+
+ regmap_set_bits(regmap, cinfo->regs[PMIC_AUXADC_DCM_CON], MT6358_DCM_CK_SW_EN);
+ regmap_set_bits(regmap, cinfo->regs[PMIC_AUXADC_IMP1], MT6358_IMP1_AUTOREPEAT_EN);
+
+ ret = regmap_read_poll_timeout(adc_dev->regmap, cinfo->regs[PMIC_AUXADC_IMP0],
+ val, val & MT6358_IMP0_IRQ_RDY,
+ IMP_POLL_DELAY_US, AUXADC_TIMEOUT_US);
+ if (ret) {
+ mt6358_stop_imp_conv(adc_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mt6358_read_imp(struct mt6359_auxadc *adc_dev, int *vbat, int *ibat)
+{
+ const struct mtk_pmic_auxadc_info *cinfo = adc_dev->chip_info;
+ struct regmap *regmap = adc_dev->regmap;
+ u16 reg_adc0 = cinfo->regs[PMIC_AUXADC_ADC0];
+ u32 val_v;
+ int ret;
+
+ ret = mt6358_start_imp_conv(adc_dev);
+ if (ret)
+ return ret;
+
+ /* Read the params before stopping */
+ regmap_read(regmap, reg_adc0 + (cinfo->imp_adc_num << 1), &val_v);
+
+ mt6358_stop_imp_conv(adc_dev);
+
+ if (vbat)
+ *vbat = val_v;
+ if (ibat)
+ *ibat = 0;
+
+ return 0;
+}
+
+static int mt6359_read_imp(struct mt6359_auxadc *adc_dev, int *vbat, int *ibat)
+{
+ const struct mtk_pmic_auxadc_info *cinfo = adc_dev->chip_info;
+ struct regmap *regmap = adc_dev->regmap;
+ u32 val, val_v, val_i;
+ int ret;
+
+ /* Start conversion */
+ regmap_write(regmap, cinfo->regs[PMIC_AUXADC_IMP0], MT6359_IMP0_CONV_EN);
+ ret = regmap_read_poll_timeout(regmap, cinfo->regs[PMIC_AUXADC_IMP1],
+ val, val & MT6359_IMP1_IRQ_RDY,
+ IMP_POLL_DELAY_US, AUXADC_TIMEOUT_US);
+
+ /* Stop conversion regardless of the result */
+ regmap_write(regmap, cinfo->regs[PMIC_AUXADC_IMP0], 0);
+ if (ret)
+ return ret;
+
+ /* If it succeeded, wait for the registers to be populated */
+ fsleep(IMP_STOP_DELAY_US);
+
+ ret = regmap_read(regmap, cinfo->regs[PMIC_AUXADC_IMP3], &val_v);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(regmap, cinfo->regs[PMIC_FGADC_R_CON0], &val_i);
+ if (ret)
+ return ret;
+
+ if (vbat)
+ *vbat = val_v;
+ if (ibat)
+ *ibat = val_i;
+
+ return 0;
+}
+
+static const struct mtk_pmic_auxadc_info mt6357_chip_info = {
+ .model_name = "MT6357",
+ .channels = mt6357_auxadc_channels,
+ .num_channels = ARRAY_SIZE(mt6357_auxadc_channels),
+ .desc = mt6357_auxadc_ch_desc,
+ .regs = mt6357_auxadc_regs,
+ .imp_adc_num = MT6357_IMP_ADC_NUM,
+ .read_imp = mt6358_read_imp,
+};
+
+static const struct mtk_pmic_auxadc_info mt6358_chip_info = {
+ .model_name = "MT6358",
+ .channels = mt6358_auxadc_channels,
+ .num_channels = ARRAY_SIZE(mt6358_auxadc_channels),
+ .desc = mt6358_auxadc_ch_desc,
+ .regs = mt6358_auxadc_regs,
+ .imp_adc_num = MT6358_IMP_ADC_NUM,
+ .read_imp = mt6358_read_imp,
+};
+
+static const struct mtk_pmic_auxadc_info mt6359_chip_info = {
+ .model_name = "MT6359",
+ .channels = mt6359_auxadc_channels,
+ .num_channels = ARRAY_SIZE(mt6359_auxadc_channels),
+ .desc = mt6359_auxadc_ch_desc,
+ .regs = mt6359_auxadc_regs,
+ .sec_unlock_key = 0x6359,
+ .read_imp = mt6359_read_imp,
+};
+
+static void mt6359_auxadc_reset(struct mt6359_auxadc *adc_dev)
+{
+ const struct mtk_pmic_auxadc_info *cinfo = adc_dev->chip_info;
+ struct regmap *regmap = adc_dev->regmap;
+
+ /* Unlock HK_TOP writes */
+ if (cinfo->sec_unlock_key)
+ regmap_write(regmap, cinfo->regs[PMIC_HK_TOP_WKEY], cinfo->sec_unlock_key);
+
+ /* Assert ADC reset */
+ regmap_set_bits(regmap, cinfo->regs[PMIC_HK_TOP_RST_CON0], PMIC_RG_RESET_VAL);
+
+ /* De-assert ADC reset. No wait required, as pwrap takes care of that for us. */
+ regmap_clear_bits(regmap, cinfo->regs[PMIC_HK_TOP_RST_CON0], PMIC_RG_RESET_VAL);
+
+ /* Lock HK_TOP writes again */
+ if (cinfo->sec_unlock_key)
+ regmap_write(regmap, cinfo->regs[PMIC_HK_TOP_WKEY], 0);
+}
+
+static int mt6359_auxadc_read_adc(struct mt6359_auxadc *adc_dev,
+ const struct iio_chan_spec *chan, int *out)
+{
+ const struct mtk_pmic_auxadc_info *cinfo = adc_dev->chip_info;
+ const struct mtk_pmic_auxadc_chan *desc = &cinfo->desc[chan->scan_index];
+ struct regmap *regmap = adc_dev->regmap;
+ u32 val;
+ int ret;
+
+ /* Request to start sampling for ADC channel */
+ ret = regmap_write(regmap, cinfo->regs[desc->req_idx], desc->req_mask);
+ if (ret)
+ return ret;
+
+ /* Wait until all samples are averaged */
+ fsleep(desc->num_samples * AUXADC_AVG_TIME_US);
+
+ ret = regmap_read_poll_timeout(regmap,
+ cinfo->regs[PMIC_AUXADC_ADC0] + (chan->address << 1),
+ val, val & PMIC_AUXADC_RDY_BIT,
+ AUXADC_POLL_DELAY_US, AUXADC_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ /* Stop sampling */
+ regmap_write(regmap, cinfo->regs[desc->req_idx], 0);
+
+ *out = val & GENMASK(chan->scan_type.realbits - 1, 0);
+ return 0;
+}
+
+static int mt6359_auxadc_read_label(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, char *label)
+{
+ return sysfs_emit(label, "%s\n", chan->datasheet_name);
+}
+
+static int mt6359_auxadc_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct mt6359_auxadc *adc_dev = iio_priv(indio_dev);
+ const struct mtk_pmic_auxadc_info *cinfo = adc_dev->chip_info;
+ const struct mtk_pmic_auxadc_chan *desc = &cinfo->desc[chan->scan_index];
+ int ret;
+
+ if (mask == IIO_CHAN_INFO_SCALE) {
+ *val = desc->r_ratio.numerator * AUXADC_VOLT_FULL;
+
+ if (desc->r_ratio.denominator > 1) {
+ *val2 = desc->r_ratio.denominator;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return IIO_VAL_INT;
+ }
+
+ scoped_guard(mutex, &adc_dev->lock) {
+ switch (chan->scan_index) {
+ case PMIC_AUXADC_CHAN_IBAT:
+ ret = adc_dev->chip_info->read_imp(adc_dev, NULL, val);
+ break;
+ case PMIC_AUXADC_CHAN_VBAT:
+ ret = adc_dev->chip_info->read_imp(adc_dev, val, NULL);
+ break;
+ default:
+ ret = mt6359_auxadc_read_adc(adc_dev, chan, val);
+ break;
+ }
+ }
+
+ if (ret) {
+ /*
+ * If we get more than one timeout, it's possible that the
+ * AUXADC is stuck: perform a full reset to recover it.
+ */
+ if (ret == -ETIMEDOUT) {
+ if (adc_dev->timed_out) {
+ dev_warn(adc_dev->dev, "Resetting stuck ADC!\n");
+ mt6359_auxadc_reset(adc_dev);
+ }
+ adc_dev->timed_out = true;
+ }
+ return ret;
+ }
+ adc_dev->timed_out = false;
+
+ return IIO_VAL_INT;
+}
+
+static const struct iio_info mt6359_auxadc_iio_info = {
+ .read_label = mt6359_auxadc_read_label,
+ .read_raw = mt6359_auxadc_read_raw,
+};
+
+static int mt6359_auxadc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *mt6397_mfd_dev = dev->parent;
+ struct mt6359_auxadc *adc_dev;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ int ret;
+
+ /* Regmap is from SoC PMIC Wrapper, parent of the mt6397 MFD */
+ regmap = dev_get_regmap(mt6397_mfd_dev->parent, NULL);
+ if (!regmap)
+ return dev_err_probe(dev, -ENODEV, "Failed to get regmap\n");
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*adc_dev));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc_dev = iio_priv(indio_dev);
+ adc_dev->regmap = regmap;
+ adc_dev->dev = dev;
+
+ adc_dev->chip_info = device_get_match_data(dev);
+ if (!adc_dev->chip_info)
+ return -EINVAL;
+
+ mutex_init(&adc_dev->lock);
+
+ mt6359_auxadc_reset(adc_dev);
+
+ indio_dev->name = adc_dev->chip_info->model_name;
+ indio_dev->info = &mt6359_auxadc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = adc_dev->chip_info->channels;
+ indio_dev->num_channels = adc_dev->chip_info->num_channels;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register iio device\n");
+
+ return 0;
+}
+
+static const struct of_device_id mt6359_auxadc_of_match[] = {
+ { .compatible = "mediatek,mt6357-auxadc", .data = &mt6357_chip_info },
+ { .compatible = "mediatek,mt6358-auxadc", .data = &mt6358_chip_info },
+ { .compatible = "mediatek,mt6359-auxadc", .data = &mt6359_chip_info },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mt6359_auxadc_of_match);
+
+static struct platform_driver mt6359_auxadc_driver = {
+ .driver = {
+ .name = "mt6359-auxadc",
+ .of_match_table = mt6359_auxadc_of_match,
+ },
+ .probe = mt6359_auxadc_probe,
+};
+module_platform_driver(mt6359_auxadc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("MediaTek MT6359 PMIC AUXADC Driver");
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
index d9e1696df7ae..600151a62f1f 100644
--- a/drivers/iio/adc/nau7802.c
+++ b/drivers/iio/adc/nau7802.c
@@ -532,7 +532,7 @@ static int nau7802_probe(struct i2c_client *client)
}
static const struct i2c_device_id nau7802_i2c_id[] = {
- { "nau7802", 0 },
+ { "nau7802" },
{ }
};
MODULE_DEVICE_TABLE(i2c, nau7802_i2c_id);
diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c
index 456f12faa348..ae24a27805ab 100644
--- a/drivers/iio/adc/pac1934.c
+++ b/drivers/iio/adc/pac1934.c
@@ -227,11 +227,6 @@ struct pac1934_features {
const char *name;
};
-struct samp_rate_mapping {
- u16 samp_rate;
- u8 shift2value;
-};
-
static const unsigned int samp_rate_map_tbl[] = {
[PAC1934_SAMP_1024SPS] = 1024,
[PAC1934_SAMP_256SPS] = 256,
diff --git a/drivers/iio/adc/qcom-spmi-rradc.c b/drivers/iio/adc/qcom-spmi-rradc.c
index 56a713766954..1402df68dd52 100644
--- a/drivers/iio/adc/qcom-spmi-rradc.c
+++ b/drivers/iio/adc/qcom-spmi-rradc.c
@@ -358,15 +358,15 @@ static int rradc_enable_continuous_mode(struct rradc_chip *chip)
int ret;
/* Clear channel log */
- ret = regmap_update_bits(chip->regmap, chip->base + RR_ADC_LOG,
- RR_ADC_LOG_CLR_CTRL, RR_ADC_LOG_CLR_CTRL);
+ ret = regmap_set_bits(chip->regmap, chip->base + RR_ADC_LOG,
+ RR_ADC_LOG_CLR_CTRL);
if (ret < 0) {
dev_err(chip->dev, "log ctrl update to clear failed:%d\n", ret);
return ret;
}
- ret = regmap_update_bits(chip->regmap, chip->base + RR_ADC_LOG,
- RR_ADC_LOG_CLR_CTRL, 0);
+ ret = regmap_clear_bits(chip->regmap, chip->base + RR_ADC_LOG,
+ RR_ADC_LOG_CLR_CTRL);
if (ret < 0) {
dev_err(chip->dev, "log ctrl update to not clear failed:%d\n",
ret);
@@ -374,9 +374,8 @@ static int rradc_enable_continuous_mode(struct rradc_chip *chip)
}
/* Switch to continuous mode */
- ret = regmap_update_bits(chip->regmap, chip->base + RR_ADC_CTL,
- RR_ADC_CTL_CONTINUOUS_SEL,
- RR_ADC_CTL_CONTINUOUS_SEL);
+ ret = regmap_set_bits(chip->regmap, chip->base + RR_ADC_CTL,
+ RR_ADC_CTL_CONTINUOUS_SEL);
if (ret < 0)
dev_err(chip->dev, "Update to continuous mode failed:%d\n",
ret);
@@ -389,8 +388,8 @@ static int rradc_disable_continuous_mode(struct rradc_chip *chip)
int ret;
/* Switch to non continuous mode */
- ret = regmap_update_bits(chip->regmap, chip->base + RR_ADC_CTL,
- RR_ADC_CTL_CONTINUOUS_SEL, 0);
+ ret = regmap_clear_bits(chip->regmap, chip->base + RR_ADC_CTL,
+ RR_ADC_CTL_CONTINUOUS_SEL);
if (ret < 0)
dev_err(chip->dev, "Update to non-continuous mode failed:%d\n",
ret);
@@ -434,8 +433,8 @@ static int rradc_read_status_in_cont_mode(struct rradc_chip *chip,
return -EINVAL;
}
- ret = regmap_update_bits(chip->regmap, chip->base + chan->trigger_addr,
- chan->trigger_mask, chan->trigger_mask);
+ ret = regmap_set_bits(chip->regmap, chip->base + chan->trigger_addr,
+ chan->trigger_mask);
if (ret < 0) {
dev_err(chip->dev,
"Failed to apply trigger for channel '%s' ret=%d\n",
@@ -469,8 +468,8 @@ static int rradc_read_status_in_cont_mode(struct rradc_chip *chip,
rradc_disable_continuous_mode(chip);
disable_trigger:
- regmap_update_bits(chip->regmap, chip->base + chan->trigger_addr,
- chan->trigger_mask, 0);
+ regmap_clear_bits(chip->regmap, chip->base + chan->trigger_addr,
+ chan->trigger_mask);
return ret;
}
@@ -481,17 +480,16 @@ static int rradc_prepare_batt_id_conversion(struct rradc_chip *chip,
{
int ret;
- ret = regmap_update_bits(chip->regmap, chip->base + RR_ADC_BATT_ID_CTRL,
- RR_ADC_BATT_ID_CTRL_CHANNEL_CONV,
- RR_ADC_BATT_ID_CTRL_CHANNEL_CONV);
+ ret = regmap_set_bits(chip->regmap, chip->base + RR_ADC_BATT_ID_CTRL,
+ RR_ADC_BATT_ID_CTRL_CHANNEL_CONV);
if (ret < 0) {
dev_err(chip->dev, "Enabling BATT ID channel failed:%d\n", ret);
return ret;
}
- ret = regmap_update_bits(chip->regmap,
- chip->base + RR_ADC_BATT_ID_TRIGGER,
- RR_ADC_TRIGGER_CTL, RR_ADC_TRIGGER_CTL);
+ ret = regmap_set_bits(chip->regmap,
+ chip->base + RR_ADC_BATT_ID_TRIGGER,
+ RR_ADC_TRIGGER_CTL);
if (ret < 0) {
dev_err(chip->dev, "BATT_ID trigger set failed:%d\n", ret);
goto out_disable_batt_id;
@@ -500,12 +498,12 @@ static int rradc_prepare_batt_id_conversion(struct rradc_chip *chip,
ret = rradc_read_status_in_cont_mode(chip, chan_address);
/* Reset registers back to default values */
- regmap_update_bits(chip->regmap, chip->base + RR_ADC_BATT_ID_TRIGGER,
- RR_ADC_TRIGGER_CTL, 0);
+ regmap_clear_bits(chip->regmap, chip->base + RR_ADC_BATT_ID_TRIGGER,
+ RR_ADC_TRIGGER_CTL);
out_disable_batt_id:
- regmap_update_bits(chip->regmap, chip->base + RR_ADC_BATT_ID_CTRL,
- RR_ADC_BATT_ID_CTRL_CHANNEL_CONV, 0);
+ regmap_clear_bits(chip->regmap, chip->base + RR_ADC_BATT_ID_CTRL,
+ RR_ADC_BATT_ID_CTRL_CHANNEL_CONV);
return ret;
}
@@ -965,9 +963,9 @@ static int rradc_probe(struct platform_device *pdev)
if (batt_id_delay >= 0) {
batt_id_delay = FIELD_PREP(BATT_ID_SETTLE_MASK, batt_id_delay);
- ret = regmap_update_bits(chip->regmap,
- chip->base + RR_ADC_BATT_ID_CFG,
- batt_id_delay, batt_id_delay);
+ ret = regmap_set_bits(chip->regmap,
+ chip->base + RR_ADC_BATT_ID_CFG,
+ batt_id_delay);
if (ret < 0) {
dev_err(chip->dev,
"BATT_ID settling time config failed:%d\n",
diff --git a/drivers/iio/adc/rn5t618-adc.c b/drivers/iio/adc/rn5t618-adc.c
index 6bf32907f01d..ce5f3011fe00 100644
--- a/drivers/iio/adc/rn5t618-adc.c
+++ b/drivers/iio/adc/rn5t618-adc.c
@@ -137,9 +137,8 @@ static int rn5t618_adc_read(struct iio_dev *iio_dev,
init_completion(&adc->conv_completion);
/* single conversion */
- ret = regmap_update_bits(adc->rn5t618->regmap, RN5T618_ADCCNT3,
- RN5T618_ADCCNT3_GODONE,
- RN5T618_ADCCNT3_GODONE);
+ ret = regmap_set_bits(adc->rn5t618->regmap, RN5T618_ADCCNT3,
+ RN5T618_ADCCNT3_GODONE);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index b4a2e057d80f..2535c2c3e60b 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -508,13 +508,13 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
}
}
- ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
- SC27XX_ADC_EN, SC27XX_ADC_EN);
+ ret = regmap_set_bits(data->regmap, data->base + SC27XX_ADC_CTL,
+ SC27XX_ADC_EN);
if (ret)
goto regulator_restore;
- ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_CLR,
- SC27XX_ADC_IRQ_CLR, SC27XX_ADC_IRQ_CLR);
+ ret = regmap_set_bits(data->regmap, data->base + SC27XX_ADC_INT_CLR,
+ SC27XX_ADC_IRQ_CLR);
if (ret)
goto disable_adc;
@@ -537,8 +537,8 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
if (ret)
goto disable_adc;
- ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
- SC27XX_ADC_CHN_RUN, SC27XX_ADC_CHN_RUN);
+ ret = regmap_set_bits(data->regmap, data->base + SC27XX_ADC_CTL,
+ SC27XX_ADC_CHN_RUN);
if (ret)
goto disable_adc;
@@ -559,8 +559,8 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
value &= SC27XX_ADC_DATA_MASK;
disable_adc:
- regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
- SC27XX_ADC_EN, 0);
+ regmap_clear_bits(data->regmap, data->base + SC27XX_ADC_CTL,
+ SC27XX_ADC_EN);
regulator_restore:
if ((data->var_data->set_volref) && (channel == 30 || channel == 31)) {
ret_volref = regulator_set_voltage(data->volref,
@@ -765,15 +765,14 @@ static int sc27xx_adc_enable(struct sc27xx_adc_data *data)
{
int ret;
- ret = regmap_update_bits(data->regmap, data->var_data->module_en,
- SC27XX_MODULE_ADC_EN, SC27XX_MODULE_ADC_EN);
+ ret = regmap_set_bits(data->regmap, data->var_data->module_en,
+ SC27XX_MODULE_ADC_EN);
if (ret)
return ret;
/* Enable ADC work clock and controller clock */
- ret = regmap_update_bits(data->regmap, data->var_data->clk_en,
- SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN,
- SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN);
+ ret = regmap_set_bits(data->regmap, data->var_data->clk_en,
+ SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN);
if (ret)
goto disable_adc;
@@ -789,11 +788,11 @@ static int sc27xx_adc_enable(struct sc27xx_adc_data *data)
return 0;
disable_clk:
- regmap_update_bits(data->regmap, data->var_data->clk_en,
- SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0);
+ regmap_clear_bits(data->regmap, data->var_data->clk_en,
+ SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN);
disable_adc:
- regmap_update_bits(data->regmap, data->var_data->module_en,
- SC27XX_MODULE_ADC_EN, 0);
+ regmap_clear_bits(data->regmap, data->var_data->module_en,
+ SC27XX_MODULE_ADC_EN);
return ret;
}
@@ -803,11 +802,11 @@ static void sc27xx_adc_disable(void *_data)
struct sc27xx_adc_data *data = _data;
/* Disable ADC work clock and controller clock */
- regmap_update_bits(data->regmap, data->var_data->clk_en,
- SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0);
+ regmap_clear_bits(data->regmap, data->var_data->clk_en,
+ SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN);
- regmap_update_bits(data->regmap, data->var_data->module_en,
- SC27XX_MODULE_ADC_EN, 0);
+ regmap_clear_bits(data->regmap, data->var_data->module_en,
+ SC27XX_MODULE_ADC_EN);
}
static const struct sc27xx_adc_variant_data sc2731_data = {
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 9a47d2c87f05..fabd654245f5 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -759,8 +759,7 @@ static int stm32_dfsdm_start_conv(struct iio_dev *indio_dev,
return 0;
filter_unconfigure:
- regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
- DFSDM_CR1_CFG_MASK, 0);
+ regmap_clear_bits(regmap, DFSDM_CR1(adc->fl_id), DFSDM_CR1_CFG_MASK);
stop_channels:
stm32_dfsdm_stop_channel(indio_dev);
@@ -774,8 +773,7 @@ static void stm32_dfsdm_stop_conv(struct iio_dev *indio_dev)
stm32_dfsdm_stop_filter(adc->dfsdm, adc->fl_id);
- regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
- DFSDM_CR1_CFG_MASK, 0);
+ regmap_clear_bits(regmap, DFSDM_CR1(adc->fl_id), DFSDM_CR1_CFG_MASK);
stm32_dfsdm_stop_channel(indio_dev);
}
@@ -951,16 +949,14 @@ static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)
if (adc->nconv == 1 && !indio_dev->trig) {
/* Enable regular DMA transfer*/
- ret = regmap_update_bits(adc->dfsdm->regmap,
- DFSDM_CR1(adc->fl_id),
- DFSDM_CR1_RDMAEN_MASK,
- DFSDM_CR1_RDMAEN_MASK);
+ ret = regmap_set_bits(adc->dfsdm->regmap,
+ DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK);
} else {
/* Enable injected DMA transfer*/
- ret = regmap_update_bits(adc->dfsdm->regmap,
- DFSDM_CR1(adc->fl_id),
- DFSDM_CR1_JDMAEN_MASK,
- DFSDM_CR1_JDMAEN_MASK);
+ ret = regmap_set_bits(adc->dfsdm->regmap,
+ DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_JDMAEN_MASK);
}
if (ret < 0)
@@ -981,8 +977,8 @@ static void stm32_dfsdm_adc_dma_stop(struct iio_dev *indio_dev)
if (!adc->dma_chan)
return;
- regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR1(adc->fl_id),
- DFSDM_CR1_RDMAEN_MASK | DFSDM_CR1_JDMAEN_MASK, 0);
+ regmap_clear_bits(adc->dfsdm->regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK | DFSDM_CR1_JDMAEN_MASK);
dmaengine_terminate_all(adc->dma_chan);
}
@@ -1305,9 +1301,8 @@ static irqreturn_t stm32_dfsdm_irq(int irq, void *arg)
if (status & DFSDM_ISR_ROVRF_MASK) {
if (int_en & DFSDM_CR2_ROVRIE_MASK)
dev_warn(&indio_dev->dev, "Overrun detected\n");
- regmap_update_bits(regmap, DFSDM_ICR(adc->fl_id),
- DFSDM_ICR_CLRROVRF_MASK,
- DFSDM_ICR_CLRROVRF_MASK);
+ regmap_set_bits(regmap, DFSDM_ICR(adc->fl_id),
+ DFSDM_ICR_CLRROVRF_MASK);
}
return IRQ_HANDLED;
diff --git a/drivers/iio/adc/ti-adc108s102.c b/drivers/iio/adc/ti-adc108s102.c
index 69fcbbc7e418..9758ac801310 100644
--- a/drivers/iio/adc/ti-adc108s102.c
+++ b/drivers/iio/adc/ti-adc108s102.c
@@ -58,7 +58,6 @@
struct adc108s102_state {
struct spi_device *spi;
- struct regulator *reg;
u32 va_millivolt;
/* SPI transfer used by triggered buffer handler*/
struct spi_transfer ring_xfer;
@@ -216,11 +215,6 @@ static const struct iio_info adc108s102_info = {
.update_scan_mode = &adc108s102_update_scan_mode,
};
-static void adc108s102_reg_disable(void *reg)
-{
- regulator_disable(reg);
-}
-
static int adc108s102_probe(struct spi_device *spi)
{
struct adc108s102_state *st;
@@ -236,25 +230,9 @@ static int adc108s102_probe(struct spi_device *spi)
if (ACPI_COMPANION(&spi->dev)) {
st->va_millivolt = ADC108S102_VA_MV_ACPI_DEFAULT;
} else {
- st->reg = devm_regulator_get(&spi->dev, "vref");
- if (IS_ERR(st->reg))
- return PTR_ERR(st->reg);
-
- ret = regulator_enable(st->reg);
- if (ret < 0) {
- dev_err(&spi->dev, "Cannot enable vref regulator\n");
- return ret;
- }
- ret = devm_add_action_or_reset(&spi->dev, adc108s102_reg_disable,
- st->reg);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(st->reg);
- if (ret < 0) {
- dev_err(&spi->dev, "vref get voltage failed\n");
- return ret;
- }
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vref");
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret, "failed to get vref voltage\n");
st->va_millivolt = ret / 1000;
}
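The helper used above, devm_regulator_get_enable_read_voltage(), gets the named supply, enables it, registers the matching devm disable action and returns its voltage in microvolts or a negative errno. A minimal usage sketch, assuming a driver that can fall back to an internal reference when the supply is absent (variable names and the fallback value are illustrative):
	int uv = devm_regulator_get_enable_read_voltage(dev, "vref");

	if (uv == -ENODEV)
		uv = 2500000;	/* supply not described: use internal reference */
	else if (uv < 0)
		return uv;

	st->vref_mv = uv / 1000;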
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
index b789891dcf49..f7c78d0dd449 100644
--- a/drivers/iio/adc/ti-adc161s626.c
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -137,17 +137,13 @@ static int ti_adc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = ti_adc_read_measurement(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
-
- if (ret)
- return ret;
-
- return IIO_VAL_INT;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ ret = ti_adc_read_measurement(data, chan, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ }
+ unreachable();
case IIO_CHAN_INFO_SCALE:
ret = regulator_get_voltage(data->ref);
if (ret < 0)
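The scoped-claim conversion above relies on iio_device_claim_direct_scoped() holding direct mode only for the duration of the attached block and releasing it automatically on every exit path, which is why the explicit release call disappears; the trailing unreachable() only tells the compiler that control cannot fall out of the block, since every branch inside it returns. A minimal sketch (read_hw() is a hypothetical helper):
	case IIO_CHAN_INFO_RAW:
		iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
			ret = read_hw(data, chan, val);	/* hypothetical helper */
			if (ret)
				return ret;	/* direct mode released automatically */
			return IIO_VAL_INT;	/* direct mode released automatically */
		}
		unreachable();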
diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c
new file mode 100644
index 000000000000..630f5d5f9a60
--- /dev/null
+++ b/drivers/iio/adc/ti-ads1119.c
@@ -0,0 +1,825 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Texas Instruments ADS1119 ADC driver.
+ *
+ * Copyright 2024 Toradex
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/units.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#define ADS1119_CMD_RESET 0x06
+#define ADS1119_CMD_POWERDOWN 0x02
+#define ADS1119_CMD_START_SYNC 0x08
+#define ADS1119_CMD_RDATA 0x10
+#define ADS1119_CMD_RREG_CONFIG 0x20
+#define ADS1119_CMD_RREG_STATUS 0x24
+#define ADS1119_CMD_WREG 0x40
+
+#define ADS1119_CMD_RREG(reg) (0x20 | (reg) << 2)
+
+/* Config register */
+#define ADS1119_REG_CONFIG 0x00
+#define ADS1119_CONFIG_VREF_FIELD BIT(0)
+#define ADS1119_CONFIG_CM_FIELD BIT(1)
+#define ADS1119_CONFIG_DR_FIELD GENMASK(3, 2)
+#define ADS1119_CONFIG_GAIN_FIELD BIT(4)
+#define ADS1119_CONFIG_MUX_FIELD GENMASK(7, 5)
+
+#define ADS1119_VREF_INTERNAL 0
+#define ADS1119_VREF_EXTERNAL 1
+#define ADS1119_VREF_INTERNAL_VAL 2048000
+
+#define ADS1119_CM_SINGLE 0
+#define ADS1119_CM_CONTINUOUS 1
+
+#define ADS1119_DR_20_SPS 0
+#define ADS1119_DR_90_SPS 1
+#define ADS1119_DR_330_SPS 2
+#define ADS1119_DR_1000_SPS 3
+
+#define ADS1119_GAIN_1 0
+#define ADS1119_GAIN_4 1
+
+#define ADS1119_MUX_AIN0_AIN1 0
+#define ADS1119_MUX_AIN2_AIN3 1
+#define ADS1119_MUX_AIN1_AIN2 2
+#define ADS1119_MUX_AIN0 3
+#define ADS1119_MUX_AIN1 4
+#define ADS1119_MUX_AIN2 5
+#define ADS1119_MUX_AIN3 6
+#define ADS1119_MUX_SHORTED 7
+
+/* Status register */
+#define ADS1119_REG_STATUS 0x01
+#define ADS1119_STATUS_DRDY_FIELD BIT(7)
+
+#define ADS1119_DEFAULT_GAIN 1
+#define ADS1119_DEFAULT_DATARATE 20
+
+#define ADS1119_SUSPEND_DELAY 2000
+
+/* Timeout based on the minimum sample rate of 20 SPS (50000us) */
+#define ADS1119_MAX_DRDY_TIMEOUT 85000
+
+#define ADS1119_MAX_CHANNELS 7
+#define ADS1119_MAX_SINGLE_CHANNELS 4
+
+struct ads1119_channel_config {
+ int gain;
+ int datarate;
+ int mux;
+};
+
+struct ads1119_state {
+ struct completion completion;
+ struct i2c_client *client;
+ struct gpio_desc *reset_gpio;
+ struct iio_trigger *trig;
+ struct ads1119_channel_config *channels_cfg;
+ unsigned int num_channels_cfg;
+ unsigned int cached_config;
+ int vref_uV;
+};
+
+static const char * const ads1119_power_supplies[] = {
+ "avdd", "dvdd"
+};
+
+static const int ads1119_available_datarates[] = {
+ 20, 90, 330, 1000,
+};
+
+static const int ads1119_available_gains[] = {
+ 1, 1,
+ 1, 4,
+};
+
+static int ads1119_upd_cfg_reg(struct ads1119_state *st, unsigned int fields,
+ unsigned int val)
+{
+ unsigned int config = st->cached_config;
+ int ret;
+
+ config &= ~fields;
+ config |= val;
+
+ ret = i2c_smbus_write_byte_data(st->client, ADS1119_CMD_WREG, config);
+ if (ret)
+ return ret;
+
+ st->cached_config = config;
+
+ return 0;
+}
+
+static bool ads1119_data_ready(struct ads1119_state *st)
+{
+ int status;
+
+ status = i2c_smbus_read_byte_data(st->client, ADS1119_CMD_RREG_STATUS);
+ if (status < 0)
+ return false;
+
+ return FIELD_GET(ADS1119_STATUS_DRDY_FIELD, status);
+}
+
+static int ads1119_reset(struct ads1119_state *st)
+{
+ st->cached_config = 0;
+
+ if (!st->reset_gpio)
+ return i2c_smbus_write_byte(st->client, ADS1119_CMD_RESET);
+
+ gpiod_set_value_cansleep(st->reset_gpio, 1);
+ udelay(1);
+ gpiod_set_value_cansleep(st->reset_gpio, 0);
+ udelay(1);
+
+ return 0;
+}
+
+static int ads1119_set_conv_mode(struct ads1119_state *st, bool continuous)
+{
+ unsigned int mode;
+
+ if (continuous)
+ mode = ADS1119_CM_CONTINUOUS;
+ else
+ mode = ADS1119_CM_SINGLE;
+
+ return ads1119_upd_cfg_reg(st, ADS1119_CONFIG_CM_FIELD,
+ FIELD_PREP(ADS1119_CONFIG_CM_FIELD, mode));
+}
+
+static int ads1119_get_hw_gain(int gain)
+{
+ if (gain == 4)
+ return ADS1119_GAIN_4;
+ else
+ return ADS1119_GAIN_1;
+}
+
+static int ads1119_get_hw_datarate(int datarate)
+{
+ switch (datarate) {
+ case 90:
+ return ADS1119_DR_90_SPS;
+ case 330:
+ return ADS1119_DR_330_SPS;
+ case 1000:
+ return ADS1119_DR_1000_SPS;
+ case 20:
+ default:
+ return ADS1119_DR_20_SPS;
+ }
+}
+
+static int ads1119_configure_channel(struct ads1119_state *st, int mux,
+ int gain, int datarate)
+{
+ int ret;
+
+ ret = ads1119_upd_cfg_reg(st, ADS1119_CONFIG_MUX_FIELD,
+ FIELD_PREP(ADS1119_CONFIG_MUX_FIELD, mux));
+ if (ret)
+ return ret;
+
+ ret = ads1119_upd_cfg_reg(st, ADS1119_CONFIG_GAIN_FIELD,
+ FIELD_PREP(ADS1119_CONFIG_GAIN_FIELD,
+ ads1119_get_hw_gain(gain)));
+ if (ret)
+ return ret;
+
+ return ads1119_upd_cfg_reg(st, ADS1119_CONFIG_DR_FIELD,
+ FIELD_PREP(ADS1119_CONFIG_DR_FIELD,
+ ads1119_get_hw_datarate(datarate)));
+}
+
+static int ads1119_poll_data_ready(struct ads1119_state *st,
+ struct iio_chan_spec const *chan)
+{
+ unsigned int datarate = st->channels_cfg[chan->address].datarate;
+ unsigned long wait_time;
+ bool data_ready;
+
+ /* Poll 5 times more than the data rate */
+ wait_time = DIV_ROUND_CLOSEST(MICRO, 5 * datarate);
+
+ return read_poll_timeout(ads1119_data_ready, data_ready,
+ data_ready, wait_time,
+ ADS1119_MAX_DRDY_TIMEOUT, false, st);
+}
+
+static int ads1119_read_data(struct ads1119_state *st,
+ struct iio_chan_spec const *chan,
+ unsigned int *val)
+{
+ unsigned int timeout;
+ int ret = 0;
+
+ timeout = msecs_to_jiffies(ADS1119_MAX_DRDY_TIMEOUT);
+
+ if (!st->client->irq) {
+ ret = ads1119_poll_data_ready(st, chan);
+ if (ret)
+ return ret;
+ } else if (!wait_for_completion_timeout(&st->completion, timeout)) {
+ return -ETIMEDOUT;
+ }
+
+ ret = i2c_smbus_read_word_swapped(st->client, ADS1119_CMD_RDATA);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return 0;
+}
+
+static int ads1119_single_conversion(struct ads1119_state *st,
+ struct iio_chan_spec const *chan,
+ int *val,
+ bool calib_offset)
+{
+ struct device *dev = &st->client->dev;
+ int mux = st->channels_cfg[chan->address].mux;
+ int gain = st->channels_cfg[chan->address].gain;
+ int datarate = st->channels_cfg[chan->address].datarate;
+ unsigned int sample;
+ int ret;
+
+ if (calib_offset)
+ mux = ADS1119_MUX_SHORTED;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ goto pdown;
+
+ ret = ads1119_configure_channel(st, mux, gain, datarate);
+ if (ret)
+ goto pdown;
+
+ ret = i2c_smbus_write_byte(st->client, ADS1119_CMD_START_SYNC);
+ if (ret)
+ goto pdown;
+
+ ret = ads1119_read_data(st, chan, &sample);
+ if (ret)
+ goto pdown;
+
+ *val = sign_extend32(sample, chan->scan_type.realbits - 1);
+ ret = IIO_VAL_INT;
+pdown:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return ret;
+}
+
+static int ads1119_validate_datarate(struct ads1119_state *st, int datarate)
+{
+ switch (datarate) {
+ case 20:
+ case 90:
+ case 330:
+ case 1000:
+ return datarate;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads1119_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *type = IIO_VAL_FRACTIONAL;
+ *vals = ads1119_available_gains;
+ *length = ARRAY_SIZE(ads1119_available_gains);
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *type = IIO_VAL_INT;
+ *vals = ads1119_available_datarates;
+ *length = ARRAY_SIZE(ads1119_available_datarates);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads1119_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct ads1119_state *st = iio_priv(indio_dev);
+ unsigned int index = chan->address;
+
+ if (index >= st->num_channels_cfg)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return ads1119_single_conversion(st, chan, val, false);
+ unreachable();
+ case IIO_CHAN_INFO_OFFSET:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return ads1119_single_conversion(st, chan, val, true);
+ unreachable();
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->vref_uV / 1000;
+ *val /= st->channels_cfg[index].gain;
+ *val2 = chan->scan_type.realbits - 1;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->channels_cfg[index].datarate;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads1119_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct ads1119_state *st = iio_priv(indio_dev);
+ unsigned int index = chan->address;
+ int ret;
+
+ if (index >= st->num_channels_cfg)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ ret = MICRO / ((val * MICRO) + val2);
+ if (ret != 1 && ret != 4)
+ return -EINVAL;
+
+ st->channels_cfg[index].gain = ret;
+ return 0;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = ads1119_validate_datarate(st, val);
+ if (ret < 0)
+ return ret;
+
+ st->channels_cfg[index].datarate = ret;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads1119_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ads1119_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (reg > ADS1119_REG_STATUS)
+ return -EINVAL;
+
+ if (readval) {
+ ret = i2c_smbus_read_byte_data(st->client,
+ ADS1119_CMD_RREG(reg));
+ if (ret < 0)
+ return ret;
+
+ *readval = ret;
+ return 0;
+ }
+
+ if (reg > ADS1119_REG_CONFIG)
+ return -EINVAL;
+
+ return i2c_smbus_write_byte_data(st->client, ADS1119_CMD_WREG,
+ writeval);
+}
+
+static const struct iio_info ads1119_info = {
+ .read_avail = ads1119_read_avail,
+ .read_raw = ads1119_read_raw,
+ .write_raw = ads1119_write_raw,
+ .debugfs_reg_access = ads1119_debugfs_reg_access,
+};
+
+static int ads1119_triggered_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct ads1119_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->client->dev;
+ unsigned int index;
+ int ret;
+
+ index = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+
+ ret = ads1119_set_conv_mode(st, true);
+ if (ret)
+ return ret;
+
+ ret = ads1119_configure_channel(st,
+ st->channels_cfg[index].mux,
+ st->channels_cfg[index].gain,
+ st->channels_cfg[index].datarate);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ return i2c_smbus_write_byte(st->client, ADS1119_CMD_START_SYNC);
+}
+
+static int ads1119_triggered_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct ads1119_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->client->dev;
+ int ret;
+
+ ret = ads1119_set_conv_mode(st, false);
+ if (ret)
+ return ret;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops ads1119_buffer_setup_ops = {
+ .preenable = ads1119_triggered_buffer_preenable,
+ .postdisable = ads1119_triggered_buffer_postdisable,
+ .validate_scan_mask = &iio_validate_scan_mask_onehot,
+};
+
+static const struct iio_trigger_ops ads1119_trigger_ops = {
+ .validate_device = &iio_trigger_validate_own_device,
+};
+
+static irqreturn_t ads1119_irq_handler(int irq, void *dev_id)
+{
+ struct iio_dev *indio_dev = dev_id;
+ struct ads1119_state *st = iio_priv(indio_dev);
+
+ if (iio_buffer_enabled(indio_dev) && iio_trigger_using_own(indio_dev))
+ iio_trigger_poll(indio_dev->trig);
+ else
+ complete(&st->completion);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ads1119_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ads1119_state *st = iio_priv(indio_dev);
+ struct {
+ unsigned int sample;
+ s64 timestamp __aligned(8);
+ } scan;
+ unsigned int index;
+ int ret;
+
+ if (!iio_trigger_using_own(indio_dev)) {
+ index = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+
+ ret = ads1119_poll_data_ready(st, &indio_dev->channels[index]);
+ if (ret) {
+ dev_err(&st->client->dev,
+ "Failed to poll data on trigger (%d)\n", ret);
+ goto done;
+ }
+ }
+
+ ret = i2c_smbus_read_word_swapped(st->client, ADS1119_CMD_RDATA);
+ if (ret < 0) {
+ dev_err(&st->client->dev,
+ "Failed to read data on trigger (%d)\n", ret);
+ goto done;
+ }
+
+ scan.sample = ret;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
+ iio_get_time_ns(indio_dev));
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static int ads1119_init(struct ads1119_state *st, bool vref_external)
+{
+ int ret;
+
+ ret = ads1119_reset(st);
+ if (ret)
+ return ret;
+
+ if (vref_external)
+ return ads1119_upd_cfg_reg(st,
+ ADS1119_CONFIG_VREF_FIELD,
+ FIELD_PREP(ADS1119_CONFIG_VREF_FIELD,
+ ADS1119_VREF_EXTERNAL));
+ return 0;
+}
+
+static int ads1119_map_analog_inputs_mux(int ain_pos, int ain_neg,
+ bool differential)
+{
+ if (ain_pos >= ADS1119_MAX_SINGLE_CHANNELS)
+ return -EINVAL;
+
+ if (!differential)
+ return ADS1119_MUX_AIN0 + ain_pos;
+
+ if (ain_pos == 0 && ain_neg == 1)
+ return ADS1119_MUX_AIN0_AIN1;
+ else if (ain_pos == 1 && ain_neg == 2)
+ return ADS1119_MUX_AIN1_AIN2;
+ else if (ain_pos == 2 && ain_neg == 3)
+ return ADS1119_MUX_AIN2_AIN3;
+
+ return -EINVAL;
+}
+
+static int ads1119_alloc_and_config_channels(struct iio_dev *indio_dev)
+{
+ const struct iio_chan_spec ads1119_channel =
+ (const struct iio_chan_spec) {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ };
+ const struct iio_chan_spec ads1119_ts = IIO_CHAN_SOFT_TIMESTAMP(0);
+ struct ads1119_state *st = iio_priv(indio_dev);
+ struct iio_chan_spec *iio_channels, *chan;
+ struct device *dev = &st->client->dev;
+ unsigned int num_channels, i;
+ bool differential;
+ u32 ain[2];
+ int ret;
+
+ st->num_channels_cfg = device_get_child_node_count(dev);
+ if (st->num_channels_cfg > ADS1119_MAX_CHANNELS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many channels %d, max is %d\n",
+ st->num_channels_cfg,
+ ADS1119_MAX_CHANNELS);
+
+ st->channels_cfg = devm_kcalloc(dev, st->num_channels_cfg,
+ sizeof(*st->channels_cfg), GFP_KERNEL);
+ if (!st->channels_cfg)
+ return -ENOMEM;
+
+ /* Allocate one more iio channel for the timestamp */
+ num_channels = st->num_channels_cfg + 1;
+ iio_channels = devm_kcalloc(dev, num_channels, sizeof(*iio_channels),
+ GFP_KERNEL);
+ if (!iio_channels)
+ return -ENOMEM;
+
+ i = 0;
+
+ device_for_each_child_node_scoped(dev, child) {
+ chan = &iio_channels[i];
+
+ differential = fwnode_property_present(child, "diff-channels");
+ if (differential)
+ ret = fwnode_property_read_u32_array(child,
+ "diff-channels",
+ ain, 2);
+ else
+ ret = fwnode_property_read_u32(child, "single-channel",
+ &ain[0]);
+
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get channel property\n");
+
+ ret = ads1119_map_analog_inputs_mux(ain[0], ain[1],
+ differential);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Invalid channel value\n");
+
+ st->channels_cfg[i].mux = ret;
+ st->channels_cfg[i].gain = ADS1119_DEFAULT_GAIN;
+ st->channels_cfg[i].datarate = ADS1119_DEFAULT_DATARATE;
+
+ *chan = ads1119_channel;
+ chan->channel = ain[0];
+ chan->address = i;
+ chan->scan_index = i;
+
+ if (differential) {
+ chan->channel2 = ain[1];
+ chan->differential = 1;
+ }
+
+ dev_dbg(dev, "channel: index %d, mux %d\n", i,
+ st->channels_cfg[i].mux);
+
+ i++;
+ }
+
+ iio_channels[i] = ads1119_ts;
+ iio_channels[i].address = i;
+ iio_channels[i].scan_index = i;
+
+ indio_dev->channels = iio_channels;
+ indio_dev->num_channels = num_channels;
+
+ return 0;
+}
+
+static void ads1119_powerdown(void *data)
+{
+ struct ads1119_state *st = data;
+
+ i2c_smbus_write_byte(st->client, ADS1119_CMD_POWERDOWN);
+}
+
+static int ads1119_probe(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev;
+ struct ads1119_state *st;
+ struct device *dev = &client->dev;
+ bool vref_external = true;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed to allocate IIO device\n");
+
+ st = iio_priv(indio_dev);
+ st->client = client;
+
+ indio_dev->name = "ads1119";
+ indio_dev->info = &ads1119_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ i2c_set_clientdata(client, indio_dev);
+
+ ret = devm_regulator_bulk_get_enable(dev,
+ ARRAY_SIZE(ads1119_power_supplies),
+ ads1119_power_supplies);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get and enable supplies\n");
+
+ st->vref_uV = devm_regulator_get_enable_read_voltage(dev, "vref");
+ if (st->vref_uV == -ENODEV) {
+ vref_external = false;
+ st->vref_uV = ADS1119_VREF_INTERNAL_VAL;
+ } else if (st->vref_uV < 0) {
+ return dev_err_probe(dev, st->vref_uV, "Failed to get vref\n");
+ }
+
+ st->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(st->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->reset_gpio),
+ "Failed to get reset gpio\n");
+
+ ret = ads1119_alloc_and_config_channels(indio_dev);
+ if (ret)
+ return ret;
+
+ init_completion(&st->completion);
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ ads1119_trigger_handler,
+ &ads1119_buffer_setup_ops);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to setup IIO buffer\n");
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(dev, client->irq,
+ ads1119_irq_handler,
+ NULL, IRQF_TRIGGER_FALLING,
+ "ads1119", indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to allocate irq\n");
+
+ st->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
+ indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!st->trig)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed to allocate IIO trigger\n");
+
+ st->trig->ops = &ads1119_trigger_ops;
+ iio_trigger_set_drvdata(st->trig, indio_dev);
+
+ ret = devm_iio_trigger_register(dev, st->trig);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register IIO trigger\n");
+ }
+
+ ret = ads1119_init(st, vref_external);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to initialize device\n");
+
+ pm_runtime_set_autosuspend_delay(dev, ADS1119_SUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_set_active(dev);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable pm runtime\n");
+
+ ret = devm_add_action_or_reset(dev, ads1119_powerdown, st);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to add powerdown action\n");
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static int ads1119_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ads1119_state *st = iio_priv(indio_dev);
+
+ return i2c_smbus_write_byte(st->client, ADS1119_CMD_POWERDOWN);
+}
+
+/*
+ * No runtime resume callback is needed: the ADS1119 powers up automatically
+ * after a reset. A power-down command only switches off the analog circuitry
+ * while the digital interface keeps responding; the device powers back up on
+ * the next start/sync command.
+ */
+static DEFINE_RUNTIME_DEV_PM_OPS(ads1119_pm_ops, ads1119_runtime_suspend,
+ NULL, NULL);
+
+static const struct of_device_id __maybe_unused ads1119_of_match[] = {
+ { .compatible = "ti,ads1119" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ads1119_of_match);
+
+static const struct i2c_device_id ads1119_id[] = {
+ { "ads1119", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ads1119_id);
+
+static struct i2c_driver ads1119_driver = {
+ .driver = {
+ .name = "ads1119",
+ .of_match_table = ads1119_of_match,
+ .pm = pm_ptr(&ads1119_pm_ops),
+ },
+ .probe = ads1119_probe,
+ .id_table = ads1119_id,
+};
+module_i2c_driver(ads1119_driver);
+
+MODULE_AUTHOR("João Paulo Gonçalves <joao.goncalves@toradex.com>");
+MODULE_DESCRIPTION("Texas Instruments ADS1119 ADC Driver");
+MODULE_LICENSE("GPL");
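A quick sanity check of the scale the new driver reports through IIO_CHAN_INFO_SCALE, assuming (purely for illustration) the internal 2.048 V reference and the default gain of 1:
/*
 * scale = (vref_uV / 1000) / gain / 2^(realbits - 1)
 *       = 2048 mV / 1 / 2^15
 *       = 0.0625 mV per LSB
 *
 * so a raw sample of 16384 corresponds to 16384 * 0.0625 mV = 1024 mV,
 * i.e. half of the reference, as expected for a 16-bit sign-extended result.
 */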
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index cb04a29b3dba..96dd9366f8ff 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -802,9 +802,7 @@ static int ads131e08_probe(struct spi_device *spi)
unsigned long adc_clk_ns;
int ret;
- info = device_get_match_data(&spi->dev);
- if (!info)
- info = (void *)spi_get_device_id(spi)->driver_data;
+ info = spi_get_device_match_data(spi);
if (!info) {
dev_err(&spi->dev, "failed to get match data\n");
return -ENODEV;
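For reference, spi_get_device_match_data(), used here and again in the later tsc2046 and ad74413r hunks, folds the old two-step lookup into one call; it behaves roughly like this sketch (not the in-tree implementation):
static const void *demo_match_data(struct spi_device *spi)
{
	const void *data = device_get_match_data(&spi->dev);

	if (data)
		return data;

	/* fall back to the legacy spi_device_id table */
	return (const void *)spi_get_device_id(spi)->driver_data;
}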
diff --git a/drivers/iio/adc/ti-ads7924.c b/drivers/iio/adc/ti-ads7924.c
index afdbd04778a8..4da78302359b 100644
--- a/drivers/iio/adc/ti-ads7924.c
+++ b/drivers/iio/adc/ti-ads7924.c
@@ -447,7 +447,7 @@ static int ads7924_probe(struct i2c_client *client)
}
static const struct i2c_device_id ads7924_id[] = {
- { "ads7924", 0 },
+ { "ads7924" },
{}
};
MODULE_DEVICE_TABLE(i2c, ads7924_id);
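This and several later id-table cleanups drop the explicit ", 0" because .driver_data is unused; the omitted member is zero-initialised anyway. A minimal sketch (table name illustrative):
static const struct i2c_device_id demo_id[] = {
	{ "ads7924" },	/* .driver_data is implicitly 0 */
	{ }
};
MODULE_DEVICE_TABLE(i2c, demo_id);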
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 9440a268a78c..7a79f0cebfbf 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -65,7 +65,6 @@ struct ads8688_state {
struct mutex lock;
const struct ads8688_chip_info *chip_info;
struct spi_device *spi;
- struct regulator *reg;
unsigned int vref_mv;
enum ads8688_range range[8];
union {
@@ -423,28 +422,16 @@ static int ads8688_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
- st->reg = devm_regulator_get_optional(&spi->dev, "vref");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(st->reg);
- if (ret < 0)
- goto err_regulator_disable;
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
+ return ret;
- st->vref_mv = ret / 1000;
- } else {
- /* Use internal reference */
- st->vref_mv = ADS8688_VREF_MV;
- }
+ st->vref_mv = ret == -ENODEV ? ADS8688_VREF_MV : ret / 1000;
st->chip_info = &ads8688_chip_info_tbl[spi_get_device_id(spi)->driver_data];
spi->mode = SPI_MODE_1;
- spi_set_drvdata(spi, indio_dev);
-
st->spi = spi;
indio_dev->name = spi_get_device_id(spi)->name;
@@ -457,38 +444,13 @@ static int ads8688_probe(struct spi_device *spi)
mutex_init(&st->lock);
- ret = iio_triggered_buffer_setup(indio_dev, NULL, ads8688_trigger_handler, NULL);
- if (ret < 0) {
- dev_err(&spi->dev, "iio triggered buffer setup failed\n");
- goto err_regulator_disable;
- }
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto err_buffer_cleanup;
-
- return 0;
-
-err_buffer_cleanup:
- iio_triggered_buffer_cleanup(indio_dev);
-
-err_regulator_disable:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-
- return ret;
-}
-
-static void ads8688_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ads8688_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, NULL,
+ ads8688_trigger_handler, NULL);
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret,
+ "iio triggered buffer setup failed\n");
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id ads8688_id[] = {
@@ -511,7 +473,6 @@ static struct spi_driver ads8688_driver = {
.of_match_table = ads8688_of_match,
},
.probe = ads8688_probe,
- .remove = ads8688_remove,
.id_table = ads8688_id,
};
module_spi_driver(ads8688_driver);
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index 1bbb51a6683c..edcef8f11522 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -804,12 +804,7 @@ static int tsc2046_adc_probe(struct spi_device *spi)
return -EINVAL;
}
- dcfg = device_get_match_data(dev);
- if (!dcfg) {
- const struct spi_device_id *id = spi_get_device_id(spi);
-
- dcfg = (const struct tsc2046_adc_dcfg *)id->driver_data;
- }
+ dcfg = spi_get_device_match_data(spi);
if (!dcfg)
return -EINVAL;
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index f0b71a1220e0..f051358d6b50 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -222,7 +222,7 @@ enum ams_ps_pl_seq {
#define PL_SEQ(x) (AMS_PS_SEQ_MAX + (x))
#define AMS_CTRL_SEQ_BASE (AMS_PS_SEQ_MAX * 3)
-#define AMS_CHAN_TEMP(_scan_index, _addr) { \
+#define AMS_CHAN_TEMP(_scan_index, _addr, _name) { \
.type = IIO_TEMP, \
.indexed = 1, \
.address = (_addr), \
@@ -232,9 +232,10 @@ enum ams_ps_pl_seq {
.event_spec = ams_temp_events, \
.scan_index = _scan_index, \
.num_event_specs = ARRAY_SIZE(ams_temp_events), \
+ .datasheet_name = _name, \
}
-#define AMS_CHAN_VOLTAGE(_scan_index, _addr, _alarm) { \
+#define AMS_CHAN_VOLTAGE(_scan_index, _addr, _alarm, _name) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.address = (_addr), \
@@ -243,21 +244,24 @@ enum ams_ps_pl_seq {
.event_spec = (_alarm) ? ams_voltage_events : NULL, \
.scan_index = _scan_index, \
.num_event_specs = (_alarm) ? ARRAY_SIZE(ams_voltage_events) : 0, \
+ .datasheet_name = _name, \
}
-#define AMS_PS_CHAN_TEMP(_scan_index, _addr) \
- AMS_CHAN_TEMP(PS_SEQ(_scan_index), _addr)
-#define AMS_PS_CHAN_VOLTAGE(_scan_index, _addr) \
- AMS_CHAN_VOLTAGE(PS_SEQ(_scan_index), _addr, true)
+#define AMS_PS_CHAN_TEMP(_scan_index, _addr, _name) \
+ AMS_CHAN_TEMP(PS_SEQ(_scan_index), _addr, _name)
+#define AMS_PS_CHAN_VOLTAGE(_scan_index, _addr, _name) \
+ AMS_CHAN_VOLTAGE(PS_SEQ(_scan_index), _addr, true, _name)
-#define AMS_PL_CHAN_TEMP(_scan_index, _addr) \
- AMS_CHAN_TEMP(PL_SEQ(_scan_index), _addr)
-#define AMS_PL_CHAN_VOLTAGE(_scan_index, _addr, _alarm) \
- AMS_CHAN_VOLTAGE(PL_SEQ(_scan_index), _addr, _alarm)
+#define AMS_PL_CHAN_TEMP(_scan_index, _addr, _name) \
+ AMS_CHAN_TEMP(PL_SEQ(_scan_index), _addr, _name)
+#define AMS_PL_CHAN_VOLTAGE(_scan_index, _addr, _alarm, _name) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(_scan_index), _addr, _alarm, _name)
#define AMS_PL_AUX_CHAN_VOLTAGE(_auxno) \
- AMS_CHAN_VOLTAGE(PL_SEQ(AMS_SEQ(_auxno)), AMS_REG_VAUX(_auxno), false)
-#define AMS_CTRL_CHAN_VOLTAGE(_scan_index, _addr) \
- AMS_CHAN_VOLTAGE(PL_SEQ(AMS_SEQ(AMS_SEQ(_scan_index))), _addr, false)
+ AMS_CHAN_VOLTAGE(PL_SEQ(AMS_SEQ(_auxno)), AMS_REG_VAUX(_auxno), false, \
+ "VAUX" #_auxno)
+#define AMS_CTRL_CHAN_VOLTAGE(_scan_index, _addr, _name) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(AMS_SEQ(AMS_SEQ(_scan_index))), _addr, false, \
+ _name)
/**
* struct ams - This structure contains necessary state for xilinx-ams to operate
@@ -414,8 +418,12 @@ static void ams_enable_channel_sequence(struct iio_dev *indio_dev)
/* Run calibration of PS & PL as part of the sequence */
scan_mask = BIT(0) | BIT(AMS_PS_SEQ_MAX);
- for (i = 0; i < indio_dev->num_channels; i++)
- scan_mask |= BIT_ULL(indio_dev->channels[i].scan_index);
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ const struct iio_chan_spec *chan = &indio_dev->channels[i];
+
+ if (chan->scan_index < AMS_CTRL_SEQ_BASE)
+ scan_mask |= BIT_ULL(chan->scan_index);
+ }
if (ams->ps_base) {
/* put sysmon in a soft reset to change the sequence */
@@ -501,6 +509,12 @@ static int ams_init_device(struct ams *ams)
return 0;
}
+static int ams_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, char *label)
+{
+ return sysfs_emit(label, "%s\n", chan->datasheet_name);
+}
+
static int ams_enable_single_channel(struct ams *ams, unsigned int offset)
{
u8 channel_num;
@@ -1112,37 +1126,37 @@ static const struct iio_event_spec ams_voltage_events[] = {
};
static const struct iio_chan_spec ams_ps_channels[] = {
- AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP),
- AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP_REMOTE, AMS_TEMP_REMOTE),
- AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1),
- AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2),
- AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3),
- AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4),
- AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5),
- AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6),
- AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7),
- AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8),
- AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9),
- AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10),
- AMS_PS_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS),
+ AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP, "Temp_LPD"),
+ AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP_REMOTE, AMS_TEMP_REMOTE, "Temp_FPD"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, "VCC_PSINTLP"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, "VCC_PSINTFP"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, "VCC_PSAUX"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, "VCC_PSDDR"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, "VCC_PSIO3"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, "VCC_PSIO0"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, "VCC_PSIO1"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, "VCC_PSIO2"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, "PS_MGTRAVCC"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, "PS_MGTRAVTT"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, "VCC_PSADC"),
};
static const struct iio_chan_spec ams_pl_channels[] = {
- AMS_PL_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP),
- AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, true),
- AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, true),
- AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFP, AMS_VREFP, false),
- AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFN, AMS_VREFN, false),
- AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, true),
- AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, true),
- AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, true),
- AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, true),
- AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, true),
- AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VP_VN, AMS_VP_VN, false),
- AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, true),
- AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, true),
- AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, true),
- AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, true),
+ AMS_PL_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP, "Temp_PL"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, true, "VCCINT"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, true, "VCCAUX"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFP, AMS_VREFP, false, "VREFP"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFN, AMS_VREFN, false, "VREFN"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, true, "VCCBRAM"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, true, "VCC_PSINTLP"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, true, "VCC_PSINTFP"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, true, "VCC_PSAUX"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, true, "VCCAMS"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VP_VN, AMS_VP_VN, false, "VP_VN"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, true, "VUser0"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, true, "VUser1"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, true, "VUser2"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, true, "VUser3"),
AMS_PL_AUX_CHAN_VOLTAGE(0),
AMS_PL_AUX_CHAN_VOLTAGE(1),
AMS_PL_AUX_CHAN_VOLTAGE(2),
@@ -1162,13 +1176,13 @@ static const struct iio_chan_spec ams_pl_channels[] = {
};
static const struct iio_chan_spec ams_ctrl_channels[] = {
- AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSPLL, AMS_VCC_PSPLL0),
- AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSBATT, AMS_VCC_PSPLL3),
- AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCINT, AMS_VCCINT),
- AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCBRAM, AMS_VCCBRAM),
- AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCAUX, AMS_VCCAUX),
- AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_PSDDRPLL, AMS_PSDDRPLL),
- AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_INTDDR, AMS_PSINTFPDDR),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSPLL, AMS_VCC_PSPLL0, "VCC_PSPLL"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSBATT, AMS_VCC_PSPLL3, "VCC_PSBATT"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCINT, AMS_VCCINT, "VCCINT"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCBRAM, AMS_VCCBRAM, "VCCBRAM"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCAUX, AMS_VCCAUX, "VCCAUX"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_PSDDRPLL, AMS_PSDDRPLL, "VCC_PSDDR_PLL"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_INTDDR, AMS_PSINTFPDDR, "VCC_PSINTFP_DDR"),
};
static int ams_get_ext_chan(struct fwnode_handle *chan_node,
@@ -1332,6 +1346,7 @@ static int ams_parse_firmware(struct iio_dev *indio_dev)
}
static const struct iio_info iio_ams_info = {
+ .read_label = ams_read_label,
.read_raw = &ams_read_raw,
.read_event_config = &ams_read_event_config,
.write_event_config = &ams_write_event_config,
@@ -1430,5 +1445,6 @@ static struct platform_driver ams_driver = {
};
module_platform_driver(ams_driver);
+MODULE_DESCRIPTION("Xilinx AMS driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Xilinx, Inc.");
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index cd26a16dc0ff..2410d72da49b 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -1365,16 +1365,9 @@ static int ad74413r_probe(struct spi_device *spi)
st->spi = spi;
st->dev = &spi->dev;
- st->chip_info = device_get_match_data(&spi->dev);
- if (!st->chip_info) {
- const struct spi_device_id *id = spi_get_device_id(spi);
-
- if (id)
- st->chip_info =
- (struct ad74413r_chip_info *)id->driver_data;
- if (!st->chip_info)
- return -EINVAL;
- }
+ st->chip_info = spi_get_device_match_data(spi);
+ if (!st->chip_info)
+ return -EINVAL;
mutex_init(&st->lock);
init_completion(&st->adc_data_completion);
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 13b1a858969e..647f417a045e 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -4,6 +4,8 @@
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
+#include <linux/atomic.h>
+#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -14,6 +16,8 @@
#include <linux/poll.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
@@ -94,13 +98,18 @@ static void iio_buffer_block_release(struct kref *kref)
{
struct iio_dma_buffer_block *block = container_of(kref,
struct iio_dma_buffer_block, kref);
+ struct iio_dma_buffer_queue *queue = block->queue;
- WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
+ WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD);
- dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
- block->vaddr, block->phys_addr);
+ if (block->fileio) {
+ dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
+ block->vaddr, block->phys_addr);
+ } else {
+ atomic_dec(&queue->num_dmabufs);
+ }
- iio_buffer_put(&block->queue->buffer);
+ iio_buffer_put(&queue->buffer);
kfree(block);
}
@@ -163,7 +172,7 @@ static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
}
static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
- struct iio_dma_buffer_queue *queue, size_t size)
+ struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
{
struct iio_dma_buffer_block *block;
@@ -171,13 +180,16 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
if (!block)
return NULL;
- block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
- &block->phys_addr, GFP_KERNEL);
- if (!block->vaddr) {
- kfree(block);
- return NULL;
+ if (fileio) {
+ block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
+ &block->phys_addr, GFP_KERNEL);
+ if (!block->vaddr) {
+ kfree(block);
+ return NULL;
+ }
}
+ block->fileio = fileio;
block->size = size;
block->state = IIO_BLOCK_STATE_DONE;
block->queue = queue;
@@ -186,6 +198,9 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
iio_buffer_get(&queue->buffer);
+ if (!fileio)
+ atomic_inc(&queue->num_dmabufs);
+
return block;
}
@@ -218,13 +233,20 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
struct iio_dma_buffer_queue *queue = block->queue;
unsigned long flags;
+ bool cookie;
+
+ cookie = dma_fence_begin_signalling();
spin_lock_irqsave(&queue->list_lock, flags);
_iio_dma_buffer_block_done(block);
spin_unlock_irqrestore(&queue->list_lock, flags);
+ if (!block->fileio)
+ iio_buffer_signal_dmabuf_done(block->fence, 0);
+
iio_buffer_block_put_atomic(block);
iio_dma_buffer_queue_wake(queue);
+ dma_fence_end_signalling(cookie);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
@@ -243,17 +265,27 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
{
struct iio_dma_buffer_block *block, *_block;
unsigned long flags;
+ bool cookie;
+
+ cookie = dma_fence_begin_signalling();
spin_lock_irqsave(&queue->list_lock, flags);
list_for_each_entry_safe(block, _block, list, head) {
list_del(&block->head);
block->bytes_used = 0;
_iio_dma_buffer_block_done(block);
+
+ if (!block->fileio)
+ iio_buffer_signal_dmabuf_done(block->fence, -EINTR);
iio_buffer_block_put_atomic(block);
}
spin_unlock_irqrestore(&queue->list_lock, flags);
+ if (queue->fileio.enabled)
+ queue->fileio.enabled = false;
+
iio_dma_buffer_queue_wake(queue);
+ dma_fence_end_signalling(cookie);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
@@ -273,6 +305,16 @@ static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
}
}
+static bool iio_dma_buffer_can_use_fileio(struct iio_dma_buffer_queue *queue)
+{
+ /*
+ * Note that queue->num_dmabufs cannot increase while the queue is
+ * locked, it can only decrease, so it does not race against
+ * iio_dma_buffer_alloc_block().
+ */
+ return queue->fileio.enabled || !atomic_read(&queue->num_dmabufs);
+}
+
/**
* iio_dma_buffer_request_update() - DMA buffer request_update callback
* @buffer: The buffer which to request an update
@@ -299,6 +341,12 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
mutex_lock(&queue->lock);
+ queue->fileio.enabled = iio_dma_buffer_can_use_fileio(queue);
+
+ /* If DMABUFs were created, disable fileio interface */
+ if (!queue->fileio.enabled)
+ goto out_unlock;
+
/* Allocations are page aligned */
if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
try_reuse = true;
@@ -339,7 +387,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
}
if (!block) {
- block = iio_dma_buffer_alloc_block(queue, size);
+ block = iio_dma_buffer_alloc_block(queue, size, true);
if (!block) {
ret = -ENOMEM;
goto out_unlock;
@@ -412,8 +460,12 @@ static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
block->state = IIO_BLOCK_STATE_ACTIVE;
iio_buffer_block_get(block);
+
ret = queue->ops->submit(queue, block);
if (ret) {
+ if (!block->fileio)
+ iio_buffer_signal_dmabuf_done(block->fence, ret);
+
/*
* This is a bit of a problem and there is not much we can do
* other then wait for the buffer to be disabled and re-enabled
@@ -646,6 +698,110 @@ size_t iio_dma_buffer_usage(struct iio_buffer *buf)
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_usage);
+struct iio_dma_buffer_block *
+iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
+ struct dma_buf_attachment *attach)
+{
+ struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+ struct iio_dma_buffer_block *block;
+
+ guard(mutex)(&queue->lock);
+
+ /*
+ * If the buffer is enabled and in fileio mode new blocks can't be
+ * allocated.
+ */
+ if (queue->fileio.enabled)
+ return ERR_PTR(-EBUSY);
+
+ block = iio_dma_buffer_alloc_block(queue, attach->dmabuf->size, false);
+ if (!block)
+ return ERR_PTR(-ENOMEM);
+
+ /* Free memory that might be in use for fileio mode */
+ iio_dma_buffer_fileio_free(queue);
+
+ return block;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_attach_dmabuf);
+
+void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block)
+{
+ block->state = IIO_BLOCK_STATE_DEAD;
+ iio_buffer_block_put_atomic(block);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_detach_dmabuf);
+
+static int iio_dma_can_enqueue_block(struct iio_dma_buffer_block *block)
+{
+ struct iio_dma_buffer_queue *queue = block->queue;
+
+ /* If in fileio mode buffers can't be enqueued. */
+ if (queue->fileio.enabled)
+ return -EBUSY;
+
+ switch (block->state) {
+ case IIO_BLOCK_STATE_QUEUED:
+ return -EPERM;
+ case IIO_BLOCK_STATE_ACTIVE:
+ case IIO_BLOCK_STATE_DEAD:
+ return -EBUSY;
+ case IIO_BLOCK_STATE_DONE:
+ break;
+ }
+
+ return 0;
+}
+
+int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block,
+ struct dma_fence *fence,
+ struct sg_table *sgt,
+ size_t size, bool cyclic)
+{
+ struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+ bool cookie;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&queue->lock));
+
+ cookie = dma_fence_begin_signalling();
+
+ ret = iio_dma_can_enqueue_block(block);
+ if (ret < 0)
+ goto out_end_signalling;
+
+ block->bytes_used = size;
+ block->cyclic = cyclic;
+ block->sg_table = sgt;
+ block->fence = fence;
+
+ iio_dma_buffer_enqueue(queue, block);
+
+out_end_signalling:
+ dma_fence_end_signalling(cookie);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_enqueue_dmabuf);
+
+void iio_dma_buffer_lock_queue(struct iio_buffer *buffer)
+{
+ struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+
+ mutex_lock(&queue->lock);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_lock_queue);
+
+void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer)
+{
+ struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+
+ mutex_unlock(&queue->lock);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_unlock_queue);
+
/**
* iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
* @buffer: Buffer to set the bytes-per-datum for
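A rough sketch of how the new DMABUF hooks exported above fit together, as inferred from the helpers themselves (the exact call order is driven by the IIO DMABUF core, which is not part of this hunk):
/*
 * attach_dmabuf()  -> iio_dma_buffer_attach_dmabuf(): allocates a block with
 *                     fileio = false and frees any leftover fileio buffers;
 *                     while such blocks exist, request_update() keeps the
 *                     fileio interface disabled
 * enqueue_dmabuf() -> iio_dma_buffer_enqueue_dmabuf(), called with the queue
 *                     mutex held via lock_queue()/unlock_queue(): records the
 *                     sg_table, size and fence, then queues the block for the
 *                     driver's submit() hook
 * completion       -> iio_dma_buffer_block_done() or
 *                     iio_dma_buffer_block_list_abort(): signal the fence with
 *                     0 or -EINTR inside a dma_fence signalling section
 * detach_dmabuf()  -> iio_dma_buffer_detach_dmabuf(): marks the block dead and
 *                     drops the reference; the release path only decrements
 *                     num_dmabufs and does not call dma_free_coherent(), since
 *                     the memory belongs to the DMABUF exporter
 */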
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 918f6f8d65b6..12aa1412dfa0 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -65,25 +65,62 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
iio_buffer_to_dmaengine_buffer(&queue->buffer);
struct dma_async_tx_descriptor *desc;
enum dma_transfer_direction dma_dir;
+ struct scatterlist *sgl;
+ struct dma_vec *vecs;
size_t max_size;
dma_cookie_t cookie;
+ size_t len_total;
+ unsigned int i;
+ int nents;
max_size = min(block->size, dmaengine_buffer->max_size);
max_size = round_down(max_size, dmaengine_buffer->align);
- if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
- block->bytes_used = max_size;
+ if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
dma_dir = DMA_DEV_TO_MEM;
- } else {
+ else
dma_dir = DMA_MEM_TO_DEV;
- }
- if (!block->bytes_used || block->bytes_used > max_size)
- return -EINVAL;
+ if (block->sg_table) {
+ sgl = block->sg_table->sgl;
+ nents = sg_nents_for_len(sgl, block->bytes_used);
+ if (nents < 0)
+ return nents;
+
+ vecs = kmalloc_array(nents, sizeof(*vecs), GFP_ATOMIC);
+ if (!vecs)
+ return -ENOMEM;
+
+ len_total = block->bytes_used;
+
+ for (i = 0; i < nents; i++) {
+ vecs[i].addr = sg_dma_address(sgl);
+ vecs[i].len = min(sg_dma_len(sgl), len_total);
+ len_total -= vecs[i].len;
+
+ sgl = sg_next(sgl);
+ }
- desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
- block->phys_addr, block->bytes_used, dma_dir,
- DMA_PREP_INTERRUPT);
+ desc = dmaengine_prep_peripheral_dma_vec(dmaengine_buffer->chan,
+ vecs, nents, dma_dir,
+ DMA_PREP_INTERRUPT);
+ kfree(vecs);
+ } else {
+ max_size = min(block->size, dmaengine_buffer->max_size);
+ max_size = round_down(max_size, dmaengine_buffer->align);
+
+ if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
+ block->bytes_used = max_size;
+
+ if (!block->bytes_used || block->bytes_used > max_size)
+ return -EINVAL;
+
+ desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
+ block->phys_addr,
+ block->bytes_used,
+ dma_dir,
+ DMA_PREP_INTERRUPT);
+ }
if (!desc)
return -ENOMEM;
@@ -133,6 +170,13 @@ static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
.space_available = iio_dma_buffer_usage,
.release = iio_dmaengine_buffer_release,
+ .enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
+ .attach_dmabuf = iio_dma_buffer_attach_dmabuf,
+ .detach_dmabuf = iio_dma_buffer_detach_dmabuf,
+
+ .lock_queue = iio_dma_buffer_lock_queue,
+ .unlock_queue = iio_dma_buffer_unlock_queue,
+
.modes = INDIO_BUFFER_HARDWARE,
.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};
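A minimal sketch of the vector path added above, assuming an already DMA-mapped two-segment scatterlist (sgt, chan and the segment lengths are illustrative); each segment becomes one struct dma_vec entry, so a single descriptor can cover a non-contiguous DMABUF:
	struct dma_vec vecs[2] = {
		{ .addr = sg_dma_address(sgt->sgl),          .len = SZ_4K },
		{ .addr = sg_dma_address(sg_next(sgt->sgl)), .len = SZ_4K },
	};
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;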
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index 05b285f0eb22..38034c8bcc04 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -287,4 +287,5 @@ int devm_iio_kfifo_buffer_setup_ext(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_kfifo_buffer_setup_ext);
+MODULE_DESCRIPTION("Industrial I/O buffering based on kfifo");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index 02649ab81b3c..678a6adb9a75 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -76,6 +76,26 @@ config CCS811
Say Y here to build I2C interface support for the AMS
CCS811 VOC (Volatile Organic Compounds) sensor
+config ENS160
+ tristate "ScioSense ENS160 sensor driver"
+ depends on (I2C || SPI)
+ select REGMAP
+ select ENS160_I2C if I2C
+ select ENS160_SPI if SPI
+ help
+ Say yes here to build support for the ScioSense ENS160 multi-gas sensor.
+
+ This driver can also be built as a module. If so, the modules will be
+ called ens160_i2c for I2C support and ens160_spi for SPI support.
+
+config ENS160_I2C
+ tristate
+ select REGMAP_I2C
+
+config ENS160_SPI
+ tristate
+ select REGMAP_SPI
+
config IAQCORE
tristate "AMS iAQ-Core VOC sensors"
depends on I2C
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index 2f3dee8bb779..4866db06bdc9 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -11,6 +11,9 @@ obj-$(CONFIG_BME680) += bme680_core.o
obj-$(CONFIG_BME680_I2C) += bme680_i2c.o
obj-$(CONFIG_BME680_SPI) += bme680_spi.o
obj-$(CONFIG_CCS811) += ccs811.o
+obj-$(CONFIG_ENS160) += ens160_core.o
+obj-$(CONFIG_ENS160_I2C) += ens160_i2c.o
+obj-$(CONFIG_ENS160_SPI) += ens160_spi.o
obj-$(CONFIG_IAQCORE) += ams-iaq-core.o
obj-$(CONFIG_PMS7003) += pms7003.o
obj-$(CONFIG_SCD30_CORE) += scd30_core.o
diff --git a/drivers/iio/chemical/ams-iaq-core.c b/drivers/iio/chemical/ams-iaq-core.c
index 164facac5db6..10156d794092 100644
--- a/drivers/iio/chemical/ams-iaq-core.c
+++ b/drivers/iio/chemical/ams-iaq-core.c
@@ -24,7 +24,7 @@ struct ams_iaqcore_reading {
u8 status;
__be32 resistance;
__be16 voc_ppb;
-} __attribute__((__packed__));
+} __packed;
struct ams_iaqcore_data {
struct i2c_client *client;
@@ -163,7 +163,7 @@ static int ams_iaqcore_probe(struct i2c_client *client)
}
static const struct i2c_device_id ams_iaqcore_id[] = {
- { "ams-iaq-core", 0 },
+ { "ams-iaq-core" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ams_iaqcore_id);
diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
index 4edc5d21cb9f..f959252a4fe6 100644
--- a/drivers/iio/chemical/bme680.h
+++ b/drivers/iio/chemical/bme680.h
@@ -54,7 +54,9 @@
#define BME680_NB_CONV_MASK GENMASK(3, 0)
#define BME680_REG_MEAS_STAT_0 0x1D
+#define BME680_NEW_DATA_BIT BIT(7)
#define BME680_GAS_MEAS_BIT BIT(6)
+#define BME680_MEAS_BIT BIT(5)
/* Calibration Parameters */
#define BME680_T2_LSB_REG 0x8A
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index ef5e0e46fd34..500f56834b01 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -10,6 +10,7 @@
*/
#include <linux/acpi.h>
#include <linux/bitfield.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/log2.h>
@@ -38,7 +39,7 @@ struct bme680_calib {
s8 par_h3;
s8 par_h4;
s8 par_h5;
- s8 par_h6;
+ u8 par_h6;
s8 par_h7;
s8 par_gh1;
s16 par_gh2;
@@ -342,10 +343,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
if (!calib->par_t2)
bme680_read_calib(data, calib);
- var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
+ var1 = (adc_temp >> 3) - ((s32)calib->par_t1 << 1);
var2 = (var1 * calib->par_t2) >> 11;
var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
- var3 = (var3 * (calib->par_t3 << 4)) >> 14;
+ var3 = (var3 * ((s32)calib->par_t3 << 4)) >> 14;
data->t_fine = var2 + var3;
calc_temp = (data->t_fine * 5 + 128) >> 8;
@@ -368,9 +369,9 @@ static u32 bme680_compensate_press(struct bme680_data *data,
var1 = (data->t_fine >> 1) - 64000;
var2 = ((((var1 >> 2) * (var1 >> 2)) >> 11) * calib->par_p6) >> 2;
var2 = var2 + (var1 * calib->par_p5 << 1);
- var2 = (var2 >> 2) + (calib->par_p4 << 16);
+ var2 = (var2 >> 2) + ((s32)calib->par_p4 << 16);
var1 = (((((var1 >> 2) * (var1 >> 2)) >> 13) *
- (calib->par_p3 << 5)) >> 3) +
+ ((s32)calib->par_p3 << 5)) >> 3) +
((calib->par_p2 * var1) >> 1);
var1 = var1 >> 18;
var1 = ((32768 + var1) * calib->par_p1) >> 15;
@@ -388,7 +389,7 @@ static u32 bme680_compensate_press(struct bme680_data *data,
var3 = ((press_comp >> 8) * (press_comp >> 8) *
(press_comp >> 8) * calib->par_p10) >> 17;
- press_comp += (var1 + var2 + var3 + (calib->par_p7 << 7)) >> 4;
+ press_comp += (var1 + var2 + var3 + ((s32)calib->par_p7 << 7)) >> 4;
return press_comp;
}
@@ -414,7 +415,7 @@ static u32 bme680_compensate_humid(struct bme680_data *data,
(((temp_scaled * ((temp_scaled * calib->par_h5) / 100))
>> 6) / 100) + (1 << 14))) >> 10;
var3 = var1 * var2;
- var4 = calib->par_h6 << 7;
+ var4 = (s32)calib->par_h6 << 7;
var4 = (var4 + ((temp_scaled * calib->par_h7) / 100)) >> 4;
var5 = ((var3 >> 14) * (var3 >> 14)) >> 10;
var6 = (var4 * var5) >> 1;
@@ -532,6 +533,43 @@ static u8 bme680_oversampling_to_reg(u8 val)
return ilog2(val) + 1;
}
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/boschsensortec/BME68x_SensorAPI/blob/v4.4.8/bme68x.c#L490
+ */
+static int bme680_wait_for_eoc(struct bme680_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int check;
+ int ret;
+ /*
+ * (Sum of oversampling ratios * time per oversampling) +
+ * TPH measurement + gas measurement + wait transition from forced mode
+ * + heater duration
+ */
+ int wait_eoc_us = ((data->oversampling_temp + data->oversampling_press +
+ data->oversampling_humid) * 1936) + (477 * 4) +
+ (477 * 5) + 1000 + (data->heater_dur * 1000);
+
+ usleep_range(wait_eoc_us, wait_eoc_us + 100);
+
+ ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
+ if (ret) {
+ dev_err(dev, "failed to read measurement status register.\n");
+ return ret;
+ }
+ if (check & BME680_MEAS_BIT) {
+ dev_err(dev, "Device measurement cycle incomplete.\n");
+ return -EBUSY;
+ }
+ if (!(check & BME680_NEW_DATA_BIT)) {
+ dev_err(dev, "No new data available from the device.\n");
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
static int bme680_chip_config(struct bme680_data *data)
{
struct device *dev = regmap_get_device(data->regmap);
@@ -622,6 +660,10 @@ static int bme680_read_temp(struct bme680_data *data, int *val)
if (ret < 0)
return ret;
+ ret = bme680_wait_for_eoc(data);
+ if (ret)
+ return ret;
+
ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
&tmp, 3);
if (ret < 0) {
@@ -678,7 +720,7 @@ static int bme680_read_press(struct bme680_data *data,
}
*val = bme680_compensate_press(data, adc_press);
- *val2 = 100;
+ *val2 = 1000;
return IIO_VAL_FRACTIONAL;
}
@@ -738,6 +780,10 @@ static int bme680_read_gas(struct bme680_data *data,
if (ret < 0)
return ret;
+ ret = bme680_wait_for_eoc(data);
+ if (ret)
+ return ret;
+
ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
if (check & BME680_GAS_MEAS_BIT) {
dev_err(dev, "gas measurement incomplete\n");
diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c
index 1c7076cf91ca..7c4224d75955 100644
--- a/drivers/iio/chemical/bme680_i2c.c
+++ b/drivers/iio/chemical/bme680_i2c.c
@@ -36,8 +36,8 @@ static int bme680_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id bme680_i2c_id[] = {
- {"bme680", 0},
- {},
+ { "bme680" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, bme680_i2c_id);
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 87741f155c32..17d1bc518bf2 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -551,7 +551,7 @@ static void ccs811_remove(struct i2c_client *client)
}
static const struct i2c_device_id ccs811_id[] = {
- {"ccs811", 0},
+ { "ccs811" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ccs811_id);
diff --git a/drivers/iio/chemical/ens160.h b/drivers/iio/chemical/ens160.h
new file mode 100644
index 000000000000..f9f0575ce55d
--- /dev/null
+++ b/drivers/iio/chemical/ens160.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ENS160_H_
+#define ENS160_H_
+
+int devm_ens160_core_probe(struct device *dev, struct regmap *regmap, int irq,
+ const char *name);
+
+extern const struct dev_pm_ops ens160_pm_ops;
+
+#endif
diff --git a/drivers/iio/chemical/ens160_core.c b/drivers/iio/chemical/ens160_core.c
new file mode 100644
index 000000000000..c1aa3b498d3b
--- /dev/null
+++ b/drivers/iio/chemical/ens160_core.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ScioSense ENS160 multi-gas sensor driver
+ *
+ * Copyright (c) 2024 Gustavo Silva <gustavograzs@gmail.com>
+ *
+ * Datasheet:
+ * https://www.sciosense.com/wp-content/uploads/2023/12/ENS160-Datasheet.pdf
+ */
+
+#include <linux/bitfield.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "ens160.h"
+
+#define ENS160_PART_ID 0x160
+
+#define ENS160_BOOTING_TIME_MS 10U
+
+#define ENS160_REG_PART_ID 0x00
+
+#define ENS160_REG_OPMODE 0x10
+
+#define ENS160_REG_CONFIG 0x11
+#define ENS160_REG_CONFIG_INTEN BIT(0)
+#define ENS160_REG_CONFIG_INTDAT BIT(1)
+#define ENS160_REG_CONFIG_INT_CFG BIT(5)
+
+#define ENS160_REG_MODE_DEEP_SLEEP 0x00
+#define ENS160_REG_MODE_IDLE 0x01
+#define ENS160_REG_MODE_STANDARD 0x02
+#define ENS160_REG_MODE_RESET 0xF0
+
+#define ENS160_REG_COMMAND 0x12
+#define ENS160_REG_COMMAND_GET_APPVER 0x0E
+#define ENS160_REG_COMMAND_CLRGPR 0xCC
+
+#define ENS160_REG_TEMP_IN 0x13
+#define ENS160_REG_RH_IN 0x15
+#define ENS160_REG_DEVICE_STATUS 0x20
+#define ENS160_REG_DATA_AQI 0x21
+#define ENS160_REG_DATA_TVOC 0x22
+#define ENS160_REG_DATA_ECO2 0x24
+#define ENS160_REG_DATA_T 0x30
+#define ENS160_REG_DATA_RH 0x32
+#define ENS160_REG_GPR_READ4 0x4C
+
+#define ENS160_STATUS_VALIDITY_FLAG GENMASK(3, 2)
+
+#define ENS160_STATUS_NORMAL 0x00
+
+struct ens160_data {
+ struct regmap *regmap;
+ /* Protect reads from the sensor */
+ struct mutex mutex;
+ struct {
+ __le16 chans[2];
+ s64 timestamp __aligned(8);
+ } scan __aligned(IIO_DMA_MINALIGN);
+ u8 fw_version[3];
+ __le16 buf;
+};
+
+static const struct iio_chan_spec ens160_channels[] = {
+ {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_VOC,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = ENS160_REG_DATA_TVOC,
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_CO2,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = ENS160_REG_DATA_ECO2,
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static int ens160_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ens160_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ guard(mutex)(&data->mutex);
+ ret = regmap_bulk_read(data->regmap, chan->address,
+ &data->buf, sizeof(data->buf));
+ if (ret)
+ return ret;
+ *val = le16_to_cpu(data->buf);
+ return IIO_VAL_INT;
+ }
+ unreachable();
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->channel2) {
+ case IIO_MOD_CO2:
+ /* The sensor reads CO2 data as ppm */
+ *val = 0;
+ *val2 = 100;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_MOD_VOC:
+ /* The sensor reads VOC data as ppb */
+ *val = 0;
+ *val2 = 100;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ens160_set_mode(struct ens160_data *data, u8 mode)
+{
+ int ret;
+
+ ret = regmap_write(data->regmap, ENS160_REG_OPMODE, mode);
+ if (ret)
+ return ret;
+
+ msleep(ENS160_BOOTING_TIME_MS);
+
+ return 0;
+}
+
+static void ens160_set_idle(void *data)
+{
+ ens160_set_mode(data, ENS160_REG_MODE_IDLE);
+}
+
+static int ens160_chip_init(struct ens160_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int status;
+ int ret;
+
+ ret = ens160_set_mode(data, ENS160_REG_MODE_RESET);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, ENS160_REG_PART_ID, &data->buf,
+ sizeof(data->buf));
+ if (ret)
+ return ret;
+
+ if (le16_to_cpu(data->buf) != ENS160_PART_ID)
+ return -ENODEV;
+
+ ret = ens160_set_mode(data, ENS160_REG_MODE_IDLE);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap, ENS160_REG_COMMAND,
+ ENS160_REG_COMMAND_CLRGPR);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap, ENS160_REG_COMMAND,
+ ENS160_REG_COMMAND_GET_APPVER);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, ENS160_REG_GPR_READ4,
+ data->fw_version, sizeof(data->fw_version));
+ if (ret)
+ return ret;
+
+ dev_info(dev, "firmware version: %u.%u.%u\n", data->fw_version[2],
+ data->fw_version[1], data->fw_version[0]);
+
+ ret = ens160_set_mode(data, ENS160_REG_MODE_STANDARD);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, ens160_set_idle, data);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(data->regmap, ENS160_REG_DEVICE_STATUS, &status);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(ENS160_STATUS_VALIDITY_FLAG, status)
+ != ENS160_STATUS_NORMAL)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct iio_info ens160_info = {
+ .read_raw = ens160_read_raw,
+};
+
+static int ens160_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ens160_data *data = iio_priv(indio_dev);
+
+ return ens160_set_mode(data, ENS160_REG_MODE_DEEP_SLEEP);
+}
+
+static int ens160_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ens160_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = ens160_set_mode(data, ENS160_REG_MODE_IDLE);
+ if (ret)
+ return ret;
+
+ return ens160_set_mode(data, ENS160_REG_MODE_STANDARD);
+}
+EXPORT_NS_SIMPLE_DEV_PM_OPS(ens160_pm_ops, ens160_suspend, ens160_resume,
+ IIO_ENS160);
+
+static irqreturn_t ens160_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ens160_data *data = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&data->mutex);
+
+ ret = regmap_bulk_read(data->regmap, ENS160_REG_DATA_TVOC,
+ data->scan.chans, sizeof(data->scan.chans));
+ if (ret)
+ goto err;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ pf->timestamp);
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int ens160_set_trigger_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct ens160_data *data = iio_priv(indio_dev);
+ unsigned int int_bits = ENS160_REG_CONFIG_INTEN |
+ ENS160_REG_CONFIG_INTDAT |
+ ENS160_REG_CONFIG_INT_CFG;
+
+ if (state)
+ return regmap_set_bits(data->regmap, ENS160_REG_CONFIG,
+ int_bits);
+ else
+ return regmap_clear_bits(data->regmap, ENS160_REG_CONFIG,
+ int_bits);
+}
+
+static const struct iio_trigger_ops ens160_trigger_ops = {
+ .set_trigger_state = ens160_set_trigger_state,
+ .validate_device = iio_trigger_validate_own_device,
+};
+
+static int ens160_setup_trigger(struct iio_dev *indio_dev, int irq)
+{
+ struct device *dev = indio_dev->dev.parent;
+ struct iio_trigger *trig;
+ int ret;
+
+ trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!trig)
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to allocate trigger\n");
+
+ trig->ops = &ens160_trigger_ops;
+ iio_trigger_set_drvdata(trig, indio_dev);
+
+ ret = devm_iio_trigger_register(dev, trig);
+ if (ret)
+ return ret;
+
+ indio_dev->trig = iio_trigger_get(trig);
+
+ ret = devm_request_threaded_irq(dev, irq,
+ iio_trigger_generic_data_rdy_poll,
+ NULL,
+ IRQF_ONESHOT,
+ indio_dev->name,
+ indio_dev->trig);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq\n");
+
+ return 0;
+}
+
+int devm_ens160_core_probe(struct device *dev, struct regmap *regmap, int irq,
+ const char *name)
+{
+ struct ens160_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->regmap = regmap;
+
+ indio_dev->name = name;
+ indio_dev->info = &ens160_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ens160_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ens160_channels);
+
+ if (irq > 0) {
+ ret = ens160_setup_trigger(indio_dev, irq);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to setup trigger\n");
+ }
+
+ ret = ens160_chip_init(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "chip initialization failed\n");
+
+ mutex_init(&data->mutex);
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ ens160_trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+EXPORT_SYMBOL_NS(devm_ens160_core_probe, IIO_ENS160);
+
+MODULE_AUTHOR("Gustavo Silva <gustavograzs@gmail.com>");
+MODULE_DESCRIPTION("ScioSense ENS160 driver");
+MODULE_LICENSE("GPL v2");
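The scales reported by ens160_read_raw() express the raw readings in the IIO standard unit for concentration, percent by volume: 1 ppm is 100 micro-percent and 1 ppb is 100 nano-percent, hence 0/100 with INT_PLUS_MICRO for eCO2 and INT_PLUS_NANO for TVOC. A user-space conversion sketch (attribute values follow the generic in_concentration_*_raw / *_scale pattern and are illustrative):

#include <stdio.h>

/* value in percent = _raw * _scale, per the usual IIO contract */
static double concentration_percent(unsigned int raw, double scale)
{
	return raw * scale;
}

int main(void)
{
	/* eCO2: raw 400 * 0.000100 = 0.04 %, i.e. 400 ppm */
	printf("%g %%\n", concentration_percent(400, 0.000100));
	/* TVOC: raw 125 * 0.000000100 = 1.25e-05 %, i.e. 125 ppb */
	printf("%g %%\n", concentration_percent(125, 0.000000100));
	return 0;
}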
diff --git a/drivers/iio/chemical/ens160_i2c.c b/drivers/iio/chemical/ens160_i2c.c
new file mode 100644
index 000000000000..57a189a4c257
--- /dev/null
+++ b/drivers/iio/chemical/ens160_i2c.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ScioSense ENS160 multi-gas sensor I2C driver
+ *
+ * Copyright (c) 2024 Gustavo Silva <gustavograzs@gmail.com>
+ *
+ * 7-Bit I2C slave address is:
+ * - 0x52 if ADDR pin LOW
+ * - 0x53 if ADDR pin HIGH
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "ens160.h"
+
+static const struct regmap_config ens160_regmap_i2c_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int ens160_i2c_probe(struct i2c_client *client)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &ens160_regmap_i2c_conf);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(regmap),
+ "Failed to register i2c regmap\n");
+
+ return devm_ens160_core_probe(&client->dev, regmap, client->irq,
+ "ens160");
+}
+
+static const struct i2c_device_id ens160_i2c_id[] = {
+ { "ens160" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ens160_i2c_id);
+
+static const struct of_device_id ens160_of_i2c_match[] = {
+ { .compatible = "sciosense,ens160" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ens160_of_i2c_match);
+
+static struct i2c_driver ens160_i2c_driver = {
+ .driver = {
+ .name = "ens160",
+ .of_match_table = ens160_of_i2c_match,
+ .pm = pm_sleep_ptr(&ens160_pm_ops),
+ },
+ .probe = ens160_i2c_probe,
+ .id_table = ens160_i2c_id,
+};
+module_i2c_driver(ens160_i2c_driver);
+
+MODULE_AUTHOR("Gustavo Silva <gustavograzs@gmail.com>");
+MODULE_DESCRIPTION("ScioSense ENS160 I2C driver");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_ENS160);
diff --git a/drivers/iio/chemical/ens160_spi.c b/drivers/iio/chemical/ens160_spi.c
new file mode 100644
index 000000000000..10e4f5fd0f45
--- /dev/null
+++ b/drivers/iio/chemical/ens160_spi.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ScioSense ENS160 multi-gas sensor SPI driver
+ *
+ * Copyright (c) 2024 Gustavo Silva <gustavograzs@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "ens160.h"
+
+#define ENS160_SPI_READ BIT(0)
+
+static const struct regmap_config ens160_regmap_spi_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_shift = -1,
+ .read_flag_mask = ENS160_SPI_READ,
+};
+
+static int ens160_spi_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_spi(spi, &ens160_regmap_spi_conf);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&spi->dev, PTR_ERR(regmap),
+ "Failed to register spi regmap\n");
+
+ return devm_ens160_core_probe(&spi->dev, regmap, spi->irq, "ens160");
+}
+
+static const struct of_device_id ens160_spi_of_match[] = {
+ { .compatible = "sciosense,ens160" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ens160_spi_of_match);
+
+static const struct spi_device_id ens160_spi_id[] = {
+ { "ens160" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ens160_spi_id);
+
+static struct spi_driver ens160_spi_driver = {
+ .driver = {
+ .name = "ens160",
+ .of_match_table = ens160_spi_of_match,
+ .pm = pm_sleep_ptr(&ens160_pm_ops),
+ },
+ .probe = ens160_spi_probe,
+ .id_table = ens160_spi_id,
+};
+module_spi_driver(ens160_spi_driver);
+
+MODULE_AUTHOR("Gustavo Silva <gustavograzs@gmail.com>");
+MODULE_DESCRIPTION("ScioSense ENS160 SPI driver");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_ENS160);
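In the regmap configuration above, a negative .reg_shift requests an upshift (REGMAP_UPSHIFT), so on the wire the 7-bit register address occupies bits [7:1] of the first byte and .read_flag_mask sets bit 0 for reads, which matches the ENS160 SPI framing. A minimal sketch of the resulting command byte (helper name illustrative):

/* First SPI byte as regmap builds it with .reg_shift = -1 and
 * .read_flag_mask = ENS160_SPI_READ. */
static inline u8 ens160_spi_cmd(unsigned int reg, bool read)
{
	return (reg << 1) | (read ? ENS160_SPI_READ : 0);
}

/* e.g. reading ENS160_REG_PART_ID (0x00) sends 0x01; writing
 * ENS160_REG_OPMODE (0x10) sends 0x20. */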
diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
index fa205f17bd90..f44458c380d9 100644
--- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
+++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
@@ -60,11 +60,15 @@ EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_init, IIO_INV_SENSORS_TIMESTAMP);
int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
uint32_t period, bool fifo)
{
+ uint32_t mult;
+
/* when FIFO is on, prevent odr change if one is already pending */
if (fifo && ts->new_mult != 0)
return -EAGAIN;
- ts->new_mult = period / ts->chip.clock_period;
+ mult = period / ts->chip.clock_period;
+ if (mult != ts->mult)
+ ts->new_mult = mult;
return 0;
}
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index 0c2caf3570db..7190eaede7fb 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -626,12 +626,10 @@ scmi_alloc_iiodev(struct scmi_device *sdev,
SCMI_PROTOCOL_SENSOR, SCMI_EVENT_SENSOR_UPDATE,
&sensor->sensor_info->id,
&sensor->sensor_update_nb);
- if (ret) {
- dev_err(&iiodev->dev,
- "Error in registering sensor update notifier for sensor %s err %d",
- sensor->sensor_info->name, ret);
- return ERR_PTR(ret);
- }
+ if (ret)
+ return dev_err_ptr_probe(&iiodev->dev, ret,
+ "Error in registering sensor update notifier for sensor %s\n",
+ sensor->sensor_info->name);
scmi_iio_set_timestamp_channel(&iio_channels[i], i);
iiodev->channels = iio_channels;
@@ -653,10 +651,9 @@ static int scmi_iio_dev_probe(struct scmi_device *sdev)
return -ENODEV;
sensor_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_SENSOR, &ph);
- if (IS_ERR(sensor_ops)) {
- dev_err(dev, "SCMI device has no sensor interface\n");
- return PTR_ERR(sensor_ops);
- }
+ if (IS_ERR(sensor_ops))
+ return dev_err_probe(dev, PTR_ERR(sensor_ops),
+ "SCMI device has no sensor interface\n");
nr_sensors = sensor_ops->count_get(ph);
if (!nr_sensors) {
@@ -667,8 +664,8 @@ static int scmi_iio_dev_probe(struct scmi_device *sdev)
for (i = 0; i < nr_sensors; i++) {
sensor_info = sensor_ops->info_get(ph, i);
if (!sensor_info) {
- dev_err(dev, "SCMI sensor %d has missing info\n", i);
- return -EINVAL;
+ return dev_err_probe(dev, -EINVAL,
+ "SCMI sensor %d has missing info\n", i);
}
/* This driver only supports 3-axis accel and gyro, skipping other sensors */
@@ -683,29 +680,25 @@ static int scmi_iio_dev_probe(struct scmi_device *sdev)
scmi_iio_dev = scmi_alloc_iiodev(sdev, sensor_ops, ph,
sensor_info);
if (IS_ERR(scmi_iio_dev)) {
- dev_err(dev,
- "failed to allocate IIO device for sensor %s: %ld\n",
- sensor_info->name, PTR_ERR(scmi_iio_dev));
- return PTR_ERR(scmi_iio_dev);
+ return dev_err_probe(dev, PTR_ERR(scmi_iio_dev),
+ "failed to allocate IIO device for sensor %s\n",
+ sensor_info->name);
}
err = devm_iio_kfifo_buffer_setup(&scmi_iio_dev->dev,
scmi_iio_dev,
&scmi_iio_buffer_ops);
if (err < 0) {
- dev_err(dev,
- "IIO buffer setup error at sensor %s: %d\n",
- sensor_info->name, err);
- return err;
+ return dev_err_probe(dev, err,
+ "IIO buffer setup error at sensor %s\n",
+ sensor_info->name);
}
err = devm_iio_device_register(dev, scmi_iio_dev);
- if (err) {
- dev_err(dev,
- "IIO device registration failed at sensor %s: %d\n",
- sensor_info->name, err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err,
+ "IIO device registration failed at sensor %s\n",
+ sensor_info->name);
}
return err;
}
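The conversions in this file follow the common probe-path idiom: dev_err_probe() logs the message with the error code appended, stays quiet for -EPROBE_DEFER, and returns the error in one statement; dev_err_ptr_probe() does the same but hands back an ERR_PTR for pointer-returning paths. A minimal before/after sketch (function and message illustrative):

static int example_probe_step(struct device *dev, int ret)
{
	/* old shape:
	 *	if (ret) {
	 *		dev_err(dev, "sensor setup failed: %d\n", ret);
	 *		return ret;
	 *	}
	 */
	if (ret)
		return dev_err_probe(dev, ret, "sensor setup failed\n");

	return 0;
}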
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index c77d7bdcc121..c69399ac6657 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -606,10 +606,9 @@ int st_sensors_verify_id(struct iio_dev *indio_dev)
}
if (sdata->sensor_settings->wai != wai) {
- dev_err(&indio_dev->dev,
+ dev_warn(&indio_dev->dev,
"%s: WhoAmI mismatch (0x%x).\n",
indio_dev->name, wai);
- return -EINVAL;
}
}
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 3c2bf620f00f..a2596c2d3de3 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -133,7 +133,7 @@ config AD5624R_SPI
config AD9739A
tristate "Analog Devices AD9739A RF DAC spi driver"
- depends on SPI || COMPILE_TEST
+ depends on SPI
select REGMAP_SPI
select IIO_BACKEND
help
@@ -149,6 +149,7 @@ config AD9739A
config ADI_AXI_DAC
tristate "Analog Devices Generic AXI DAC IP core driver"
+ depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA || COMPILE_TEST
select IIO_BUFFER
select IIO_BUFFER_DMAENGINE
select REGMAP_MMIO
diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
index 8aa942896b5b..bd37d304ca70 100644
--- a/drivers/iio/dac/ad3552r.c
+++ b/drivers/iio/dac/ad3552r.c
@@ -117,7 +117,7 @@
#define AD3552R_REG_ADDR_CH_INPUT_24B(ch) (0x4B - (1 - ch) * 3)
/* Useful defines */
-#define AD3552R_NUM_CH 2
+#define AD3552R_MAX_CH 2
#define AD3552R_MASK_CH(ch) BIT(ch)
#define AD3552R_MASK_ALL_CH GENMASK(1, 0)
#define AD3552R_MAX_REG_SIZE 3
@@ -139,8 +139,10 @@ enum ad3552r_ch_vref_select {
AD3552R_EXTERNAL_VREF_PIN_INPUT
};
-enum ad3542r_id {
+enum ad3552r_id {
+ AD3541R_ID = 0x400b,
AD3542R_ID = 0x4009,
+ AD3551R_ID = 0x400a,
AD3552R_ID = 0x4008,
};
@@ -261,17 +263,26 @@ struct ad3552r_ch_data {
bool range_override;
};
+struct ad3552r_model_data {
+ const char *model_name;
+ enum ad3552r_id chip_id;
+ unsigned int num_hw_channels;
+ const s32 (*ranges_table)[2];
+ int num_ranges;
+ bool requires_output_range;
+};
+
struct ad3552r_desc {
+ const struct ad3552r_model_data *model_data;
/* Used to lock the spi bus for atomic operations where needed */
struct mutex lock;
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_ldac;
struct spi_device *spi;
- struct ad3552r_ch_data ch_data[AD3552R_NUM_CH];
- struct iio_chan_spec channels[AD3552R_NUM_CH + 1];
+ struct ad3552r_ch_data ch_data[AD3552R_MAX_CH];
+ struct iio_chan_spec channels[AD3552R_MAX_CH + 1];
unsigned long enabled_ch;
unsigned int num_ch;
- enum ad3542r_id chip_id;
};
static const u16 addr_mask_map[][2] = {
@@ -528,7 +539,7 @@ static int32_t ad3552r_trigger_hw_ldac(struct gpio_desc *ldac)
static int ad3552r_write_all_channels(struct ad3552r_desc *dac, u8 *data)
{
int err, len;
- u8 addr, buff[AD3552R_NUM_CH * AD3552R_MAX_REG_SIZE + 1];
+ u8 addr, buff[AD3552R_MAX_CH * AD3552R_MAX_REG_SIZE + 1];
addr = AD3552R_REG_ADDR_CH_INPUT_24B(1);
/* CH1 */
@@ -586,7 +597,7 @@ static irqreturn_t ad3552r_trigger_handler(int irq, void *p)
struct iio_buffer *buf = indio_dev->buffer;
struct ad3552r_desc *dac = iio_priv(indio_dev);
/* Maximum size of a scan */
- u8 buff[AD3552R_NUM_CH * AD3552R_MAX_REG_SIZE];
+ u8 buff[AD3552R_MAX_CH * AD3552R_MAX_REG_SIZE];
int err;
memset(buff, 0, sizeof(buff));
@@ -745,13 +756,8 @@ static void ad3552r_calc_gain_and_offset(struct ad3552r_desc *dac, s32 ch)
} else {
/* Normal range */
idx = dac->ch_data[ch].range;
- if (dac->chip_id == AD3542R_ID) {
- v_min = ad3542r_ch_ranges[idx][0];
- v_max = ad3542r_ch_ranges[idx][1];
- } else {
- v_min = ad3552r_ch_ranges[idx][0];
- v_max = ad3552r_ch_ranges[idx][1];
- }
+ v_min = dac->model_data->ranges_table[idx][0];
+ v_max = dac->model_data->ranges_table[idx][1];
}
/*
@@ -775,22 +781,14 @@ static void ad3552r_calc_gain_and_offset(struct ad3552r_desc *dac, s32 ch)
dac->ch_data[ch].offset_dec = div_s64(tmp, span);
}
-static int ad3552r_find_range(u16 id, s32 *vals)
+static int ad3552r_find_range(const struct ad3552r_model_data *model_data,
+ s32 *vals)
{
- int i, len;
- const s32 (*ranges)[2];
+ int i;
- if (id == AD3542R_ID) {
- len = ARRAY_SIZE(ad3542r_ch_ranges);
- ranges = ad3542r_ch_ranges;
- } else {
- len = ARRAY_SIZE(ad3552r_ch_ranges);
- ranges = ad3552r_ch_ranges;
- }
-
- for (i = 0; i < len; i++)
- if (vals[0] == ranges[i][0] * 1000 &&
- vals[1] == ranges[i][1] * 1000)
+ for (i = 0; i < model_data->num_ranges; i++)
+ if (vals[0] == model_data->ranges_table[i][0] * 1000 &&
+ vals[1] == model_data->ranges_table[i][1] * 1000)
return i;
return -EINVAL;
@@ -859,15 +857,9 @@ static int ad3552r_configure_custom_gain(struct ad3552r_desc *dac,
return 0;
}
-static void ad3552r_reg_disable(void *reg)
-{
- regulator_disable(reg);
-}
-
static int ad3552r_configure_device(struct ad3552r_desc *dac)
{
struct device *dev = &dac->spi->dev;
- struct regulator *vref;
int err, cnt = 0, voltage, delta = 100000;
u32 vals[2], val, ch;
@@ -876,30 +868,16 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac)
return dev_err_probe(dev, PTR_ERR(dac->gpio_ldac),
"Error getting gpio ldac");
- vref = devm_regulator_get_optional(dev, "vref");
- if (IS_ERR(vref)) {
- if (PTR_ERR(vref) != -ENODEV)
- return dev_err_probe(dev, PTR_ERR(vref),
- "Error getting vref");
+ voltage = devm_regulator_get_enable_read_voltage(dev, "vref");
+ if (voltage < 0 && voltage != -ENODEV)
+ return dev_err_probe(dev, voltage, "Error getting vref voltage\n");
+ if (voltage == -ENODEV) {
if (device_property_read_bool(dev, "adi,vref-out-en"))
val = AD3552R_INTERNAL_VREF_PIN_2P5V;
else
val = AD3552R_INTERNAL_VREF_PIN_FLOATING;
} else {
- err = regulator_enable(vref);
- if (err) {
- dev_err(dev, "Failed to enable external vref supply\n");
- return err;
- }
-
- err = devm_add_action_or_reset(dev, ad3552r_reg_disable, vref);
- if (err) {
- regulator_disable(vref);
- return err;
- }
-
- voltage = regulator_get_voltage(vref);
if (voltage > 2500000 + delta || voltage < 2500000 - delta) {
dev_warn(dev, "vref-supply must be 2.5V");
return -EINVAL;
@@ -940,10 +918,10 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac)
if (err)
return dev_err_probe(dev, err,
"mandatory reg property missing\n");
- if (ch >= AD3552R_NUM_CH)
+ if (ch >= dac->model_data->num_hw_channels)
return dev_err_probe(dev, -EINVAL,
"reg must be less than %d\n",
- AD3552R_NUM_CH);
+ dac->model_data->num_hw_channels);
if (fwnode_property_present(child, "adi,output-range-microvolt")) {
err = fwnode_property_read_u32_array(child,
@@ -954,7 +932,7 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac)
return dev_err_probe(dev, err,
"adi,output-range-microvolt property could not be parsed\n");
- err = ad3552r_find_range(dac->chip_id, vals);
+ err = ad3552r_find_range(dac->model_data, vals);
if (err < 0)
return dev_err_probe(dev, err,
"Invalid adi,output-range-microvolt value\n");
@@ -967,9 +945,10 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac)
return err;
dac->ch_data[ch].range = val;
- } else if (dac->chip_id == AD3542R_ID) {
+ } else if (dac->model_data->requires_output_range) {
return dev_err_probe(dev, -EINVAL,
- "adi,output-range-microvolt is required for ad3542r\n");
+ "adi,output-range-microvolt is required for %s\n",
+ dac->model_data->model_name);
} else {
err = ad3552r_configure_custom_gain(dac, child, ch);
if (err)
@@ -989,7 +968,8 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac)
}
/* Disable unused channels */
- for_each_clear_bit(ch, &dac->enabled_ch, AD3552R_NUM_CH) {
+ for_each_clear_bit(ch, &dac->enabled_ch,
+ dac->model_data->num_hw_channels) {
err = ad3552r_set_ch_value(dac, AD3552R_CH_AMPLIFIER_POWERDOWN,
ch, 1);
if (err)
@@ -1032,7 +1012,7 @@ static int ad3552r_init(struct ad3552r_desc *dac)
}
id |= val << 8;
- if (id != dac->chip_id) {
+ if (id != dac->model_data->chip_id) {
dev_err(&dac->spi->dev, "Product id not matching\n");
return -ENODEV;
}
@@ -1042,7 +1022,6 @@ static int ad3552r_init(struct ad3552r_desc *dac)
static int ad3552r_probe(struct spi_device *spi)
{
- const struct spi_device_id *id = spi_get_device_id(spi);
struct ad3552r_desc *dac;
struct iio_dev *indio_dev;
int err;
@@ -1053,7 +1032,9 @@ static int ad3552r_probe(struct spi_device *spi)
dac = iio_priv(indio_dev);
dac->spi = spi;
- dac->chip_id = id->driver_data;
+ dac->model_data = spi_get_device_match_data(spi);
+ if (!dac->model_data)
+ return -EINVAL;
mutex_init(&dac->lock);
@@ -1062,10 +1043,7 @@ static int ad3552r_probe(struct spi_device *spi)
return err;
/* Config triggered buffer device */
- if (dac->chip_id == AD3552R_ID)
- indio_dev->name = "ad3552r";
- else
- indio_dev->name = "ad3542r";
+ indio_dev->name = dac->model_data->model_name;
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &ad3552r_iio_info;
indio_dev->num_channels = dac->num_ch;
@@ -1083,16 +1061,68 @@ static int ad3552r_probe(struct spi_device *spi)
return devm_iio_device_register(&spi->dev, indio_dev);
}
+static const struct ad3552r_model_data ad3541r_model_data = {
+ .model_name = "ad3541r",
+ .chip_id = AD3541R_ID,
+ .num_hw_channels = 1,
+ .ranges_table = ad3542r_ch_ranges,
+ .num_ranges = ARRAY_SIZE(ad3542r_ch_ranges),
+ .requires_output_range = true,
+};
+
+static const struct ad3552r_model_data ad3542r_model_data = {
+ .model_name = "ad3542r",
+ .chip_id = AD3542R_ID,
+ .num_hw_channels = 2,
+ .ranges_table = ad3542r_ch_ranges,
+ .num_ranges = ARRAY_SIZE(ad3542r_ch_ranges),
+ .requires_output_range = true,
+};
+
+static const struct ad3552r_model_data ad3551r_model_data = {
+ .model_name = "ad3551r",
+ .chip_id = AD3551R_ID,
+ .num_hw_channels = 1,
+ .ranges_table = ad3552r_ch_ranges,
+ .num_ranges = ARRAY_SIZE(ad3552r_ch_ranges),
+ .requires_output_range = false,
+};
+
+static const struct ad3552r_model_data ad3552r_model_data = {
+ .model_name = "ad3552r",
+ .chip_id = AD3552R_ID,
+ .num_hw_channels = 2,
+ .ranges_table = ad3552r_ch_ranges,
+ .num_ranges = ARRAY_SIZE(ad3552r_ch_ranges),
+ .requires_output_range = false,
+};
+
static const struct spi_device_id ad3552r_id[] = {
- { "ad3542r", AD3542R_ID },
- { "ad3552r", AD3552R_ID },
+ {
+ .name = "ad3541r",
+ .driver_data = (kernel_ulong_t)&ad3541r_model_data
+ },
+ {
+ .name = "ad3542r",
+ .driver_data = (kernel_ulong_t)&ad3542r_model_data
+ },
+ {
+ .name = "ad3551r",
+ .driver_data = (kernel_ulong_t)&ad3551r_model_data
+ },
+ {
+ .name = "ad3552r",
+ .driver_data = (kernel_ulong_t)&ad3552r_model_data
+ },
{ }
};
MODULE_DEVICE_TABLE(spi, ad3552r_id);
static const struct of_device_id ad3552r_of_match[] = {
- { .compatible = "adi,ad3542r"},
- { .compatible = "adi,ad3552r"},
+ { .compatible = "adi,ad3541r", .data = &ad3541r_model_data },
+ { .compatible = "adi,ad3542r", .data = &ad3542r_model_data },
+ { .compatible = "adi,ad3551r", .data = &ad3551r_model_data },
+ { .compatible = "adi,ad3552r", .data = &ad3552r_model_data },
{ }
};
MODULE_DEVICE_TABLE(of, ad3552r_of_match);
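Pointing both the OF table (.data) and the SPI id table (.driver_data) at the same model structure lets spi_get_device_match_data() resolve the entry however the device was instantiated: firmware (OF/ACPI) match data is preferred, with the SPI id table as fallback, and NULL means neither table matched. The lookup shape in probe, as a minimal sketch:

static int example_probe(struct spi_device *spi)
{
	const struct ad3552r_model_data *model;

	/* Prefer of_device_id.data / ACPI match data, fall back to
	 * spi_device_id.driver_data; NULL means no table matched. */
	model = spi_get_device_match_data(spi);
	if (!model)
		return -EINVAL;

	dev_info(&spi->dev, "probing %s with %u channel(s)\n",
		 model->model_name, model->num_hw_channels);
	return 0;
}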
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 076bc9ecfb49..4763402dbcd6 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -415,7 +415,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
s64 tmp = *val * (3767897513LL / 25LL);
*val = div_s64_rem(tmp, 1000000000LL, val2);
- return IIO_VAL_INT_PLUS_MICRO;
+ return IIO_VAL_INT_PLUS_NANO;
}
mutex_lock(&st->lock);
diff --git a/drivers/iio/dac/adi-axi-dac.c b/drivers/iio/dac/adi-axi-dac.c
index 880d83a014a1..6d56428e623d 100644
--- a/drivers/iio/dac/adi-axi-dac.c
+++ b/drivers/iio/dac/adi-axi-dac.c
@@ -545,7 +545,8 @@ static int axi_dac_probe(struct platform_device *pdev)
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
- return PTR_ERR(clk);
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "failed to get clock\n");
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -555,7 +556,8 @@ static int axi_dac_probe(struct platform_device *pdev)
st->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&axi_dac_regmap_config);
if (IS_ERR(st->regmap))
- return PTR_ERR(st->regmap);
+ return dev_err_probe(&pdev->dev, PTR_ERR(st->regmap),
+ "failed to init register map\n");
/*
* Force disable the core. Up to the frontend to enable us. And we can
@@ -601,7 +603,8 @@ static int axi_dac_probe(struct platform_device *pdev)
mutex_init(&st->lock);
ret = devm_iio_backend_register(&pdev->dev, &axi_dac_generic, st);
if (ret)
- return ret;
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register iio backend\n");
dev_info(&pdev->dev, "AXI DAC IP core (%d.%.2d.%c) probed\n",
ADI_AXI_PCORE_VER_MAJOR(ver),
diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c
index c4b1ba30f935..af50d2a95898 100644
--- a/drivers/iio/dac/ltc2688.c
+++ b/drivers/iio/dac/ltc2688.c
@@ -860,9 +860,8 @@ static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref)
/* bring device out of reset */
gpiod_set_value_cansleep(gpio, 0);
} else {
- ret = regmap_update_bits(st->regmap, LTC2688_CMD_CONFIG,
- LTC2688_CONFIG_RST,
- LTC2688_CONFIG_RST);
+ ret = regmap_set_bits(st->regmap, LTC2688_CMD_CONFIG,
+ LTC2688_CONFIG_RST);
if (ret)
return ret;
}
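regmap_set_bits() and regmap_clear_bits(), used here and in the gyro, health and DAC hunks further down, are thin wrappers around regmap_update_bits(): setting is update_bits(map, reg, bits, bits) and clearing is update_bits(map, reg, bits, 0), so these conversions are behaviour-preserving shorthand. A minimal sketch of the equivalence (register names reused from the hunk above purely as placeholders):

static int example_set_then_clear(struct regmap *map)
{
	int ret;

	/* equivalent to regmap_update_bits(map, LTC2688_CMD_CONFIG,
	 *				     LTC2688_CONFIG_RST, LTC2688_CONFIG_RST) */
	ret = regmap_set_bits(map, LTC2688_CMD_CONFIG, LTC2688_CONFIG_RST);
	if (ret)
		return ret;

	/* equivalent to regmap_update_bits(map, LTC2688_CMD_CONFIG,
	 *				     LTC2688_CONFIG_RST, 0) */
	return regmap_clear_bits(map, LTC2688_CMD_CONFIG, LTC2688_CONFIG_RST);
}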
diff --git a/drivers/iio/dac/max5522.c b/drivers/iio/dac/max5522.c
index 05034a306597..9f72155dcbc7 100644
--- a/drivers/iio/dac/max5522.c
+++ b/drivers/iio/dac/max5522.c
@@ -132,7 +132,6 @@ static const struct regmap_config max5522_regmap_config = {
static int max5522_spi_probe(struct spi_device *spi)
{
- const struct spi_device_id *id = spi_get_device_id(spi);
struct iio_dev *indio_dev;
struct max5522_state *state;
int ret;
@@ -144,13 +143,9 @@ static int max5522_spi_probe(struct spi_device *spi)
}
state = iio_priv(indio_dev);
- state->chip_info = device_get_match_data(&spi->dev);
- if (!state->chip_info) {
- state->chip_info =
- (struct max5522_chip_info *)(id->driver_data);
- if (!state->chip_info)
- return -EINVAL;
- }
+ state->chip_info = spi_get_device_match_data(spi);
+ if (!state->chip_info)
+ return -EINVAL;
state->vrefin_reg = devm_regulator_get(&spi->dev, "vrefin");
if (IS_ERR(state->vrefin_reg))
diff --git a/drivers/iio/dac/mcp4728.c b/drivers/iio/dac/mcp4728.c
index 5113f67ddc31..c449ca949465 100644
--- a/drivers/iio/dac/mcp4728.c
+++ b/drivers/iio/dac/mcp4728.c
@@ -591,7 +591,7 @@ static int mcp4728_probe(struct i2c_client *client)
}
static const struct i2c_device_id mcp4728_id[] = {
- { "mcp4728", 0 },
+ { "mcp4728" },
{}
};
MODULE_DEVICE_TABLE(i2c, mcp4728_id);
diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c
index e150ac729154..2d567073996b 100644
--- a/drivers/iio/dac/stm32-dac-core.c
+++ b/drivers/iio/dac/stm32-dac-core.c
@@ -200,9 +200,8 @@ static int stm32_dac_core_resume(struct device *dev)
if (priv->common.hfsel) {
/* restore hfsel (maybe lost under low power state) */
- ret = regmap_update_bits(priv->common.regmap, STM32_DAC_CR,
- STM32H7_DAC_CR_HFSEL,
- STM32H7_DAC_CR_HFSEL);
+ ret = regmap_set_bits(priv->common.regmap, STM32_DAC_CR,
+ STM32H7_DAC_CR_HFSEL);
if (ret)
return ret;
}
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 4abf80f75ef5..e13e64a5164c 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -19,6 +19,7 @@
#include <linux/gpio/consumer.h>
#include <asm/div64.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -36,6 +37,9 @@ struct adf4350_state {
struct gpio_desc *lock_detect_gpiod;
struct adf4350_platform_data *pdata;
struct clk *clk;
+ struct clk *clkout;
+ const char *clk_out_name;
+ struct clk_hw hw;
unsigned long clkin;
unsigned long chspc; /* Channel Spacing */
unsigned long fpfd; /* Phase Frequency Detector */
@@ -61,6 +65,8 @@ struct adf4350_state {
__be32 val __aligned(IIO_DMA_MINALIGN);
};
+#define to_adf4350_state(_hw) container_of(_hw, struct adf4350_state, hw)
+
static struct adf4350_platform_data default_pdata = {
.channel_spacing = 10000,
.r2_user_settings = ADF4350_REG2_PD_POLARITY_POS |
@@ -381,6 +387,113 @@ static const struct iio_info adf4350_info = {
.debugfs_reg_access = &adf4350_reg_access,
};
+static void adf4350_clk_del_provider(void *data)
+{
+ struct adf4350_state *st = data;
+
+ of_clk_del_provider(st->spi->dev.of_node);
+}
+
+static unsigned long adf4350_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct adf4350_state *st = to_adf4350_state(hw);
+ unsigned long long tmp;
+
+ tmp = (u64)(st->r0_int * st->r1_mod + st->r0_fract) * st->fpfd;
+ do_div(tmp, st->r1_mod * (1 << st->r4_rf_div_sel));
+
+ return tmp;
+}
+
+static int adf4350_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct adf4350_state *st = to_adf4350_state(hw);
+
+ if (parent_rate == 0 || parent_rate > ADF4350_MAX_FREQ_REFIN)
+ return -EINVAL;
+
+ st->clkin = parent_rate;
+
+ return adf4350_set_freq(st, rate);
+}
+
+static int adf4350_clk_prepare(struct clk_hw *hw)
+{
+ struct adf4350_state *st = to_adf4350_state(hw);
+
+ st->regs[ADF4350_REG2] &= ~ADF4350_REG2_POWER_DOWN_EN;
+
+ return adf4350_sync_config(st);
+}
+
+static void adf4350_clk_unprepare(struct clk_hw *hw)
+{
+ struct adf4350_state *st = to_adf4350_state(hw);
+
+ st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
+
+ adf4350_sync_config(st);
+}
+
+static int adf4350_clk_is_enabled(struct clk_hw *hw)
+{
+ struct adf4350_state *st = to_adf4350_state(hw);
+
+ return (st->regs[ADF4350_REG2] & ADF4350_REG2_POWER_DOWN_EN);
+}
+
+static const struct clk_ops adf4350_clk_ops = {
+ .recalc_rate = adf4350_clk_recalc_rate,
+ .set_rate = adf4350_clk_set_rate,
+ .prepare = adf4350_clk_prepare,
+ .unprepare = adf4350_clk_unprepare,
+ .is_enabled = adf4350_clk_is_enabled,
+};
+
+static int adf4350_clk_register(struct adf4350_state *st)
+{
+ struct spi_device *spi = st->spi;
+ struct clk_init_data init;
+ struct clk *clk;
+ const char *parent_name;
+ int ret;
+
+ if (!device_property_present(&spi->dev, "#clock-cells"))
+ return 0;
+
+ if (device_property_read_string(&spi->dev, "clock-output-names", &init.name)) {
+ init.name = devm_kasprintf(&spi->dev, GFP_KERNEL, "%s-clk",
+ fwnode_get_name(dev_fwnode(&spi->dev)));
+ if (!init.name)
+ return -ENOMEM;
+ }
+
+ parent_name = of_clk_get_parent_name(spi->dev.of_node, 0);
+ if (!parent_name)
+ return -EINVAL;
+
+ init.ops = &adf4350_clk_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_PARENT;
+
+ st->hw.init = &init;
+ clk = devm_clk_register(&spi->dev, &st->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = of_clk_add_provider(spi->dev.of_node, of_clk_src_simple_get, clk);
+ if (ret)
+ return ret;
+
+ st->clkout = clk;
+
+ return devm_add_action_or_reset(&spi->dev, adf4350_clk_del_provider, st);
+}
+
static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
{
struct adf4350_platform_data *pdata;
@@ -522,8 +635,6 @@ static int adf4350_probe(struct spi_device *spi)
indio_dev->info = &adf4350_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = &adf4350_chan;
- indio_dev->num_channels = 1;
mutex_init(&st->lock);
@@ -551,6 +662,15 @@ static int adf4350_probe(struct spi_device *spi)
return ret;
}
+ ret = adf4350_clk_register(st);
+ if (ret)
+ return ret;
+
+ if (!st->clkout) {
+ indio_dev->channels = &adf4350_chan;
+ indio_dev->num_channels = 1;
+ }
+
ret = devm_add_action_or_reset(&spi->dev, adf4350_power_down, indio_dev);
if (ret)
return dev_err_probe(&spi->dev, ret,
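Once #clock-cells is present, the synthesizer output is exported through the common clock framework instead of the IIO frequency channel (note that the channel registration above is skipped when st->clkout is set). A downstream driver would then consume it with the ordinary clk API; a minimal sketch under that assumption:

static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *rfout;

	/* Grabs and prepares/enables the adf4350-provided clock. */
	rfout = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(rfout))
		return PTR_ERR(rfout);

	/* clk_set_rate() on this clock ends up in adf4350_clk_set_rate(),
	 * which reprograms the PLL via adf4350_set_freq(). */
	return clk_set_rate(rfout, 2400000000UL);
}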
diff --git a/drivers/iio/frequency/adrf6780.c b/drivers/iio/frequency/adrf6780.c
index b4defb82f37e..3f46032c9275 100644
--- a/drivers/iio/frequency/adrf6780.c
+++ b/drivers/iio/frequency/adrf6780.c
@@ -9,7 +9,6 @@
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index c95cf41be34b..da83adc684d0 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -221,13 +221,12 @@ static ssize_t adis16136_read_frequency(struct device *dev,
unsigned int freq;
int ret;
- adis_dev_lock(&adis16136->adis);
+ adis_dev_auto_lock(&adis16136->adis);
ret = __adis16136_get_freq(adis16136, &freq);
- adis_dev_unlock(&adis16136->adis);
if (ret)
return ret;
- return sprintf(buf, "%d\n", freq);
+ return sysfs_emit(buf, "%d\n", freq);
}
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
@@ -251,21 +250,17 @@ static int adis16136_set_filter(struct iio_dev *indio_dev, int val)
unsigned int freq;
int i, ret;
- adis_dev_lock(&adis16136->adis);
+ adis_dev_auto_lock(&adis16136->adis);
ret = __adis16136_get_freq(adis16136, &freq);
if (ret)
- goto out_unlock;
+ return ret;
for (i = ARRAY_SIZE(adis16136_3db_divisors) - 1; i >= 1; i--) {
if (freq / adis16136_3db_divisors[i] >= val)
break;
}
- ret = __adis_write_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, i);
-out_unlock:
- adis_dev_unlock(&adis16136->adis);
-
- return ret;
+ return __adis_write_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, i);
}
static int adis16136_get_filter(struct iio_dev *indio_dev, int *val)
@@ -275,23 +270,20 @@ static int adis16136_get_filter(struct iio_dev *indio_dev, int *val)
uint16_t val16;
int ret;
- adis_dev_lock(&adis16136->adis);
+ adis_dev_auto_lock(&adis16136->adis);
ret = __adis_read_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT,
&val16);
if (ret)
- goto err_unlock;
+ return ret;
ret = __adis16136_get_freq(adis16136, &freq);
if (ret)
- goto err_unlock;
+ return ret;
*val = freq / adis16136_3db_divisors[val16 & 0x07];
-err_unlock:
- adis_dev_unlock(&adis16136->adis);
-
- return ret ? ret : IIO_VAL_INT;
+ return IIO_VAL_INT;
}
static int adis16136_read_raw(struct iio_dev *indio_dev,
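adis_dev_auto_lock() and adis_dev_auto_scoped_lock() are not defined in this hunk; from the way the goto/unlock error paths above collapse into plain returns, they behave like scoped guards that release the device lock when the enclosing scope (or the scoped-lock block) ends, much like the guard(mutex)() uses elsewhere in this series. A minimal sketch of the underlying idea with a plain mutex:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static int example_locked_read(struct mutex *lock, unsigned int *val)
{
	/* Scoped guard: the mutex is released automatically on every
	 * return path, so no unlock label is needed. */
	guard(mutex)(lock);

	if (!val)
		return -EINVAL;		/* lock dropped here as well */

	*val = 42;
	return 0;			/* and here */
}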
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
index 112d635b7dfd..495b64a27061 100644
--- a/drivers/iio/gyro/adis16260.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -270,7 +270,6 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
{
struct adis16260 *adis16260 = iio_priv(indio_dev);
struct adis *adis = &adis16260->adis;
- int ret;
u8 addr;
u8 t;
@@ -288,7 +287,6 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
addr = adis16260_addresses[chan->scan_index][1];
return adis_write_reg_16(adis, addr, val);
case IIO_CHAN_INFO_SAMP_FREQ:
- adis_dev_lock(adis);
if (spi_get_device_id(adis->spi)->driver_data)
t = 256 / val;
else
@@ -298,15 +296,14 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
t = ADIS16260_SMPL_PRD_DIV_MASK;
else if (t > 0)
t--;
-
- if (t >= 0x0A)
- adis->spi->max_speed_hz = ADIS16260_SPI_SLOW;
- else
- adis->spi->max_speed_hz = ADIS16260_SPI_FAST;
- ret = __adis_write_reg_8(adis, ADIS16260_SMPL_PRD, t);
-
- adis_dev_unlock(adis);
- return ret;
+ adis_dev_auto_scoped_lock(adis) {
+ if (t >= 0x0A)
+ adis->spi->max_speed_hz = ADIS16260_SPI_SLOW;
+ else
+ adis->spi->max_speed_hz = ADIS16260_SPI_FAST;
+ return __adis_write_reg_8(adis, ADIS16260_SMPL_PRD, t);
+ }
+ unreachable();
}
return -EINVAL;
}
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 0e2eb0e98235..10728d5ccae3 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -285,8 +285,8 @@ static int bmg160_chip_init(struct bmg160_data *data)
data->slope_thres = val;
/* Set default interrupt mode */
- ret = regmap_update_bits(data->regmap, BMG160_REG_INT_EN_1,
- BMG160_INT1_BIT_OD, 0);
+ ret = regmap_clear_bits(data->regmap, BMG160_REG_INT_EN_1,
+ BMG160_INT1_BIT_OD);
if (ret < 0) {
dev_err(dev, "Error updating bits in reg_int_en_1\n");
return ret;
diff --git a/drivers/iio/gyro/bmg160_i2c.c b/drivers/iio/gyro/bmg160_i2c.c
index 9c8e20c25e96..672d0b720f61 100644
--- a/drivers/iio/gyro/bmg160_i2c.c
+++ b/drivers/iio/gyro/bmg160_i2c.c
@@ -47,9 +47,9 @@ static const struct acpi_device_id bmg160_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, bmg160_acpi_match);
static const struct i2c_device_id bmg160_i2c_id[] = {
- {"bmg160", 0},
- {"bmi055_gyro", 0},
- {"bmi088_gyro", 0},
+ { "bmg160" },
+ { "bmi055_gyro" },
+ { "bmi088_gyro" },
{}
};
diff --git a/drivers/iio/gyro/fxas21002c_i2c.c b/drivers/iio/gyro/fxas21002c_i2c.c
index ee7f21b718e2..b1318a1ea41b 100644
--- a/drivers/iio/gyro/fxas21002c_i2c.c
+++ b/drivers/iio/gyro/fxas21002c_i2c.c
@@ -39,7 +39,7 @@ static void fxas21002c_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id fxas21002c_i2c_id[] = {
- { "fxas21002c", 0 },
+ { "fxas21002c" },
{ }
};
MODULE_DEVICE_TABLE(i2c, fxas21002c_i2c_id);
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index 53fb92f0ac7e..cd8a2dae56cd 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -387,7 +387,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(itg3200_pm_ops, itg3200_suspend,
itg3200_resume);
static const struct i2c_device_id itg3200_id[] = {
- { "itg3200", 0 },
+ { "itg3200" },
{ }
};
MODULE_DEVICE_TABLE(i2c, itg3200_id);
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index a791ba3a693a..35af68b41408 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -197,8 +197,8 @@ static int mpu3050_start_sampling(struct mpu3050 *mpu3050)
int i;
/* Reset */
- ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
- MPU3050_PWR_MGM_RESET, MPU3050_PWR_MGM_RESET);
+ ret = regmap_set_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_RESET);
if (ret)
return ret;
@@ -513,12 +513,9 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
"FIFO overflow! Emptying and resetting FIFO\n");
fifo_overflow = true;
/* Reset and enable the FIFO */
- ret = regmap_update_bits(mpu3050->map,
- MPU3050_USR_CTRL,
- MPU3050_USR_CTRL_FIFO_EN |
- MPU3050_USR_CTRL_FIFO_RST,
- MPU3050_USR_CTRL_FIFO_EN |
- MPU3050_USR_CTRL_FIFO_RST);
+ ret = regmap_set_bits(mpu3050->map, MPU3050_USR_CTRL,
+ MPU3050_USR_CTRL_FIFO_EN |
+ MPU3050_USR_CTRL_FIFO_RST);
if (ret) {
dev_info(mpu3050->dev, "error resetting FIFO\n");
goto out_trigger_unlock;
@@ -799,10 +796,8 @@ static int mpu3050_hw_init(struct mpu3050 *mpu3050)
u64 otp;
/* Reset */
- ret = regmap_update_bits(mpu3050->map,
- MPU3050_PWR_MGM,
- MPU3050_PWR_MGM_RESET,
- MPU3050_PWR_MGM_RESET);
+ ret = regmap_set_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_RESET);
if (ret)
return ret;
@@ -872,8 +867,8 @@ static int mpu3050_power_up(struct mpu3050 *mpu3050)
msleep(200);
/* Take device out of sleep mode */
- ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
- MPU3050_PWR_MGM_SLEEP, 0);
+ ret = regmap_clear_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_SLEEP);
if (ret) {
regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
dev_err(mpu3050->dev, "error setting power mode\n");
@@ -895,8 +890,8 @@ static int mpu3050_power_down(struct mpu3050 *mpu3050)
* then we would be wasting power unless we go to sleep mode
* first.
*/
- ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
- MPU3050_PWR_MGM_SLEEP, MPU3050_PWR_MGM_SLEEP);
+ ret = regmap_set_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_SLEEP);
if (ret)
dev_err(mpu3050->dev, "error putting to sleep\n");
@@ -997,11 +992,9 @@ static int mpu3050_drdy_trigger_set_state(struct iio_trigger *trig,
return ret;
/* Reset and enable the FIFO */
- ret = regmap_update_bits(mpu3050->map, MPU3050_USR_CTRL,
- MPU3050_USR_CTRL_FIFO_EN |
- MPU3050_USR_CTRL_FIFO_RST,
- MPU3050_USR_CTRL_FIFO_EN |
- MPU3050_USR_CTRL_FIFO_RST);
+ ret = regmap_set_bits(mpu3050->map, MPU3050_USR_CTRL,
+ MPU3050_USR_CTRL_FIFO_EN |
+ MPU3050_USR_CTRL_FIFO_RST);
if (ret)
return ret;
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 1dbe48dae74e..52326dc521ac 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -422,9 +422,8 @@ static int afe4403_suspend(struct device *dev)
struct afe4403_data *afe = iio_priv(indio_dev);
int ret;
- ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
- AFE440X_CONTROL2_PDN_AFE,
- AFE440X_CONTROL2_PDN_AFE);
+ ret = regmap_set_bits(afe->regmap, AFE440X_CONTROL2,
+ AFE440X_CONTROL2_PDN_AFE);
if (ret)
return ret;
@@ -449,8 +448,8 @@ static int afe4403_resume(struct device *dev)
return ret;
}
- ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
- AFE440X_CONTROL2_PDN_AFE, 0);
+ ret = regmap_clear_bits(afe->regmap, AFE440X_CONTROL2,
+ AFE440X_CONTROL2_PDN_AFE);
if (ret)
return ret;
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 7768b07ef7a6..7f69baa1ed53 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -430,9 +430,8 @@ static int afe4404_suspend(struct device *dev)
struct afe4404_data *afe = iio_priv(indio_dev);
int ret;
- ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
- AFE440X_CONTROL2_PDN_AFE,
- AFE440X_CONTROL2_PDN_AFE);
+ ret = regmap_set_bits(afe->regmap, AFE440X_CONTROL2,
+ AFE440X_CONTROL2_PDN_AFE);
if (ret)
return ret;
@@ -457,8 +456,8 @@ static int afe4404_resume(struct device *dev)
return ret;
}
- ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
- AFE440X_CONTROL2_PDN_AFE, 0);
+ ret = regmap_clear_bits(afe->regmap, AFE440X_CONTROL2,
+ AFE440X_CONTROL2_PDN_AFE);
if (ret)
return ret;
@@ -582,7 +581,7 @@ static int afe4404_probe(struct i2c_client *client)
}
static const struct i2c_device_id afe4404_ids[] = {
- { "afe4404", 0 },
+ { "afe4404" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, afe4404_ids);
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 6236b4d96137..e08d143a707c 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -363,9 +363,8 @@ static int max30100_get_temp(struct max30100_data *data, int *val)
int ret;
/* start acquisition */
- ret = regmap_update_bits(data->regmap, MAX30100_REG_MODE_CONFIG,
- MAX30100_REG_MODE_CONFIG_TEMP_EN,
- MAX30100_REG_MODE_CONFIG_TEMP_EN);
+ ret = regmap_set_bits(data->regmap, MAX30100_REG_MODE_CONFIG,
+ MAX30100_REG_MODE_CONFIG_TEMP_EN);
if (ret)
return ret;
@@ -483,7 +482,7 @@ static void max30100_remove(struct i2c_client *client)
}
static const struct i2c_device_id max30100_id[] = {
- { "max30100", 0 },
+ { "max30100" },
{}
};
MODULE_DEVICE_TABLE(i2c, max30100_id);
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index 6616729af5b7..07a343e35a81 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -448,9 +448,8 @@ static int max30102_get_temp(struct max30102_data *data, int *val, bool en)
}
/* start acquisition */
- ret = regmap_update_bits(data->regmap, MAX30102_REG_TEMP_CONFIG,
- MAX30102_REG_TEMP_CONFIG_TEMP_EN,
- MAX30102_REG_TEMP_CONFIG_TEMP_EN);
+ ret = regmap_set_bits(data->regmap, MAX30102_REG_TEMP_CONFIG,
+ MAX30102_REG_TEMP_CONFIG_TEMP_EN);
if (ret)
goto out;
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index 37a35d1153d5..a56474be5dd2 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -253,7 +253,7 @@ static int am2315_probe(struct i2c_client *client)
}
static const struct i2c_device_id am2315_i2c_id[] = {
- {"am2315", 0},
+ { "am2315" },
{}
};
MODULE_DEVICE_TABLE(i2c, am2315_i2c_id);
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index 202014da2785..9b355380c9bf 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -396,12 +396,12 @@ static int hdc100x_probe(struct i2c_client *client)
}
static const struct i2c_device_id hdc100x_id[] = {
- { "hdc100x", 0 },
- { "hdc1000", 0 },
- { "hdc1008", 0 },
- { "hdc1010", 0 },
- { "hdc1050", 0 },
- { "hdc1080", 0 },
+ { "hdc100x" },
+ { "hdc1000" },
+ { "hdc1008" },
+ { "hdc1010" },
+ { "hdc1050" },
+ { "hdc1080" },
{ }
};
MODULE_DEVICE_TABLE(i2c, hdc100x_id);
diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c
index cdc4789213ba..a82dcc3da421 100644
--- a/drivers/iio/humidity/hdc3020.c
+++ b/drivers/iio/humidity/hdc3020.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm.h>
@@ -66,8 +67,10 @@
#define HDC3020_CRC8_POLYNOMIAL 0x31
-#define HDC3020_MIN_TEMP -40
-#define HDC3020_MAX_TEMP 125
+#define HDC3020_MIN_TEMP_MICRO -39872968
+#define HDC3020_MAX_TEMP_MICRO 124875639
+#define HDC3020_MAX_TEMP_HYST_MICRO 164748607
+#define HDC3020_MAX_HUM_MICRO 99220264
struct hdc3020_data {
struct i2c_client *client;
@@ -368,6 +371,105 @@ static int hdc3020_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int hdc3020_thresh_get_temp(u16 thresh)
+{
+ int temp;
+
+ /*
+ * Get the temperature threshold from 9 LSBs, shift them to get
+ * the truncated temperature threshold representation and
+ * calculate the threshold according to the formula in the
+ * datasheet. Result is degree celsius scaled by 65535.
+ */
+ temp = FIELD_GET(HDC3020_THRESH_TEMP_MASK, thresh) <<
+ HDC3020_THRESH_TEMP_TRUNC_SHIFT;
+
+ return -2949075 + (175 * temp);
+}
+
+static int hdc3020_thresh_get_hum(u16 thresh)
+{
+ int hum;
+
+ /*
+ * Get the humidity threshold from 7 MSBs, shift them to get the
+ * truncated humidity threshold representation and calculate the
+ * threshold according to the formula in the datasheet. Result is
+ * percent scaled by 65535.
+ */
+ hum = FIELD_GET(HDC3020_THRESH_HUM_MASK, thresh) <<
+ HDC3020_THRESH_HUM_TRUNC_SHIFT;
+
+ return hum * 100;
+}
+
+static u16 hdc3020_thresh_set_temp(int s_temp, u16 curr_thresh)
+{
+ u64 temp;
+ u16 thresh;
+
+ /*
+ * Calculate temperature threshold, shift it down to get the
+ * truncated threshold representation in the 9 LSBs while keeping
+ * the current humidity threshold in the 7 MSBs.
+ */
+ temp = (u64)(s_temp + 45000000) * 65535ULL;
+ temp = div_u64(temp, 1000000 * 175) >> HDC3020_THRESH_TEMP_TRUNC_SHIFT;
+ thresh = FIELD_PREP(HDC3020_THRESH_TEMP_MASK, temp);
+ thresh |= (FIELD_GET(HDC3020_THRESH_HUM_MASK, curr_thresh) <<
+ HDC3020_THRESH_HUM_TRUNC_SHIFT);
+
+ return thresh;
+}
+
+static u16 hdc3020_thresh_set_hum(int s_hum, u16 curr_thresh)
+{
+ u64 hum;
+ u16 thresh;
+
+ /*
+ * Calculate humidity threshold, shift it down and up to get the
+ * truncated threshold representation in the 7 MSBs while keeping
+ * the current temperature threshold in the 9 LSBs.
+ */
+ hum = (u64)(s_hum) * 65535ULL;
+ hum = div_u64(hum, 1000000 * 100) >> HDC3020_THRESH_HUM_TRUNC_SHIFT;
+ thresh = FIELD_PREP(HDC3020_THRESH_HUM_MASK, hum);
+ thresh |= FIELD_GET(HDC3020_THRESH_TEMP_MASK, curr_thresh);
+
+ return thresh;
+}
+
+static
+int hdc3020_thresh_clr(s64 s_thresh, s64 s_hyst, enum iio_event_direction dir)
+{
+ s64 s_clr;
+
+ /*
+ * Include directions when calculating the clear value,
+ * since hysteresis is unsigned by definition and the
+ * clear value is an absolute value which is signed.
+ */
+ if (dir == IIO_EV_DIR_RISING)
+ s_clr = s_thresh - s_hyst;
+ else
+ s_clr = s_thresh + s_hyst;
+
+ /* Divide by 65535 to get units of micro */
+ return div_s64(s_clr, 65535);
+}
+
+static int _hdc3020_write_thresh(struct hdc3020_data *data, u16 reg, u16 val)
+{
+ u8 buf[5];
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be16(val, buf + 2);
+ buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
+
+ return hdc3020_write_bytes(data, buf, 5);
+}
+
static int hdc3020_write_thresh(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
@@ -376,67 +478,126 @@ static int hdc3020_write_thresh(struct iio_dev *indio_dev,
int val, int val2)
{
struct hdc3020_data *data = iio_priv(indio_dev);
- u8 buf[5];
- u64 tmp;
- u16 reg;
- int ret;
-
- /* Supported temperature range is from –40 to 125 degree celsius */
- if (val < HDC3020_MIN_TEMP || val > HDC3020_MAX_TEMP)
- return -EINVAL;
-
- /* Select threshold register */
- if (info == IIO_EV_INFO_VALUE) {
- if (dir == IIO_EV_DIR_RISING)
- reg = HDC3020_S_T_RH_THRESH_HIGH;
- else
- reg = HDC3020_S_T_RH_THRESH_LOW;
+ u16 reg, reg_val, reg_thresh_rd, reg_clr_rd, reg_thresh_wr, reg_clr_wr;
+ s64 s_thresh, s_hyst, s_clr;
+ int s_val, thresh, clr, ret;
+
+ /* Select threshold registers */
+ if (dir == IIO_EV_DIR_RISING) {
+ reg_thresh_rd = HDC3020_R_T_RH_THRESH_HIGH;
+ reg_thresh_wr = HDC3020_S_T_RH_THRESH_HIGH;
+ reg_clr_rd = HDC3020_R_T_RH_THRESH_HIGH_CLR;
+ reg_clr_wr = HDC3020_S_T_RH_THRESH_HIGH_CLR;
} else {
- if (dir == IIO_EV_DIR_RISING)
- reg = HDC3020_S_T_RH_THRESH_HIGH_CLR;
- else
- reg = HDC3020_S_T_RH_THRESH_LOW_CLR;
+ reg_thresh_rd = HDC3020_R_T_RH_THRESH_LOW;
+ reg_thresh_wr = HDC3020_S_T_RH_THRESH_LOW;
+ reg_clr_rd = HDC3020_R_T_RH_THRESH_LOW_CLR;
+ reg_clr_wr = HDC3020_S_T_RH_THRESH_LOW_CLR;
}
guard(mutex)(&data->lock);
- ret = hdc3020_read_be16(data, reg);
+ ret = hdc3020_read_be16(data, reg_thresh_rd);
+ if (ret < 0)
+ return ret;
+
+ thresh = ret;
+ ret = hdc3020_read_be16(data, reg_clr_rd);
if (ret < 0)
return ret;
+ clr = ret;
+ /* Scale value to include decimal part into calculations */
+ s_val = (val < 0) ? (val * 1000000 - val2) : (val * 1000000 + val2);
switch (chan->type) {
case IIO_TEMP:
- /*
- * Calculate temperature threshold, shift it down to get the
- * truncated threshold representation in the 9LSBs while keeping
- * the current humidity threshold in the 7 MSBs.
- */
- tmp = ((u64)(((val + 45) * MICRO) + val2)) * 65535ULL;
- tmp = div_u64(tmp, MICRO * 175);
- val = tmp >> HDC3020_THRESH_TEMP_TRUNC_SHIFT;
- val = FIELD_PREP(HDC3020_THRESH_TEMP_MASK, val);
- val |= (FIELD_GET(HDC3020_THRESH_HUM_MASK, ret) <<
- HDC3020_THRESH_HUM_TRUNC_SHIFT);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ s_val = max(s_val, HDC3020_MIN_TEMP_MICRO);
+ s_val = min(s_val, HDC3020_MAX_TEMP_MICRO);
+ reg = reg_thresh_wr;
+ reg_val = hdc3020_thresh_set_temp(s_val, thresh);
+ ret = _hdc3020_write_thresh(data, reg, reg_val);
+ if (ret < 0)
+ return ret;
+
+ /* Calculate old hysteresis */
+ s_thresh = (s64)hdc3020_thresh_get_temp(thresh) * 1000000;
+ s_clr = (s64)hdc3020_thresh_get_temp(clr) * 1000000;
+ s_hyst = div_s64(abs(s_thresh - s_clr), 65535);
+ /* Set new threshold */
+ thresh = reg_val;
+ /* Set old hysteresis */
+ s_val = s_hyst;
+ fallthrough;
+ case IIO_EV_INFO_HYSTERESIS:
+ /*
+ * Function hdc3020_thresh_get_temp returns temperature
+ * in degree celsius scaled by 65535. Scale by 1000000
+ * to be able to subtract scaled hysteresis value.
+ */
+ s_thresh = (s64)hdc3020_thresh_get_temp(thresh) * 1000000;
+ /*
+ * Units of s_val are in micro degree celsius, scale by
+ * 65535 to get same units as s_thresh.
+ */
+ s_val = min(abs(s_val), HDC3020_MAX_TEMP_HYST_MICRO);
+ s_hyst = (s64)s_val * 65535;
+ s_clr = hdc3020_thresh_clr(s_thresh, s_hyst, dir);
+ s_clr = max(s_clr, HDC3020_MIN_TEMP_MICRO);
+ s_clr = min(s_clr, HDC3020_MAX_TEMP_MICRO);
+ reg = reg_clr_wr;
+ reg_val = hdc3020_thresh_set_temp(s_clr, clr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
break;
case IIO_HUMIDITYRELATIVE:
- /*
- * Calculate humidity threshold, shift it down and up to get the
- * truncated threshold representation in the 7MSBs while keeping
- * the current temperature threshold in the 9 LSBs.
- */
- tmp = ((u64)((val * MICRO) + val2)) * 65535ULL;
- tmp = div_u64(tmp, MICRO * 100);
- val = tmp >> HDC3020_THRESH_HUM_TRUNC_SHIFT;
- val = FIELD_PREP(HDC3020_THRESH_HUM_MASK, val);
- val |= FIELD_GET(HDC3020_THRESH_TEMP_MASK, ret);
+ s_val = (s_val < 0) ? 0 : min(s_val, HDC3020_MAX_HUM_MICRO);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ reg = reg_thresh_wr;
+ reg_val = hdc3020_thresh_set_hum(s_val, thresh);
+ ret = _hdc3020_write_thresh(data, reg, reg_val);
+ if (ret < 0)
+ return ret;
+
+ /* Calculate old hysteresis */
+ s_thresh = (s64)hdc3020_thresh_get_hum(thresh) * 1000000;
+ s_clr = (s64)hdc3020_thresh_get_hum(clr) * 1000000;
+ s_hyst = div_s64(abs(s_thresh - s_clr), 65535);
+ /* Set new threshold */
+ thresh = reg_val;
+ /* Try to set old hysteresis */
+ s_val = min(abs(s_hyst), HDC3020_MAX_HUM_MICRO);
+ fallthrough;
+ case IIO_EV_INFO_HYSTERESIS:
+ /*
+ * Function hdc3020_thresh_get_hum returns relative
+ * humidity in percent scaled by 65535. Scale by 1000000
+ * to be able to subtract scaled hysteresis value.
+ */
+ s_thresh = (s64)hdc3020_thresh_get_hum(thresh) * 1000000;
+ /*
+ * Units of s_val are in micro percent, scale by 65535
+ * to get same units as s_thresh.
+ */
+ s_hyst = (s64)s_val * 65535;
+ s_clr = hdc3020_thresh_clr(s_thresh, s_hyst, dir);
+ s_clr = max(s_clr, 0);
+ s_clr = min(s_clr, HDC3020_MAX_HUM_MICRO);
+ reg = reg_clr_wr;
+ reg_val = hdc3020_thresh_set_hum(s_clr, clr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
break;
default:
return -EOPNOTSUPP;
}
- put_unaligned_be16(reg, buf);
- put_unaligned_be16(val, buf + 2);
- buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
- return hdc3020_write_bytes(data, buf, 5);
+ return _hdc3020_write_thresh(data, reg, reg_val);
}
static int hdc3020_read_thresh(struct iio_dev *indio_dev,
@@ -447,48 +608,60 @@ static int hdc3020_read_thresh(struct iio_dev *indio_dev,
int *val, int *val2)
{
struct hdc3020_data *data = iio_priv(indio_dev);
- u16 reg;
- int ret;
+ u16 reg_thresh, reg_clr;
+ int thresh, clr, ret;
- /* Select threshold register */
- if (info == IIO_EV_INFO_VALUE) {
- if (dir == IIO_EV_DIR_RISING)
- reg = HDC3020_R_T_RH_THRESH_HIGH;
- else
- reg = HDC3020_R_T_RH_THRESH_LOW;
+ /* Select threshold registers */
+ if (dir == IIO_EV_DIR_RISING) {
+ reg_thresh = HDC3020_R_T_RH_THRESH_HIGH;
+ reg_clr = HDC3020_R_T_RH_THRESH_HIGH_CLR;
} else {
- if (dir == IIO_EV_DIR_RISING)
- reg = HDC3020_R_T_RH_THRESH_HIGH_CLR;
- else
- reg = HDC3020_R_T_RH_THRESH_LOW_CLR;
+ reg_thresh = HDC3020_R_T_RH_THRESH_LOW;
+ reg_clr = HDC3020_R_T_RH_THRESH_LOW_CLR;
}
guard(mutex)(&data->lock);
- ret = hdc3020_read_be16(data, reg);
+ ret = hdc3020_read_be16(data, reg_thresh);
if (ret < 0)
return ret;
switch (chan->type) {
case IIO_TEMP:
- /*
- * Get the temperature threshold from 9 LSBs, shift them to get
- * the truncated temperature threshold representation and
- * calculate the threshold according to the formula in the
- * datasheet.
- */
- *val = FIELD_GET(HDC3020_THRESH_TEMP_MASK, ret);
- *val = *val << HDC3020_THRESH_TEMP_TRUNC_SHIFT;
- *val = -2949075 + (175 * (*val));
+ thresh = hdc3020_thresh_get_temp(ret);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = thresh;
+ break;
+ case IIO_EV_INFO_HYSTERESIS:
+ ret = hdc3020_read_be16(data, reg_clr);
+ if (ret < 0)
+ return ret;
+
+ clr = hdc3020_thresh_get_temp(ret);
+ *val = abs(thresh - clr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
*val2 = 65535;
return IIO_VAL_FRACTIONAL;
case IIO_HUMIDITYRELATIVE:
- /*
- * Get the humidity threshold from 7 MSBs, shift them to get the
- * truncated humidity threshold representation and calculate the
- * threshold according to the formula in the datasheet.
- */
- *val = FIELD_GET(HDC3020_THRESH_HUM_MASK, ret);
- *val = (*val << HDC3020_THRESH_HUM_TRUNC_SHIFT) * 100;
+ thresh = hdc3020_thresh_get_hum(ret);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = thresh;
+ break;
+ case IIO_EV_INFO_HYSTERESIS:
+ ret = hdc3020_read_be16(data, reg_clr);
+ if (ret < 0)
+ return ret;
+
+ clr = hdc3020_thresh_get_hum(ret);
+ *val = abs(thresh - clr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
*val2 = 65535;
return IIO_VAL_FRACTIONAL;
default:
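
For reference, the humidity threshold handling above works entirely in fixed-point units: register values map to percent relative humidity scaled by 65535, while user-supplied hysteresis arrives in micro-percent and therefore has to be multiplied by 65535 before it can be subtracted from a register-derived threshold. A small standalone sketch of that arithmetic; the 9-bit truncation shift and the helper name are assumptions made for illustration, not the driver's code:

/* Standalone check of the humidity threshold/hysteresis scaling. */
#include <stdint.h>
#include <stdio.h>

/* Illustrative: 7 MSBs of the register hold the truncated humidity threshold. */
static int64_t hum_percent_x65535(uint16_t raw7)
{
	return (int64_t)(raw7 << 9) * 100;
}

int main(void)
{
	int64_t thresh = hum_percent_x65535(0x40) * 1000000; /* %RH * 65535 * 1e6 */
	int64_t hyst   = 2500000LL * 65535;                   /* 2.5 %RH hysteresis */
	int64_t clr    = thresh - hyst;                       /* falling clear level */

	printf("threshold = %.3f %%RH, clear = %.3f %%RH\n",
	       (double)thresh / (65535.0 * 1e6),
	       (double)clr / (65535.0 * 1e6));
	return 0;
}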
diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c
index 9465908cc65e..0797ece1fcba 100644
--- a/drivers/iio/humidity/si7005.c
+++ b/drivers/iio/humidity/si7005.c
@@ -163,8 +163,8 @@ static int si7005_probe(struct i2c_client *client)
}
static const struct i2c_device_id si7005_id[] = {
- { "si7005", 0 },
- { "th02", 0 },
+ { "si7005" },
+ { "th02" },
{ }
};
MODULE_DEVICE_TABLE(i2c, si7005_id);
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index fb1006649328..ff2dba50c0a5 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -23,6 +23,7 @@
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/stat.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -33,17 +34,38 @@
#define SI7020CMD_TEMP_HOLD 0xE3
/* Software Reset */
#define SI7020CMD_RESET 0xFE
+#define SI7020CMD_USR_WRITE 0xE6
+/* "Heater Enabled" bit in the User Register */
+#define SI7020_USR_HEATER_EN BIT(2)
+#define SI7020CMD_HEATER_WRITE 0x51
+/* Heater current configuration bits */
+#define SI7020_HEATER_VAL GENMASK(3, 0)
+
+struct si7020_data {
+ struct i2c_client *client;
+ /* Lock for cached register values */
+ struct mutex lock;
+ u8 user_reg;
+ u8 heater_reg;
+};
+
+static const int si7020_heater_vals[] = { 0, 1, 0xF };
static int si7020_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
{
- struct i2c_client **client = iio_priv(indio_dev);
+ struct si7020_data *data = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = i2c_smbus_read_word_swapped(*client,
+ if (chan->type == IIO_CURRENT) {
+ *val = data->heater_reg;
+ return IIO_VAL_INT;
+ }
+
+ ret = i2c_smbus_read_word_swapped(data->client,
chan->type == IIO_TEMP ?
SI7020CMD_TEMP_HOLD :
SI7020CMD_RH_HOLD);
@@ -96,17 +118,118 @@ static const struct iio_chan_spec si7020_channels[] = {
.type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ },
+ {
+ .type = IIO_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+ .extend_name = "heater",
+ }
+};
+
+static int si7020_update_reg(struct si7020_data *data,
+ u8 *reg, u8 cmd, u8 mask, u8 val)
+{
+ u8 new = (*reg & ~mask) | val;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, cmd, new);
+ if (ret)
+ return ret;
+
+ *reg = new;
+
+ return 0;
+}
+
+static int si7020_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct si7020_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type != IIO_CURRENT || val2 != 0 ||
+ val < si7020_heater_vals[0] || val > si7020_heater_vals[2])
+ return -EINVAL;
+
+ scoped_guard(mutex, &data->lock)
+ ret = si7020_update_reg(data, &data->heater_reg,
+ SI7020CMD_HEATER_WRITE, SI7020_HEATER_VAL, val);
+ return ret;
+ default:
+ return -EINVAL;
}
+}
+
+static int si7020_read_available(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals,
+ int *type, int *length, long mask)
+{
+ if (mask != IIO_CHAN_INFO_RAW || chan->type != IIO_CURRENT)
+ return -EINVAL;
+
+ *vals = si7020_heater_vals;
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_RANGE;
+}
+
+static ssize_t si7020_show_heater_en(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct si7020_data *data = iio_priv(indio_dev);
+
+ return sysfs_emit(buf, "%d\n", !!(data->user_reg & SI7020_USR_HEATER_EN));
+}
+
+static ssize_t si7020_store_heater_en(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct si7020_data *data = iio_priv(indio_dev);
+ int ret;
+ bool val;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &data->lock)
+ ret = si7020_update_reg(data, &data->user_reg, SI7020CMD_USR_WRITE,
+ SI7020_USR_HEATER_EN, val ? SI7020_USR_HEATER_EN : 0);
+
+ return ret < 0 ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(heater_enable, 0644,
+ si7020_show_heater_en, si7020_store_heater_en, 0);
+
+static struct attribute *si7020_attributes[] = {
+ &iio_dev_attr_heater_enable.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group si7020_attribute_group = {
+ .attrs = si7020_attributes,
};
static const struct iio_info si7020_info = {
.read_raw = si7020_read_raw,
+ .write_raw = si7020_write_raw,
+ .read_avail = si7020_read_available,
+ .attrs = &si7020_attribute_group,
};
static int si7020_probe(struct i2c_client *client)
{
struct iio_dev *indio_dev;
- struct i2c_client **data;
+ struct si7020_data *data;
int ret;
if (!i2c_check_functionality(client->adapter,
@@ -126,7 +249,9 @@ static int si7020_probe(struct i2c_client *client)
return -ENOMEM;
data = iio_priv(indio_dev);
- *data = client;
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
indio_dev->name = dev_name(&client->dev);
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -134,12 +259,16 @@ static int si7020_probe(struct i2c_client *client)
indio_dev->channels = si7020_channels;
indio_dev->num_channels = ARRAY_SIZE(si7020_channels);
+ /* All the "reserved" bits in the User Register are 1s by default */
+ data->user_reg = 0x3A;
+ data->heater_reg = 0x0;
+
return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id si7020_id[] = {
- { "si7020", 0 },
- { "th06", 0 },
+ { "si7020" },
+ { "th06" },
{ }
};
MODULE_DEVICE_TABLE(i2c, si7020_id);
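
The new heater support keeps shadow copies of the user and heater registers and only ever writes a masked update, so unrelated bits survive each change and a failed write leaves the cache untouched. A minimal userspace-style sketch of that read-modify-write pattern; the names below are invented, and the real driver goes through i2c_smbus_write_byte_data():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the bus write; assumed to return 0 on success. */
static int fake_write(uint8_t val)
{
	printf("write 0x%02x\n", val);
	return 0;
}

/* Mirror of the cached update: callers pass a value already limited to mask. */
static int update_reg(uint8_t *cached, uint8_t mask, uint8_t val)
{
	uint8_t new = (*cached & ~mask) | val;
	int ret = fake_write(new);

	if (ret)
		return ret;	/* keep the old cache if the write failed */

	*cached = new;
	return 0;
}

int main(void)
{
	uint8_t user_reg = 0x3a;

	/* Set the "heater enabled" bit (bit 2) without disturbing the rest. */
	return update_reg(&user_reg, 1u << 2, 1u << 2);
}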
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 1a38b1915e7a..b7d5f4f0fada 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -34,6 +34,10 @@ void iio_device_ioctl_handler_register(struct iio_dev *indio_dev,
struct iio_ioctl_handler *h);
void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h);
+ssize_t do_iio_read_channel_label(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *c,
+ char *buf);
+
int __iio_add_chan_devattr(const char *postfix,
struct iio_chan_spec const *chan,
ssize_t (*func)(struct device *dev,
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 52a155ff3250..782fb80e44c2 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -36,8 +36,8 @@ config ADIS16475
select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
help
Say yes here to build support for Analog Devices ADIS16470, ADIS16475,
- ADIS16477, ADIS16465, ADIS16467, ADIS16500, ADIS16505, ADIS16507 inertial
- sensors.
+ ADIS16477, ADIS16465, ADIS16467, ADIS16500, ADIS16501, ADIS16505,
+ ADIS16507 inertial sensors.
To compile this driver as a module, choose M here: the module will be
called adis16475.
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index 495caf4ce87a..876848b42f69 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -466,17 +466,17 @@ int adis_single_conversion(struct iio_dev *indio_dev,
unsigned int uval;
int ret;
- mutex_lock(&adis->state_lock);
+ guard(mutex)(&adis->state_lock);
ret = __adis_read_reg(adis, chan->address, &uval,
chan->scan_type.storagebits / 8);
if (ret)
- goto err_unlock;
+ return ret;
if (uval & error_mask) {
ret = __adis_check_status(adis);
if (ret)
- goto err_unlock;
+ return ret;
}
if (chan->scan_type.sign == 's')
@@ -484,10 +484,7 @@ int adis_single_conversion(struct iio_dev *indio_dev,
else
*val = uval & ((1 << chan->scan_type.realbits) - 1);
- ret = IIO_VAL_INT;
-err_unlock:
- mutex_unlock(&adis->state_lock);
- return ret;
+ return IIO_VAL_INT;
}
EXPORT_SYMBOL_NS_GPL(adis_single_conversion, IIO_ADISLIB);
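
The locking rework above leans on the scope-based cleanup helpers from <linux/cleanup.h>: guard(mutex)() takes the lock and releases it automatically when the enclosing scope ends, which is what lets the error gotos collapse into plain returns. A minimal kernel-style illustration of the pattern; the structure and function are invented for the example:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct demo_state {
	struct mutex lock;
	int cached;
};

/* Every return path below releases demo->lock automatically. */
static int demo_read_cached(struct demo_state *demo, int *val)
{
	guard(mutex)(&demo->lock);

	if (demo->cached < 0)
		return -EINVAL;	/* unlocked here as well */

	*val = demo->cached;
	return 0;
}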
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 3eda32e12a53..0bfd6205f5f6 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -497,41 +497,38 @@ static int adis16400_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long info)
{
struct adis16400_state *st = iio_priv(indio_dev);
- int ret, sps;
+ int sps;
switch (info) {
case IIO_CHAN_INFO_CALIBBIAS:
- ret = adis_write_reg_16(&st->adis,
- adis16400_addresses[chan->scan_index], val);
- return ret;
+ return adis_write_reg_16(&st->adis,
+ adis16400_addresses[chan->scan_index],
+ val);
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
/*
* Need to cache values so we can update if the frequency
* changes.
*/
- adis_dev_lock(&st->adis);
- st->filt_int = val;
- /* Work out update to current value */
- sps = st->variant->get_freq(st);
- if (sps < 0) {
- adis_dev_unlock(&st->adis);
- return sps;
+ adis_dev_auto_scoped_lock(&st->adis) {
+ st->filt_int = val;
+ /* Work out update to current value */
+ sps = st->variant->get_freq(st);
+ if (sps < 0)
+ return sps;
+
+ return __adis16400_set_filter(indio_dev, sps,
+ val * 1000 + val2 / 1000);
}
-
- ret = __adis16400_set_filter(indio_dev, sps,
- val * 1000 + val2 / 1000);
- adis_dev_unlock(&st->adis);
- return ret;
+ unreachable();
case IIO_CHAN_INFO_SAMP_FREQ:
sps = val * 1000 + val2 / 1000;
if (sps <= 0)
return -EINVAL;
- adis_dev_lock(&st->adis);
- ret = st->variant->set_freq(st, sps);
- adis_dev_unlock(&st->adis);
- return ret;
+ adis_dev_auto_scoped_lock(&st->adis)
+ return st->variant->set_freq(st, sps);
+ unreachable();
default:
return -EINVAL;
}
@@ -596,29 +593,30 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
*val = st->variant->temp_offset;
return IIO_VAL_INT;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- adis_dev_lock(&st->adis);
- /* Need both the number of taps and the sampling frequency */
- ret = __adis_read_reg_16(&st->adis,
- ADIS16400_SENS_AVG,
- &val16);
- if (ret) {
- adis_dev_unlock(&st->adis);
- return ret;
+ adis_dev_auto_scoped_lock(&st->adis) {
+ /*
+ * Need both the number of taps and the sampling
+ * frequency
+ */
+ ret = __adis_read_reg_16(&st->adis, ADIS16400_SENS_AVG,
+ &val16);
+ if (ret)
+ return ret;
+
+ ret = st->variant->get_freq(st);
+ if (ret)
+ return ret;
}
- ret = st->variant->get_freq(st);
- adis_dev_unlock(&st->adis);
- if (ret)
- return ret;
ret /= adis16400_3db_divisors[val16 & 0x07];
*val = ret / 1000;
*val2 = (ret % 1000) * 1000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SAMP_FREQ:
- adis_dev_lock(&st->adis);
- ret = st->variant->get_freq(st);
- adis_dev_unlock(&st->adis);
- if (ret)
- return ret;
+ adis_dev_auto_scoped_lock(&st->adis) {
+ ret = st->variant->get_freq(st);
+ if (ret)
+ return ret;
+ }
*val = ret / 1000;
*val2 = (ret % 1000) * 1000;
return IIO_VAL_INT_PLUS_MICRO;
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index 060a21c70460..482258ed5a3c 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -14,6 +14,7 @@
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/imu/adis.h>
+#include <linux/iio/sysfs.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/irq.h>
#include <linux/lcm.h>
@@ -52,6 +53,8 @@
FIELD_PREP(ADIS16475_MSG_CTRL_DR_POL_MASK, x)
#define ADIS16475_SYNC_MODE_MASK GENMASK(4, 2)
#define ADIS16475_SYNC_MODE(x) FIELD_PREP(ADIS16475_SYNC_MODE_MASK, x)
+#define ADIS16575_SYNC_4KHZ_MASK BIT(11)
+#define ADIS16575_SYNC_4KHZ(x) FIELD_PREP(ADIS16575_SYNC_4KHZ_MASK, x)
#define ADIS16475_REG_UP_SCALE 0x62
#define ADIS16475_REG_DEC_RATE 0x64
#define ADIS16475_REG_GLOB_CMD 0x68
@@ -65,15 +68,32 @@
#define ADIS16500_BURST32_MASK BIT(9)
#define ADIS16500_BURST32(x) FIELD_PREP(ADIS16500_BURST32_MASK, x)
/* number of data elements in burst mode */
-#define ADIS16475_BURST32_MAX_DATA 32
+#define ADIS16475_BURST32_MAX_DATA_NO_TS32 32
+#define ADIS16575_BURST32_DATA_TS32 34
#define ADIS16475_BURST_MAX_DATA 20
#define ADIS16475_MAX_SCAN_DATA 20
/* spi max speed in burst mode */
#define ADIS16475_BURST_MAX_SPEED 1000000
+#define ADIS16575_BURST_MAX_SPEED 8000000
#define ADIS16475_LSB_DEC_MASK 0
#define ADIS16475_LSB_FIR_MASK 1
#define ADIS16500_BURST_DATA_SEL_0_CHN_MASK GENMASK(5, 0)
#define ADIS16500_BURST_DATA_SEL_1_CHN_MASK GENMASK(12, 7)
+#define ADIS16575_MAX_FIFO_WM 511UL
+#define ADIS16475_REG_FIFO_CTRL 0x5A
+#define ADIS16575_WM_LVL_MASK GENMASK(15, 4)
+#define ADIS16575_WM_LVL(x) FIELD_PREP(ADIS16575_WM_LVL_MASK, x)
+#define ADIS16575_WM_POL_MASK BIT(3)
+#define ADIS16575_WM_POL(x) FIELD_PREP(ADIS16575_WM_POL_MASK, x)
+#define ADIS16575_WM_EN_MASK BIT(2)
+#define ADIS16575_WM_EN(x) FIELD_PREP(ADIS16575_WM_EN_MASK, x)
+#define ADIS16575_OVERFLOW_MASK BIT(1)
+#define ADIS16575_STOP_ENQUEUE FIELD_PREP(ADIS16575_OVERFLOW_MASK, 0)
+#define ADIS16575_OVERWRITE_OLDEST FIELD_PREP(ADIS16575_OVERFLOW_MASK, 1)
+#define ADIS16575_FIFO_EN_MASK BIT(0)
+#define ADIS16575_FIFO_EN(x) FIELD_PREP(ADIS16575_FIFO_EN_MASK, x)
+#define ADIS16575_FIFO_FLUSH_CMD BIT(5)
+#define ADIS16575_REG_FIFO_CNT 0x3C
enum {
ADIS16475_SYNC_DIRECT = 1,
@@ -95,6 +115,8 @@ struct adis16475_chip_info {
const char *name;
#define ADIS16475_HAS_BURST32 BIT(0)
#define ADIS16475_HAS_BURST_DELTA_DATA BIT(1)
+#define ADIS16475_HAS_TIMESTAMP32 BIT(2)
+#define ADIS16475_NEEDS_BURST_REQUEST BIT(3)
const long flags;
u32 num_channels;
u32 gyro_max_val;
@@ -116,6 +138,7 @@ struct adis16475 {
bool burst32;
unsigned long lsb_flag;
u16 sync_mode;
+ u16 fifo_watermark;
/* Alignment needed for the timestamp */
__be16 data[ADIS16475_MAX_SCAN_DATA] __aligned(8);
};
@@ -279,30 +302,25 @@ static int adis16475_get_freq(struct adis16475 *st, u32 *freq)
u16 dec;
u32 sample_rate = st->clk_freq;
- adis_dev_lock(&st->adis);
+ adis_dev_auto_lock(&st->adis);
if (st->sync_mode == ADIS16475_SYNC_SCALED) {
u16 sync_scale;
ret = __adis_read_reg_16(&st->adis, ADIS16475_REG_UP_SCALE, &sync_scale);
if (ret)
- goto error;
+ return ret;
sample_rate = st->clk_freq * sync_scale;
}
ret = __adis_read_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, &dec);
if (ret)
- goto error;
-
- adis_dev_unlock(&st->adis);
+ return ret;
*freq = DIV_ROUND_CLOSEST(sample_rate, dec + 1);
return 0;
-error:
- adis_dev_unlock(&st->adis);
- return ret;
}
static int adis16475_set_freq(struct adis16475 *st, const u32 freq)
@@ -310,15 +328,19 @@ static int adis16475_set_freq(struct adis16475 *st, const u32 freq)
u16 dec;
int ret;
u32 sample_rate = st->clk_freq;
+ /* The optimal sample rate for the supported IMUs is between int_clk - 100 and int_clk + 100. */
+ u32 max_sample_rate = st->info->int_clk * 1000 + 100000;
+ u32 min_sample_rate = st->info->int_clk * 1000 - 100000;
if (!freq)
return -EINVAL;
- adis_dev_lock(&st->adis);
+ adis_dev_auto_lock(&st->adis);
/*
* When using sync scaled mode, the input clock needs to be scaled so that we have
- * an IMU sample rate between (optimally) 1900 and 2100. After this, we can use the
- * decimation filter to lower the sampling rate in order to get what the user wants.
+ * an IMU sample rate between (optimally) int_clk - 100 and int_clk + 100.
+ * After this, we can use the decimation filter to lower the sampling rate in order
+ * to get what the user wants.
* Optimally, the user sample rate is a multiple of both the IMU sample rate and
* the input clock. Hence, calculating the sync_scale dynamically gives us better
* chances of achieving a perfect/integer value for DEC_RATE. The math here is:
@@ -336,28 +358,29 @@ static int adis16475_set_freq(struct adis16475 *st, const u32 freq)
* solution. In this case, we get the highest multiple of the input clock
* lower than the IMU max sample rate.
*/
- if (scaled_rate > 2100000)
- scaled_rate = 2100000 / st->clk_freq * st->clk_freq;
+ if (scaled_rate > max_sample_rate)
+ scaled_rate = max_sample_rate / st->clk_freq * st->clk_freq;
else
- scaled_rate = 2100000 / scaled_rate * scaled_rate;
+ scaled_rate = max_sample_rate / scaled_rate * scaled_rate;
/*
* This is not a hard requirement but it's not advised to run the IMU
- * with a sample rate lower than 1900Hz due to possible undersampling
- * issues. However, there are users that might really want to take the risk.
- * Hence, we provide a module parameter for them. If set, we allow sample
- * rates lower than 1.9KHz. By default, we won't allow this and we just roundup
- * the rate to the next multiple of the input clock bigger than 1.9KHz. This
- * is done like this as in some cases (when DEC_RATE is 0) might give
- * us the closest value to the one desired by the user...
+ * with a sample rate lower than the internal clock frequency, due to
+ * possible undersampling issues. However, there are users that might really
+ * want to take the risk. Hence, we provide a module parameter for them. If
+ * set, we allow sample rates lower than the internal clock frequency.
+ * By default, we won't allow this and we just round up the rate to the next
+ * multiple of the input clock. This is done because in some cases (when
+ * DEC_RATE is 0) it might give us the closest value to the one desired by
+ * the user...
*/
- if (scaled_rate < 1900000 && !low_rate_allow)
- scaled_rate = roundup(1900000, st->clk_freq);
+ if (scaled_rate < min_sample_rate && !low_rate_allow)
+ scaled_rate = roundup(min_sample_rate, st->clk_freq);
sync_scale = scaled_rate / st->clk_freq;
ret = __adis_write_reg_16(&st->adis, ADIS16475_REG_UP_SCALE, sync_scale);
if (ret)
- goto error;
+ return ret;
sample_rate = scaled_rate;
}
@@ -372,9 +395,8 @@ static int adis16475_set_freq(struct adis16475 *st, const u32 freq)
ret = __adis_write_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, dec);
if (ret)
- goto error;
+ return ret;
- adis_dev_unlock(&st->adis);
/*
* If decimation is used, then gyro and accel data will have meaningful
* bits on the LSB registers. This info is used on the trigger handler.
@@ -382,9 +404,6 @@ static int adis16475_set_freq(struct adis16475 *st, const u32 freq)
assign_bit(ADIS16475_LSB_DEC_MASK, &st->lsb_flag, dec);
return 0;
-error:
- adis_dev_unlock(&st->adis);
- return ret;
}
/* The values are approximated. */
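
In the scaled-sync path touched above, the register values follow from two relations: sample_rate = clk_freq * UP_SCALE and output_rate = sample_rate / (DEC_RATE + 1), with the sample rate kept inside the int_clk +/- 100 window. A simplified worked example in plain Hz; the driver itself tracks rates in millihertz and also involves the lcm of the clocks, so the numbers below are only illustrative:

#include <stdio.h>

int main(void)
{
	unsigned int clk_freq = 2000;	/* Hz, external clock in scaled-sync mode (example) */
	unsigned int int_clk  = 4000;	/* Hz, internal clock of the IMU (example) */
	unsigned int freq     = 800;	/* Hz, sample rate requested by the user */

	/* Highest multiple of clk_freq inside the int_clk +/- 100 Hz window. */
	unsigned int sync_scale  = (int_clk + 100) / clk_freq;	/* written to UP_SCALE */
	unsigned int sample_rate = clk_freq * sync_scale;	/* 4000 Hz here */

	/* Decimate down to the requested rate: output = sample_rate / (DEC_RATE + 1). */
	unsigned int dec = (sample_rate + freq / 2) / freq - 1;	/* DIV_ROUND_CLOSEST - 1 */

	printf("UP_SCALE=%u DEC_RATE=%u -> output %u Hz\n",
	       sync_scale, dec, sample_rate / (dec + 1));
	return 0;
}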
@@ -437,6 +456,118 @@ static int adis16475_set_filter(struct adis16475 *st, const u32 filter)
return 0;
}
+static ssize_t adis16475_get_fifo_enabled(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adis16475 *st = iio_priv(indio_dev);
+ int ret;
+ u16 val;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_FIFO_CTRL, &val);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%lu\n", FIELD_GET(ADIS16575_FIFO_EN_MASK, val));
+}
+
+static ssize_t adis16475_get_fifo_watermark(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adis16475 *st = iio_priv(indio_dev);
+ int ret;
+ u16 val;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_FIFO_CTRL, &val);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%lu\n", FIELD_GET(ADIS16575_WM_LVL_MASK, val) + 1);
+}
+
+static ssize_t hwfifo_watermark_min_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "1\n");
+}
+
+static ssize_t hwfifo_watermark_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%lu\n", ADIS16575_MAX_FIFO_WM);
+}
+
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0);
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0);
+static IIO_DEVICE_ATTR(hwfifo_watermark, 0444,
+ adis16475_get_fifo_watermark, NULL, 0);
+static IIO_DEVICE_ATTR(hwfifo_enabled, 0444,
+ adis16475_get_fifo_enabled, NULL, 0);
+
+static const struct iio_dev_attr *adis16475_fifo_attributes[] = {
+ &iio_dev_attr_hwfifo_watermark_min,
+ &iio_dev_attr_hwfifo_watermark_max,
+ &iio_dev_attr_hwfifo_watermark,
+ &iio_dev_attr_hwfifo_enabled,
+ NULL
+};
+
+static int adis16475_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct adis16475 *st = iio_priv(indio_dev);
+ struct adis *adis = &st->adis;
+
+ return adis_update_bits(adis, ADIS16475_REG_FIFO_CTRL,
+ ADIS16575_FIFO_EN_MASK, (u16)ADIS16575_FIFO_EN(1));
+}
+
+static int adis16475_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct adis16475 *st = iio_priv(indio_dev);
+ struct adis *adis = &st->adis;
+ int ret;
+
+ adis_dev_auto_lock(&st->adis);
+
+ ret = __adis_update_bits(adis, ADIS16475_REG_FIFO_CTRL,
+ ADIS16575_FIFO_EN_MASK, (u16)ADIS16575_FIFO_EN(0));
+ if (ret)
+ return ret;
+
+ return __adis_write_reg_16(adis, ADIS16475_REG_GLOB_CMD,
+ ADIS16575_FIFO_FLUSH_CMD);
+}
+
+static const struct iio_buffer_setup_ops adis16475_buffer_ops = {
+ .postenable = adis16475_buffer_postenable,
+ .postdisable = adis16475_buffer_postdisable,
+};
+
+static int adis16475_set_watermark(struct iio_dev *indio_dev, unsigned int val)
+{
+ struct adis16475 *st = iio_priv(indio_dev);
+ int ret;
+ u16 wm_lvl;
+
+ adis_dev_auto_lock(&st->adis);
+
+ val = min_t(unsigned int, val, ADIS16575_MAX_FIFO_WM);
+
+ wm_lvl = ADIS16575_WM_LVL(val - 1);
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_FIFO_CTRL, ADIS16575_WM_LVL_MASK, wm_lvl);
+ if (ret)
+ return ret;
+
+ st->fifo_watermark = val;
+
+ return 0;
+}
+
static const u32 adis16475_calib_regs[] = {
[ADIS16475_SCAN_GYRO_X] = ADIS16475_REG_X_GYRO_BIAS_L,
[ADIS16475_SCAN_GYRO_Y] = ADIS16475_REG_Y_GYRO_BIAS_L,
@@ -646,6 +777,22 @@ static const struct iio_chan_spec adis16475_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(7)
};
+static const struct iio_chan_spec adis16575_channels[] = {
+ ADIS16475_GYRO_CHANNEL(X),
+ ADIS16475_GYRO_CHANNEL(Y),
+ ADIS16475_GYRO_CHANNEL(Z),
+ ADIS16475_ACCEL_CHANNEL(X),
+ ADIS16475_ACCEL_CHANNEL(Y),
+ ADIS16475_ACCEL_CHANNEL(Z),
+ ADIS16475_TEMP_CHANNEL(),
+ ADIS16475_DELTANG_CHAN(X),
+ ADIS16475_DELTANG_CHAN(Y),
+ ADIS16475_DELTANG_CHAN(Z),
+ ADIS16475_DELTVEL_CHAN(X),
+ ADIS16475_DELTVEL_CHAN(Y),
+ ADIS16475_DELTVEL_CHAN(Z),
+};
+
enum adis16475_variant {
ADIS16470,
ADIS16475_1,
@@ -661,12 +808,19 @@ enum adis16475_variant {
ADIS16467_2,
ADIS16467_3,
ADIS16500,
+ ADIS16501,
ADIS16505_1,
ADIS16505_2,
ADIS16505_3,
ADIS16507_1,
ADIS16507_2,
ADIS16507_3,
+ ADIS16575_2,
+ ADIS16575_3,
+ ADIS16576_2,
+ ADIS16576_3,
+ ADIS16577_2,
+ ADIS16577_3,
};
enum {
@@ -689,32 +843,33 @@ static const char * const adis16475_status_error_msgs[] = {
[ADIS16475_DIAG_STAT_CLK] = "Clock error",
};
-#define ADIS16475_DATA(_prod_id, _timeouts) \
-{ \
- .msc_ctrl_reg = ADIS16475_REG_MSG_CTRL, \
- .glob_cmd_reg = ADIS16475_REG_GLOB_CMD, \
- .diag_stat_reg = ADIS16475_REG_DIAG_STAT, \
- .prod_id_reg = ADIS16475_REG_PROD_ID, \
- .prod_id = (_prod_id), \
- .self_test_mask = BIT(2), \
- .self_test_reg = ADIS16475_REG_GLOB_CMD, \
- .cs_change_delay = 16, \
- .read_delay = 5, \
- .write_delay = 5, \
- .status_error_msgs = adis16475_status_error_msgs, \
- .status_error_mask = BIT(ADIS16475_DIAG_STAT_DATA_PATH) | \
- BIT(ADIS16475_DIAG_STAT_FLASH_MEM) | \
- BIT(ADIS16475_DIAG_STAT_SPI) | \
- BIT(ADIS16475_DIAG_STAT_STANDBY) | \
- BIT(ADIS16475_DIAG_STAT_SENSOR) | \
- BIT(ADIS16475_DIAG_STAT_MEMORY) | \
- BIT(ADIS16475_DIAG_STAT_CLK), \
- .unmasked_drdy = true, \
- .timeouts = (_timeouts), \
- .burst_reg_cmd = ADIS16475_REG_GLOB_CMD, \
- .burst_len = ADIS16475_BURST_MAX_DATA, \
- .burst_max_len = ADIS16475_BURST32_MAX_DATA, \
- .burst_max_speed_hz = ADIS16475_BURST_MAX_SPEED \
+#define ADIS16475_DATA(_prod_id, _timeouts, _burst_max_len, _burst_max_speed_hz, _has_fifo) \
+{ \
+ .msc_ctrl_reg = ADIS16475_REG_MSG_CTRL, \
+ .glob_cmd_reg = ADIS16475_REG_GLOB_CMD, \
+ .diag_stat_reg = ADIS16475_REG_DIAG_STAT, \
+ .prod_id_reg = ADIS16475_REG_PROD_ID, \
+ .prod_id = (_prod_id), \
+ .self_test_mask = BIT(2), \
+ .self_test_reg = ADIS16475_REG_GLOB_CMD, \
+ .cs_change_delay = 16, \
+ .read_delay = 5, \
+ .write_delay = 5, \
+ .status_error_msgs = adis16475_status_error_msgs, \
+ .status_error_mask = BIT(ADIS16475_DIAG_STAT_DATA_PATH) | \
+ BIT(ADIS16475_DIAG_STAT_FLASH_MEM) | \
+ BIT(ADIS16475_DIAG_STAT_SPI) | \
+ BIT(ADIS16475_DIAG_STAT_STANDBY) | \
+ BIT(ADIS16475_DIAG_STAT_SENSOR) | \
+ BIT(ADIS16475_DIAG_STAT_MEMORY) | \
+ BIT(ADIS16475_DIAG_STAT_CLK), \
+ .unmasked_drdy = true, \
+ .has_fifo = _has_fifo, \
+ .timeouts = (_timeouts), \
+ .burst_reg_cmd = ADIS16475_REG_GLOB_CMD, \
+ .burst_len = ADIS16475_BURST_MAX_DATA, \
+ .burst_max_len = _burst_max_len, \
+ .burst_max_speed_hz = _burst_max_speed_hz \
}
static const struct adis16475_sync adis16475_sync_mode[] = {
@@ -724,6 +879,12 @@ static const struct adis16475_sync adis16475_sync_mode[] = {
{ ADIS16475_SYNC_PULSE, 1000, 2100 },
};
+static const struct adis16475_sync adis16575_sync_mode[] = {
+ { ADIS16475_SYNC_OUTPUT },
+ { ADIS16475_SYNC_DIRECT, 1900, 4100 },
+ { ADIS16475_SYNC_SCALED, 1, 400 },
+};
+
static const struct adis_timeout adis16475_timeouts = {
.reset_ms = 200,
.sw_reset_ms = 200,
@@ -752,7 +913,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
- .adis_data = ADIS16475_DATA(16470, &adis16475_timeouts),
+ .adis_data = ADIS16475_DATA(16470, &adis16475_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16475_1] = {
.name = "adis16475-1",
@@ -769,7 +932,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
- .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts),
+ .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16475_2] = {
.name = "adis16475-2",
@@ -786,7 +951,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
- .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts),
+ .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16475_3] = {
.name = "adis16475-3",
@@ -803,7 +970,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
- .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts),
+ .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16477_1] = {
.name = "adis16477-1",
@@ -821,7 +990,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
.flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
- .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
+ .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16477_2] = {
.name = "adis16477-2",
@@ -839,7 +1010,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
.flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
- .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
+ .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16477_3] = {
.name = "adis16477-3",
@@ -857,7 +1030,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
.flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
- .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
+ .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16465_1] = {
.name = "adis16465-1",
@@ -874,7 +1049,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
- .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts),
+ .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16465_2] = {
.name = "adis16465-2",
@@ -891,7 +1068,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
- .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts),
+ .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16465_3] = {
.name = "adis16465-3",
@@ -908,7 +1087,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
- .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts),
+ .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16467_1] = {
.name = "adis16467-1",
@@ -925,7 +1106,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
- .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts),
+ .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16467_2] = {
.name = "adis16467-2",
@@ -942,7 +1125,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
- .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts),
+ .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16467_3] = {
.name = "adis16467-3",
@@ -959,7 +1144,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
- .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts),
+ .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16500] = {
.name = "adis16500",
@@ -978,7 +1165,30 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
/* pulse sync not supported */
.num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
.flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
- .adis_data = ADIS16475_DATA(16500, &adis1650x_timeouts),
+ .adis_data = ADIS16475_DATA(16500, &adis1650x_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
+ },
+ [ADIS16501] = {
+ .name = "adis16501",
+ .num_channels = ARRAY_SIZE(adis16477_channels),
+ .channels = adis16477_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 125,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
+ .adis_data = ADIS16475_DATA(16501, &adis1650x_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16505_1] = {
.name = "adis16505-1",
@@ -997,7 +1207,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
/* pulse sync not supported */
.num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
.flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
- .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
+ .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16505_2] = {
.name = "adis16505-2",
@@ -1016,7 +1228,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
/* pulse sync not supported */
.num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
.flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
- .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
+ .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16505_3] = {
.name = "adis16505-3",
@@ -1035,7 +1249,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
/* pulse sync not supported */
.num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
.flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
- .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
+ .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16507_1] = {
.name = "adis16507-1",
@@ -1054,7 +1270,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
/* pulse sync not supported */
.num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
.flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
- .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
+ .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16507_2] = {
.name = "adis16507-2",
@@ -1073,7 +1291,9 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
/* pulse sync not supported */
.num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
.flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
- .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
+ .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
},
[ADIS16507_3] = {
.name = "adis16507-3",
@@ -1092,7 +1312,147 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
/* pulse sync not supported */
.num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
.flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
- .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
+ .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts,
+ ADIS16475_BURST32_MAX_DATA_NO_TS32,
+ ADIS16475_BURST_MAX_SPEED, false),
+ },
+ [ADIS16575_2] = {
+ .name = "adis16575-2",
+ .num_channels = ARRAY_SIZE(adis16575_channels),
+ .channels = adis16575_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 8,
+ .accel_max_scale = IIO_M_S_2_TO_G(32000 << 16),
+ .temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(450),
+ .deltvel_max_val = 100,
+ .int_clk = 4000,
+ .max_dec = 3999,
+ .sync = adis16575_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16575_sync_mode),
+ .flags = ADIS16475_HAS_BURST32 |
+ ADIS16475_HAS_BURST_DELTA_DATA |
+ ADIS16475_NEEDS_BURST_REQUEST |
+ ADIS16475_HAS_TIMESTAMP32,
+ .adis_data = ADIS16475_DATA(16575, &adis16475_timeouts,
+ ADIS16575_BURST32_DATA_TS32,
+ ADIS16575_BURST_MAX_SPEED, true),
+ },
+ [ADIS16575_3] = {
+ .name = "adis16575-3",
+ .num_channels = ARRAY_SIZE(adis16575_channels),
+ .channels = adis16575_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 8,
+ .accel_max_scale = IIO_M_S_2_TO_G(32000 << 16),
+ .temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2000),
+ .deltvel_max_val = 100,
+ .int_clk = 4000,
+ .max_dec = 3999,
+ .sync = adis16575_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16575_sync_mode),
+ .flags = ADIS16475_HAS_BURST32 |
+ ADIS16475_HAS_BURST_DELTA_DATA |
+ ADIS16475_NEEDS_BURST_REQUEST |
+ ADIS16475_HAS_TIMESTAMP32,
+ .adis_data = ADIS16475_DATA(16575, &adis16475_timeouts,
+ ADIS16575_BURST32_DATA_TS32,
+ ADIS16575_BURST_MAX_SPEED, true),
+ },
+ [ADIS16576_2] = {
+ .name = "adis16576-2",
+ .num_channels = ARRAY_SIZE(adis16575_channels),
+ .channels = adis16575_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 40,
+ .accel_max_scale = IIO_M_S_2_TO_G(32000 << 16),
+ .temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(450),
+ .deltvel_max_val = 125,
+ .int_clk = 4000,
+ .max_dec = 3999,
+ .sync = adis16575_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16575_sync_mode),
+ .flags = ADIS16475_HAS_BURST32 |
+ ADIS16475_HAS_BURST_DELTA_DATA |
+ ADIS16475_NEEDS_BURST_REQUEST |
+ ADIS16475_HAS_TIMESTAMP32,
+ .adis_data = ADIS16475_DATA(16576, &adis16475_timeouts,
+ ADIS16575_BURST32_DATA_TS32,
+ ADIS16575_BURST_MAX_SPEED, true),
+ },
+ [ADIS16576_3] = {
+ .name = "adis16576-3",
+ .num_channels = ARRAY_SIZE(adis16575_channels),
+ .channels = adis16575_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 40,
+ .accel_max_scale = IIO_M_S_2_TO_G(32000 << 16),
+ .temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2000),
+ .deltvel_max_val = 125,
+ .int_clk = 4000,
+ .max_dec = 3999,
+ .sync = adis16575_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16575_sync_mode),
+ .flags = ADIS16475_HAS_BURST32 |
+ ADIS16475_HAS_BURST_DELTA_DATA |
+ ADIS16475_NEEDS_BURST_REQUEST |
+ ADIS16475_HAS_TIMESTAMP32,
+ .adis_data = ADIS16475_DATA(16576, &adis16475_timeouts,
+ ADIS16575_BURST32_DATA_TS32,
+ ADIS16575_BURST_MAX_SPEED, true),
+ },
+ [ADIS16577_2] = {
+ .name = "adis16577-2",
+ .num_channels = ARRAY_SIZE(adis16575_channels),
+ .channels = adis16575_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 40,
+ .accel_max_scale = IIO_M_S_2_TO_G(32000 << 16),
+ .temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(450),
+ .deltvel_max_val = 400,
+ .int_clk = 4000,
+ .max_dec = 3999,
+ .sync = adis16575_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16575_sync_mode),
+ .flags = ADIS16475_HAS_BURST32 |
+ ADIS16475_HAS_BURST_DELTA_DATA |
+ ADIS16475_NEEDS_BURST_REQUEST |
+ ADIS16475_HAS_TIMESTAMP32,
+ .adis_data = ADIS16475_DATA(16577, &adis16475_timeouts,
+ ADIS16575_BURST32_DATA_TS32,
+ ADIS16575_BURST_MAX_SPEED, true),
+ },
+ [ADIS16577_3] = {
+ .name = "adis16577-3",
+ .num_channels = ARRAY_SIZE(adis16575_channels),
+ .channels = adis16575_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 40,
+ .accel_max_scale = IIO_M_S_2_TO_G(32000 << 16),
+ .temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2000),
+ .deltvel_max_val = 400,
+ .int_clk = 4000,
+ .max_dec = 3999,
+ .sync = adis16575_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16575_sync_mode),
+ .flags = ADIS16475_HAS_BURST32 |
+ ADIS16475_HAS_BURST_DELTA_DATA |
+ ADIS16475_NEEDS_BURST_REQUEST |
+ ADIS16475_HAS_TIMESTAMP32,
+ .adis_data = ADIS16475_DATA(16577, &adis16475_timeouts,
+ ADIS16575_BURST32_DATA_TS32,
+ ADIS16575_BURST_MAX_SPEED, true),
},
};
@@ -1128,15 +1488,20 @@ static const struct iio_info adis16475_info = {
.debugfs_reg_access = adis_debugfs_reg_access,
};
+static const struct iio_info adis16575_info = {
+ .read_raw = &adis16475_read_raw,
+ .write_raw = &adis16475_write_raw,
+ .update_scan_mode = adis16475_update_scan_mode,
+ .debugfs_reg_access = adis_debugfs_reg_access,
+ .hwfifo_set_watermark = adis16475_set_watermark,
+};
+
static bool adis16475_validate_crc(const u8 *buffer, u16 crc,
- const bool burst32)
+ u16 burst_size, u16 start_idx)
{
int i;
- /* extra 6 elements for low gyro and accel */
- const u16 sz = burst32 ? ADIS16475_BURST32_MAX_DATA :
- ADIS16475_BURST_MAX_DATA;
- for (i = 0; i < sz - 2; i++)
+ for (i = start_idx; i < burst_size - 2; i++)
crc -= buffer[i];
return crc == 0;
@@ -1146,10 +1511,14 @@ static void adis16475_burst32_check(struct adis16475 *st)
{
int ret;
struct adis *adis = &st->adis;
+ u8 timestamp32 = 0;
if (!(st->info->flags & ADIS16475_HAS_BURST32))
return;
+ if (st->info->flags & ADIS16475_HAS_TIMESTAMP32)
+ timestamp32 = 1;
+
if (st->lsb_flag && !st->burst32) {
const u16 en = ADIS16500_BURST32(1);
@@ -1163,9 +1532,12 @@ static void adis16475_burst32_check(struct adis16475 *st)
/*
* In 32-bit mode we need extra 2 bytes for all gyro
* and accel channels.
+ * If the device has a 32-bit timestamp value, we need 2 extra
+ * bytes for it.
*/
- adis->burst_extra_len = 6 * sizeof(u16);
- adis->xfer[1].len += 6 * sizeof(u16);
+ adis->burst_extra_len = (6 + timestamp32) * sizeof(u16);
+ adis->xfer[1].len += (6 + timestamp32) * sizeof(u16);
+
dev_dbg(&adis->spi->dev, "Enable burst32 mode, xfer:%d",
adis->xfer[1].len);
@@ -1181,15 +1553,14 @@ static void adis16475_burst32_check(struct adis16475 *st)
/* Remove the extra bits */
adis->burst_extra_len = 0;
- adis->xfer[1].len -= 6 * sizeof(u16);
+ adis->xfer[1].len -= (6 + timestamp32) * sizeof(u16);
dev_dbg(&adis->spi->dev, "Disable burst32 mode, xfer:%d\n",
adis->xfer[1].len);
}
}
-static irqreturn_t adis16475_trigger_handler(int irq, void *p)
+static int adis16475_push_single_sample(struct iio_poll_func *pf)
{
- struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct adis16475 *st = iio_priv(indio_dev);
struct adis *adis = &st->adis;
@@ -1197,20 +1568,29 @@ static irqreturn_t adis16475_trigger_handler(int irq, void *p)
__be16 *buffer;
u16 crc;
bool valid;
+ u8 crc_offset = 9;
+ u16 burst_size = ADIS16475_BURST_MAX_DATA;
+ u16 start_idx = (st->info->flags & ADIS16475_HAS_TIMESTAMP32) ? 2 : 0;
+
/* offset until the first element after gyro and accel */
const u8 offset = st->burst32 ? 13 : 7;
+ if (st->burst32) {
+ crc_offset = (st->info->flags & ADIS16475_HAS_TIMESTAMP32) ? 16 : 15;
+ burst_size = adis->data->burst_max_len;
+ }
+
ret = spi_sync(adis->spi, &adis->msg);
if (ret)
- goto check_burst32;
+ return ret;
buffer = adis->buffer;
- crc = be16_to_cpu(buffer[offset + 2]);
- valid = adis16475_validate_crc(adis->buffer, crc, st->burst32);
+ crc = be16_to_cpu(buffer[crc_offset]);
+ valid = adis16475_validate_crc(adis->buffer, crc, burst_size, start_idx);
if (!valid) {
dev_err(&adis->spi->dev, "Invalid crc\n");
- goto check_burst32;
+ return -EINVAL;
}
for_each_set_bit(bit, indio_dev->active_scan_mask,
@@ -1270,14 +1650,122 @@ static irqreturn_t adis16475_trigger_handler(int irq, void *p)
}
}
+ /* There might not be a timestamp option for some devices. */
iio_push_to_buffers_with_timestamp(indio_dev, st->data, pf->timestamp);
-check_burst32:
+
+ return 0;
+}
+
+static irqreturn_t adis16475_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adis16475 *st = iio_priv(indio_dev);
+
+ adis16475_push_single_sample(pf);
/*
* We only check the burst mode at the end of the current capture since
* it takes a full data ready cycle for the device to update the burst
* array.
*/
adis16475_burst32_check(st);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * This function updates the first tx byte of the adis message based on the
+ * given burst request.
+ */
+static void adis16575_update_msg_for_burst(struct adis *adis, u8 burst_req)
+{
+ unsigned int burst_max_length;
+ u8 *tx;
+
+ if (adis->data->burst_max_len)
+ burst_max_length = adis->data->burst_max_len;
+ else
+ burst_max_length = adis->data->burst_len + adis->burst_extra_len;
+
+ tx = adis->buffer + burst_max_length;
+ tx[0] = ADIS_READ_REG(burst_req);
+}
+
+static int adis16575_custom_burst_read(struct iio_poll_func *pf, u8 burst_req)
+{
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adis16475 *st = iio_priv(indio_dev);
+ struct adis *adis = &st->adis;
+
+ adis16575_update_msg_for_burst(adis, burst_req);
+
+ if (burst_req)
+ return spi_sync(adis->spi, &adis->msg);
+
+ return adis16475_push_single_sample(pf);
+}
+
+/*
+ * This handler is meant to be used for devices which support burst readings
+ * from a FIFO (namely devices from the adis1657x family).
+ * In order to pop the FIFO, the 0x68 0x00 FIFO pop burst request has to be sent.
+ * If the previous device command was not a FIFO pop burst request, the FIFO pop
+ * burst request will simply pop the FIFO without returning valid data.
+ * For the nth consecutive burst request, the device will send the data popped
+ * by the (n-1)th consecutive burst request.
+ * In order to read the data which was popped previously, without popping the
+ * FIFO, the 0x00 0x00 burst request has to be sent.
+ * If after a 0x68 0x00 FIFO pop burst request, there is any other device access
+ * different from a 0x68 0x00 or a 0x00 0x00 burst request, the FIFO data popped
+ * previously will be lost.
+ */
+static irqreturn_t adis16475_trigger_handler_with_fifo(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adis16475 *st = iio_priv(indio_dev);
+ struct adis *adis = &st->adis;
+ int ret;
+ u16 fifo_cnt, i;
+
+ adis_dev_auto_lock(&st->adis);
+
+ ret = __adis_read_reg_16(adis, ADIS16575_REG_FIFO_CNT, &fifo_cnt);
+ if (ret)
+ goto unlock;
+
+ /*
+ * If no sample is available, nothing can be read. This can happen if
+ * the trigger in use has a higher frequency than the selected sample rate.
+ */
+ if (!fifo_cnt)
+ goto unlock;
+
+ /*
+ * First burst request - FIFO pop: popped data will be returned in the
+ * next burst request.
+ */
+ ret = adis16575_custom_burst_read(pf, adis->data->burst_reg_cmd);
+ if (ret)
+ goto unlock;
+
+ for (i = 0; i < fifo_cnt - 1; i++) {
+ ret = adis16475_push_single_sample(pf);
+ if (ret)
+ goto unlock;
+ }
+
+ /* FIFO read without popping */
+ ret = adis16575_custom_burst_read(pf, 0);
+
+unlock:
+ /*
+ * We only check the burst mode at the end of the current capture since
+ * reading data from registers will impact the FIFO reading.
+ */
+ adis16475_burst32_check(st);
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
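
The pop/read sequencing described in the comment above behaves like a one-deep pipeline: each burst returns the sample staged by the previous pop, and the final 0x00 0x00 request drains the last staged sample without advancing the FIFO, which is why the handler issues one priming pop, fifo_cnt - 1 pop-and-read bursts and one read-only burst. A toy model of that bookkeeping, only to make the sequencing visible (not driver code):

#include <stdio.h>

#define POP  1	/* stands in for the 0x68 0x00 FIFO pop burst request */
#define READ 0	/* stands in for the 0x00 0x00 read-without-pop request */

static int fifo[] = { 10, 11, 12 };
static int rd_idx;		/* next FIFO slot to pop */
static int pipeline = -1;	/* sample staged by the previous pop, -1 = none */

/* Returns the sample staged by the previous request; optionally pops a new one. */
static int burst(int pop)
{
	int out = pipeline;

	if (pop)
		pipeline = fifo[rd_idx++];
	return out;
}

int main(void)
{
	int fifo_cnt = 3, i;

	burst(POP);				/* prime the pipeline, no data yet */
	for (i = 0; i < fifo_cnt - 1; i++)
		printf("sample %d\n", burst(POP));	/* pop next, read previous */
	printf("sample %d\n", burst(READ));		/* drain the last one */
	return 0;
}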
@@ -1289,8 +1777,18 @@ static int adis16475_config_sync_mode(struct adis16475 *st)
struct device *dev = &st->adis.spi->dev;
const struct adis16475_sync *sync;
u32 sync_mode;
+ u16 max_sample_rate = st->info->int_clk + 100;
u16 val;
+ /* If available, enable the 4 kHz internal clock. */
+ if (st->info->int_clk == 4000) {
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16575_SYNC_4KHZ_MASK,
+ (u16)ADIS16575_SYNC_4KHZ(1));
+ if (ret)
+ return ret;
+ }
+
/* default to internal clk */
st->clk_freq = st->info->int_clk * 1000;
@@ -1329,10 +1827,9 @@ static int adis16475_config_sync_mode(struct adis16475 *st)
/*
* In sync scaled mode, the IMU sample rate is the clk_freq * sync_scale.
* Hence, default the IMU sample rate to the highest multiple of the input
- * clock lower than the IMU max sample rate. The optimal range is
- * 1900-2100 sps...
+ * clock lower than the IMU max sample rate.
*/
- up_scale = 2100 / st->clk_freq;
+ up_scale = max_sample_rate / st->clk_freq;
ret = __adis_write_reg_16(&st->adis,
ADIS16475_REG_UP_SCALE,
@@ -1370,34 +1867,69 @@ static int adis16475_config_irq_pin(struct adis16475 *st)
u8 polarity;
struct spi_device *spi = st->adis.spi;
- /*
- * It is possible to configure the data ready polarity. Furthermore, we
- * need to update the adis struct if we want data ready as active low.
- */
irq_type = irq_get_trigger_type(spi->irq);
- if (irq_type == IRQ_TYPE_EDGE_RISING) {
- polarity = 1;
- st->adis.irq_flag = IRQF_TRIGGER_RISING;
- } else if (irq_type == IRQ_TYPE_EDGE_FALLING) {
- polarity = 0;
- st->adis.irq_flag = IRQF_TRIGGER_FALLING;
+
+ if (st->adis.data->has_fifo) {
+ /*
+ * It is possible to configure the fifo watermark pin polarity.
+ * Furthermore, we need to update the adis struct if we want the
+ * watermark pin active low.
+ */
+ if (irq_type == IRQ_TYPE_LEVEL_HIGH) {
+ polarity = 1;
+ st->adis.irq_flag = IRQF_TRIGGER_HIGH;
+ } else if (irq_type == IRQ_TYPE_LEVEL_LOW) {
+ polarity = 0;
+ st->adis.irq_flag = IRQF_TRIGGER_LOW;
+ } else {
+ dev_err(&spi->dev, "Invalid interrupt type 0x%x specified\n",
+ irq_type);
+ return -EINVAL;
+ }
+
+ /* Configure the watermark pin polarity. */
+ val = ADIS16575_WM_POL(polarity);
+ ret = adis_update_bits(&st->adis, ADIS16475_REG_FIFO_CTRL,
+ ADIS16575_WM_POL_MASK, val);
+ if (ret)
+ return ret;
+
+ /* Enable watermark interrupt pin. */
+ ret = adis_update_bits(&st->adis, ADIS16475_REG_FIFO_CTRL,
+ ADIS16575_WM_EN_MASK,
+ (u16)ADIS16575_WM_EN(1));
+ if (ret)
+ return ret;
+
} else {
- dev_err(&spi->dev, "Invalid interrupt type 0x%x specified\n",
- irq_type);
- return -EINVAL;
- }
+ /*
+ * It is possible to configure the data ready polarity. Furthermore, we
+ * need to update the adis struct if we want data ready as active low.
+ */
+ if (irq_type == IRQ_TYPE_EDGE_RISING) {
+ polarity = 1;
+ st->adis.irq_flag = IRQF_TRIGGER_RISING;
+ } else if (irq_type == IRQ_TYPE_EDGE_FALLING) {
+ polarity = 0;
+ st->adis.irq_flag = IRQF_TRIGGER_FALLING;
+ } else {
+ dev_err(&spi->dev, "Invalid interrupt type 0x%x specified\n",
+ irq_type);
+ return -EINVAL;
+ }
- val = ADIS16475_MSG_CTRL_DR_POL(polarity);
- ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
- ADIS16475_MSG_CTRL_DR_POL_MASK, val);
- if (ret)
- return ret;
- /*
- * There is a delay writing to any bits written to the MSC_CTRL
- * register. It should not be bigger than 200us, so 250 should be more
- * than enough!
- */
- usleep_range(250, 260);
+ val = ADIS16475_MSG_CTRL_DR_POL(polarity);
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16475_MSG_CTRL_DR_POL_MASK, val);
+ if (ret)
+ return ret;
+ /*
+ * There is a delay writing to any bits written to the MSC_CTRL
+ * register. It should not be bigger than 200us, so 250 should be more
+ * than enough!
+ */
+ usleep_range(250, 260);
+ }
return 0;
}
@@ -1408,6 +1940,7 @@ static int adis16475_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
struct adis16475 *st;
int ret;
+ u16 val;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
@@ -1426,7 +1959,10 @@ static int adis16475_probe(struct spi_device *spi)
indio_dev->name = st->info->name;
indio_dev->channels = st->info->channels;
indio_dev->num_channels = st->info->num_channels;
- indio_dev->info = &adis16475_info;
+ if (st->adis.data->has_fifo)
+ indio_dev->info = &adis16575_info;
+ else
+ indio_dev->info = &adis16475_info;
indio_dev->modes = INDIO_DIRECT_MODE;
ret = __adis_initial_startup(&st->adis);
@@ -1441,10 +1977,26 @@ static int adis16475_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
- adis16475_trigger_handler);
- if (ret)
- return ret;
+ if (st->adis.data->has_fifo) {
+ ret = devm_adis_setup_buffer_and_trigger_with_attrs(&st->adis, indio_dev,
+ adis16475_trigger_handler_with_fifo,
+ &adis16475_buffer_ops,
+ adis16475_fifo_attributes);
+ if (ret)
+ return ret;
+
+ /* Update overflow behavior to always overwrite the oldest sample. */
+ val = ADIS16575_OVERWRITE_OLDEST;
+ ret = adis_update_bits(&st->adis, ADIS16475_REG_FIFO_CTRL,
+ ADIS16575_OVERFLOW_MASK, val);
+ if (ret)
+ return ret;
+ } else {
+ ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
+ adis16475_trigger_handler);
+ if (ret)
+ return ret;
+ }
ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
@@ -1484,6 +2036,8 @@ static const struct of_device_id adis16475_of_match[] = {
.data = &adis16475_chip_info[ADIS16467_3] },
{ .compatible = "adi,adis16500",
.data = &adis16475_chip_info[ADIS16500] },
+ { .compatible = "adi,adis16501",
+ .data = &adis16475_chip_info[ADIS16501] },
{ .compatible = "adi,adis16505-1",
.data = &adis16475_chip_info[ADIS16505_1] },
{ .compatible = "adi,adis16505-2",
@@ -1496,6 +2050,18 @@ static const struct of_device_id adis16475_of_match[] = {
.data = &adis16475_chip_info[ADIS16507_2] },
{ .compatible = "adi,adis16507-3",
.data = &adis16475_chip_info[ADIS16507_3] },
+ { .compatible = "adi,adis16575-2",
+ .data = &adis16475_chip_info[ADIS16575_2] },
+ { .compatible = "adi,adis16575-3",
+ .data = &adis16475_chip_info[ADIS16575_3] },
+ { .compatible = "adi,adis16576-2",
+ .data = &adis16475_chip_info[ADIS16576_2] },
+ { .compatible = "adi,adis16576-3",
+ .data = &adis16475_chip_info[ADIS16576_3] },
+ { .compatible = "adi,adis16577-2",
+ .data = &adis16475_chip_info[ADIS16577_2] },
+ { .compatible = "adi,adis16577-3",
+ .data = &adis16475_chip_info[ADIS16577_3] },
{ },
};
MODULE_DEVICE_TABLE(of, adis16475_of_match);
@@ -1515,12 +2081,19 @@ static const struct spi_device_id adis16475_ids[] = {
{ "adis16467-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_2] },
{ "adis16467-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_3] },
{ "adis16500", (kernel_ulong_t)&adis16475_chip_info[ADIS16500] },
+ { "adis16501", (kernel_ulong_t)&adis16475_chip_info[ADIS16501] },
{ "adis16505-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_1] },
{ "adis16505-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_2] },
{ "adis16505-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_3] },
{ "adis16507-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_1] },
{ "adis16507-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_2] },
{ "adis16507-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_3] },
+ { "adis16575-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16575_2] },
+ { "adis16575-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16575_3] },
+ { "adis16576-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16576_2] },
+ { "adis16576-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16576_3] },
+ { "adis16577-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16577_2] },
+ { "adis16577-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16577_3] },
{ }
};
MODULE_DEVICE_TABLE(spi, adis16475_ids);
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index b40a55bba30c..c59ef6f7cfd4 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -104,11 +104,10 @@
*/
#define ADIS16495_REG_SYNC_SCALE ADIS16480_REG(0x03, 0x10)
#define ADIS16495_REG_BURST_CMD ADIS16480_REG(0x00, 0x7C)
-#define ADIS16495_BURST_ID 0xA5A5
+#define ADIS16495_GYRO_ACCEL_BURST_ID 0xA5A5
+#define ADIS16545_DELTA_ANG_VEL_BURST_ID 0xC3C3
/* total number of segments in burst */
#define ADIS16495_BURST_MAX_DATA 20
-/* spi max speed in burst mode */
-#define ADIS16495_BURST_MAX_SPEED 6000000
#define ADIS16480_REG_SERIAL_NUM ADIS16480_REG(0x04, 0x20)
@@ -134,6 +133,10 @@
#define ADIS16480_SYNC_MODE_MSK BIT(8)
#define ADIS16480_SYNC_MODE(x) FIELD_PREP(ADIS16480_SYNC_MODE_MSK, x)
+#define ADIS16545_BURST_DATA_SEL_0_CHN_MASK GENMASK(5, 0)
+#define ADIS16545_BURST_DATA_SEL_1_CHN_MASK GENMASK(16, 11)
+#define ADIS16545_BURST_DATA_SEL_MASK BIT(8)
+
struct adis16480_chip_info {
unsigned int num_channels;
const struct iio_chan_spec *channels;
@@ -142,11 +145,14 @@ struct adis16480_chip_info {
unsigned int accel_max_val;
unsigned int accel_max_scale;
unsigned int temp_scale;
+ unsigned int deltang_max_val;
+ unsigned int deltvel_max_val;
unsigned int int_clk;
unsigned int max_dec_rate;
const unsigned int *filter_freqs;
bool has_pps_clk_mode;
bool has_sleep_cnt;
+ bool has_burst_delta_data;
const struct adis_data adis_data;
};
@@ -170,6 +176,7 @@ struct adis16480 {
struct clk *ext_clk;
enum adis16480_clock_mode clk_mode;
unsigned int clk_freq;
+ u16 burst_id;
/* Alignment needed for the timestamp */
__be16 data[ADIS16495_BURST_MAX_DATA] __aligned(8);
};
@@ -338,7 +345,7 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
if (t == 0)
return -EINVAL;
- adis_dev_lock(&st->adis);
+ adis_dev_auto_lock(&st->adis);
/*
* When using PPS mode, the input clock needs to be scaled so that we have an IMU
* sample rate between (optimally) 4000 and 4250. After this, we can use the
@@ -381,7 +388,7 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
sync_scale = scaled_rate / st->clk_freq;
ret = __adis_write_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, sync_scale);
if (ret)
- goto error;
+ return ret;
sample_rate = scaled_rate;
}
@@ -393,10 +400,7 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
if (t > st->chip_info->max_dec_rate)
t = st->chip_info->max_dec_rate;
- ret = __adis_write_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, t);
-error:
- adis_dev_unlock(&st->adis);
- return ret;
+ return __adis_write_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, t);
}
static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
@@ -406,23 +410,21 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
int ret;
unsigned int freq, sample_rate = st->clk_freq;
- adis_dev_lock(&st->adis);
+ adis_dev_auto_lock(&st->adis);
if (st->clk_mode == ADIS16480_CLK_PPS) {
u16 sync_scale;
ret = __adis_read_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, &sync_scale);
if (ret)
- goto error;
+ return ret;
sample_rate = st->clk_freq * sync_scale;
}
ret = __adis_read_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, &t);
if (ret)
- goto error;
-
- adis_dev_unlock(&st->adis);
+ return ret;
freq = DIV_ROUND_CLOSEST(sample_rate, (t + 1));
@@ -430,9 +432,6 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
*val2 = (freq % 1000) * 1000;
return IIO_VAL_INT_PLUS_MICRO;
-error:
- adis_dev_unlock(&st->adis);
- return ret;
}
enum {
@@ -447,6 +446,12 @@ enum {
ADIS16480_SCAN_MAGN_Z,
ADIS16480_SCAN_BARO,
ADIS16480_SCAN_TEMP,
+ ADIS16480_SCAN_DELTANG_X,
+ ADIS16480_SCAN_DELTANG_Y,
+ ADIS16480_SCAN_DELTANG_Z,
+ ADIS16480_SCAN_DELTVEL_X,
+ ADIS16480_SCAN_DELTVEL_Y,
+ ADIS16480_SCAN_DELTVEL_Z,
};
static const unsigned int adis16480_calibbias_regs[] = {
@@ -617,11 +622,11 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
offset = ad16480_filter_data[chan->scan_index][1];
enable_mask = BIT(offset + 2);
- adis_dev_lock(&st->adis);
+ adis_dev_auto_lock(&st->adis);
ret = __adis_read_reg_16(&st->adis, reg, &val);
if (ret)
- goto out_unlock;
+ return ret;
if (freq == 0) {
val &= ~enable_mask;
@@ -643,11 +648,7 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
val |= enable_mask;
}
- ret = __adis_write_reg_16(&st->adis, reg, val);
-out_unlock:
- adis_dev_unlock(&st->adis);
-
- return ret;
+ return __adis_write_reg_16(&st->adis, reg, val);
}
static int adis16480_read_raw(struct iio_dev *indio_dev,
@@ -690,6 +691,14 @@ static int adis16480_read_raw(struct iio_dev *indio_dev,
*val = 131; /* 1310mbar = 131 kPa */
*val2 = 32767 << 16;
return IIO_VAL_FRACTIONAL;
+ case IIO_DELTA_ANGL:
+ *val = st->chip_info->deltang_max_val;
+ *val2 = 31;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_DELTA_VELOCITY:
+ *val = st->chip_info->deltvel_max_val;
+ *val2 = 31;
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
return -EINVAL;
}
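
Editorial aside, not part of the patch: with IIO_VAL_FRACTIONAL_LOG2 the reported delta-angle scale works out to deltang_max_val / 2^31. A standalone sketch follows, assuming a +/-360 degree (~6.28 rad) part; the driver derives its integer part via IIO_DEGREE_TO_RAD, so this is only a ballpark figure.

#include <stdio.h>

int main(void)
{
	/* assumed: delta-angle range of +/-360 degrees, i.e. ~6.28 rad */
	double max_val_rad = 6.283185;
	double scale = max_val_rad / (double)(1UL << 31);

	/* prints roughly 2.9e-09 rad per LSB */
	printf("delta-angle scale ~= %.3e rad/LSB\n", scale);
	return 0;
}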
@@ -763,6 +772,24 @@ static int adis16480_write_raw(struct iio_dev *indio_dev,
BIT(IIO_CHAN_INFO_CALIBSCALE), \
32)
+#define ADIS16480_DELTANG_CHANNEL(_mod) \
+ ADIS16480_MOD_CHANNEL(IIO_DELTA_ANGL, IIO_MOD_ ## _mod, \
+ ADIS16480_REG_ ## _mod ## _DELTAANG_OUT, ADIS16480_SCAN_DELTANG_ ## _mod, \
+ 0, 32)
+
+#define ADIS16480_DELTANG_CHANNEL_NO_SCAN(_mod) \
+ ADIS16480_MOD_CHANNEL(IIO_DELTA_ANGL, IIO_MOD_ ## _mod, \
+ ADIS16480_REG_ ## _mod ## _DELTAANG_OUT, -1, 0, 32)
+
+#define ADIS16480_DELTVEL_CHANNEL(_mod) \
+ ADIS16480_MOD_CHANNEL(IIO_DELTA_VELOCITY, IIO_MOD_ ## _mod, \
+ ADIS16480_REG_ ## _mod ## _DELTAVEL_OUT, ADIS16480_SCAN_DELTVEL_ ## _mod, \
+ 0, 32)
+
+#define ADIS16480_DELTVEL_CHANNEL_NO_SCAN(_mod) \
+ ADIS16480_MOD_CHANNEL(IIO_DELTA_VELOCITY, IIO_MOD_ ## _mod, \
+ ADIS16480_REG_ ## _mod ## _DELTAVEL_OUT, -1, 0, 32)
+
#define ADIS16480_MAGN_CHANNEL(_mod) \
ADIS16480_MOD_CHANNEL(IIO_MAGN, IIO_MOD_ ## _mod, \
ADIS16480_REG_ ## _mod ## _MAGN_OUT, ADIS16480_SCAN_MAGN_ ## _mod, \
@@ -818,7 +845,13 @@ static const struct iio_chan_spec adis16480_channels[] = {
ADIS16480_MAGN_CHANNEL(Z),
ADIS16480_PRESSURE_CHANNEL(),
ADIS16480_TEMP_CHANNEL(),
- IIO_CHAN_SOFT_TIMESTAMP(11)
+ IIO_CHAN_SOFT_TIMESTAMP(11),
+ ADIS16480_DELTANG_CHANNEL_NO_SCAN(X),
+ ADIS16480_DELTANG_CHANNEL_NO_SCAN(Y),
+ ADIS16480_DELTANG_CHANNEL_NO_SCAN(Z),
+ ADIS16480_DELTVEL_CHANNEL_NO_SCAN(X),
+ ADIS16480_DELTVEL_CHANNEL_NO_SCAN(Y),
+ ADIS16480_DELTVEL_CHANNEL_NO_SCAN(Z),
};
static const struct iio_chan_spec adis16485_channels[] = {
@@ -829,7 +862,30 @@ static const struct iio_chan_spec adis16485_channels[] = {
ADIS16480_ACCEL_CHANNEL(Y),
ADIS16480_ACCEL_CHANNEL(Z),
ADIS16480_TEMP_CHANNEL(),
- IIO_CHAN_SOFT_TIMESTAMP(7)
+ IIO_CHAN_SOFT_TIMESTAMP(7),
+ ADIS16480_DELTANG_CHANNEL_NO_SCAN(X),
+ ADIS16480_DELTANG_CHANNEL_NO_SCAN(Y),
+ ADIS16480_DELTANG_CHANNEL_NO_SCAN(Z),
+ ADIS16480_DELTVEL_CHANNEL_NO_SCAN(X),
+ ADIS16480_DELTVEL_CHANNEL_NO_SCAN(Y),
+ ADIS16480_DELTVEL_CHANNEL_NO_SCAN(Z),
+};
+
+static const struct iio_chan_spec adis16545_channels[] = {
+ ADIS16480_GYRO_CHANNEL(X),
+ ADIS16480_GYRO_CHANNEL(Y),
+ ADIS16480_GYRO_CHANNEL(Z),
+ ADIS16480_ACCEL_CHANNEL(X),
+ ADIS16480_ACCEL_CHANNEL(Y),
+ ADIS16480_ACCEL_CHANNEL(Z),
+ ADIS16480_TEMP_CHANNEL(),
+ ADIS16480_DELTANG_CHANNEL(X),
+ ADIS16480_DELTANG_CHANNEL(Y),
+ ADIS16480_DELTANG_CHANNEL(Z),
+ ADIS16480_DELTVEL_CHANNEL(X),
+ ADIS16480_DELTVEL_CHANNEL(Y),
+ ADIS16480_DELTVEL_CHANNEL(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(17),
};
enum adis16480_variant {
@@ -844,6 +900,12 @@ enum adis16480_variant {
ADIS16497_1,
ADIS16497_2,
ADIS16497_3,
+ ADIS16545_1,
+ ADIS16545_2,
+ ADIS16545_3,
+ ADIS16547_1,
+ ADIS16547_2,
+ ADIS16547_3
};
#define ADIS16480_DIAG_STAT_XGYRO_FAIL 0
@@ -872,33 +934,33 @@ static const char * const adis16480_status_error_msgs[] = {
static int adis16480_enable_irq(struct adis *adis, bool enable);
-#define ADIS16480_DATA(_prod_id, _timeouts, _burst_len) \
-{ \
- .diag_stat_reg = ADIS16480_REG_DIAG_STS, \
- .glob_cmd_reg = ADIS16480_REG_GLOB_CMD, \
- .prod_id_reg = ADIS16480_REG_PROD_ID, \
- .prod_id = (_prod_id), \
- .has_paging = true, \
- .read_delay = 5, \
- .write_delay = 5, \
- .self_test_mask = BIT(1), \
- .self_test_reg = ADIS16480_REG_GLOB_CMD, \
- .status_error_msgs = adis16480_status_error_msgs, \
- .status_error_mask = BIT(ADIS16480_DIAG_STAT_XGYRO_FAIL) | \
- BIT(ADIS16480_DIAG_STAT_YGYRO_FAIL) | \
- BIT(ADIS16480_DIAG_STAT_ZGYRO_FAIL) | \
- BIT(ADIS16480_DIAG_STAT_XACCL_FAIL) | \
- BIT(ADIS16480_DIAG_STAT_YACCL_FAIL) | \
- BIT(ADIS16480_DIAG_STAT_ZACCL_FAIL) | \
- BIT(ADIS16480_DIAG_STAT_XMAGN_FAIL) | \
- BIT(ADIS16480_DIAG_STAT_YMAGN_FAIL) | \
- BIT(ADIS16480_DIAG_STAT_ZMAGN_FAIL) | \
- BIT(ADIS16480_DIAG_STAT_BARO_FAIL), \
- .enable_irq = adis16480_enable_irq, \
- .timeouts = (_timeouts), \
- .burst_reg_cmd = ADIS16495_REG_BURST_CMD, \
- .burst_len = (_burst_len), \
- .burst_max_speed_hz = ADIS16495_BURST_MAX_SPEED \
+#define ADIS16480_DATA(_prod_id, _timeouts, _burst_len, _burst_max_speed) \
+{ \
+ .diag_stat_reg = ADIS16480_REG_DIAG_STS, \
+ .glob_cmd_reg = ADIS16480_REG_GLOB_CMD, \
+ .prod_id_reg = ADIS16480_REG_PROD_ID, \
+ .prod_id = (_prod_id), \
+ .has_paging = true, \
+ .read_delay = 5, \
+ .write_delay = 5, \
+ .self_test_mask = BIT(1), \
+ .self_test_reg = ADIS16480_REG_GLOB_CMD, \
+ .status_error_msgs = adis16480_status_error_msgs, \
+ .status_error_mask = BIT(ADIS16480_DIAG_STAT_XGYRO_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_YGYRO_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_ZGYRO_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_XACCL_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_YACCL_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_ZACCL_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_XMAGN_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_YMAGN_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_ZMAGN_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_BARO_FAIL), \
+ .enable_irq = adis16480_enable_irq, \
+ .timeouts = (_timeouts), \
+ .burst_reg_cmd = ADIS16495_REG_BURST_CMD, \
+ .burst_len = (_burst_len), \
+ .burst_max_speed_hz = _burst_max_speed \
}
static const struct adis_timeout adis16485_timeouts = {
@@ -925,6 +987,12 @@ static const struct adis_timeout adis16495_1_timeouts = {
.self_test_ms = 20,
};
+static const struct adis_timeout adis16545_timeouts = {
+ .reset_ms = 315,
+ .sw_reset_ms = 270,
+ .self_test_ms = 35,
+};
+
static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16375] = {
.channels = adis16485_channels,
@@ -940,11 +1008,13 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.accel_max_val = IIO_M_S_2_TO_G(21973 << 16),
.accel_max_scale = 18,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(180),
+ .deltvel_max_val = 100,
.int_clk = 2460000,
.max_dec_rate = 2048,
.has_sleep_cnt = true,
.filter_freqs = adis16480_def_filter_freqs,
- .adis_data = ADIS16480_DATA(16375, &adis16485_timeouts, 0),
+ .adis_data = ADIS16480_DATA(16375, &adis16485_timeouts, 0, 0),
},
[ADIS16480] = {
.channels = adis16480_channels,
@@ -954,11 +1024,13 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.accel_max_val = IIO_M_S_2_TO_G(12500 << 16),
.accel_max_scale = 10,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 200,
.int_clk = 2460000,
.max_dec_rate = 2048,
.has_sleep_cnt = true,
.filter_freqs = adis16480_def_filter_freqs,
- .adis_data = ADIS16480_DATA(16480, &adis16480_timeouts, 0),
+ .adis_data = ADIS16480_DATA(16480, &adis16480_timeouts, 0, 0),
},
[ADIS16485] = {
.channels = adis16485_channels,
@@ -968,11 +1040,13 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.accel_max_val = IIO_M_S_2_TO_G(20000 << 16),
.accel_max_scale = 5,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 50,
.int_clk = 2460000,
.max_dec_rate = 2048,
.has_sleep_cnt = true,
.filter_freqs = adis16480_def_filter_freqs,
- .adis_data = ADIS16480_DATA(16485, &adis16485_timeouts, 0),
+ .adis_data = ADIS16480_DATA(16485, &adis16485_timeouts, 0, 0),
},
[ADIS16488] = {
.channels = adis16480_channels,
@@ -982,11 +1056,13 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.accel_max_val = IIO_M_S_2_TO_G(22500 << 16),
.accel_max_scale = 18,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 200,
.int_clk = 2460000,
.max_dec_rate = 2048,
.has_sleep_cnt = true,
.filter_freqs = adis16480_def_filter_freqs,
- .adis_data = ADIS16480_DATA(16488, &adis16485_timeouts, 0),
+ .adis_data = ADIS16480_DATA(16488, &adis16485_timeouts, 0, 0),
},
[ADIS16490] = {
.channels = adis16485_channels,
@@ -996,11 +1072,13 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.accel_max_val = IIO_M_S_2_TO_G(16000 << 16),
.accel_max_scale = 8,
.temp_scale = 14285, /* 14.285 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 200,
.int_clk = 4250000,
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .adis_data = ADIS16480_DATA(16490, &adis16495_timeouts, 0),
+ .adis_data = ADIS16480_DATA(16490, &adis16495_timeouts, 0, 0),
},
[ADIS16495_1] = {
.channels = adis16485_channels,
@@ -1010,13 +1088,16 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 8,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(360),
+ .deltvel_max_val = 100,
.int_clk = 4250000,
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
/* 20 elements of 16bits */
.adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts,
- ADIS16495_BURST_MAX_DATA * 2),
+ ADIS16495_BURST_MAX_DATA * 2,
+ 6000000),
},
[ADIS16495_2] = {
.channels = adis16485_channels,
@@ -1026,13 +1107,16 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 8,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 100,
.int_clk = 4250000,
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
/* 20 elements of 16bits */
.adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts,
- ADIS16495_BURST_MAX_DATA * 2),
+ ADIS16495_BURST_MAX_DATA * 2,
+ 6000000),
},
[ADIS16495_3] = {
.channels = adis16485_channels,
@@ -1042,13 +1126,16 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 8,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2160),
+ .deltvel_max_val = 100,
.int_clk = 4250000,
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
/* 20 elements of 16bits */
.adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts,
- ADIS16495_BURST_MAX_DATA * 2),
+ ADIS16495_BURST_MAX_DATA * 2,
+ 6000000),
},
[ADIS16497_1] = {
.channels = adis16485_channels,
@@ -1058,13 +1145,16 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 40,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(360),
+ .deltvel_max_val = 400,
.int_clk = 4250000,
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
/* 20 elements of 16bits */
.adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts,
- ADIS16495_BURST_MAX_DATA * 2),
+ ADIS16495_BURST_MAX_DATA * 2,
+ 6000000),
},
[ADIS16497_2] = {
.channels = adis16485_channels,
@@ -1074,13 +1164,16 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 40,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 400,
.int_clk = 4250000,
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
/* 20 elements of 16bits */
.adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts,
- ADIS16495_BURST_MAX_DATA * 2),
+ ADIS16495_BURST_MAX_DATA * 2,
+ 6000000),
},
[ADIS16497_3] = {
.channels = adis16485_channels,
@@ -1090,13 +1183,136 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 40,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2160),
+ .deltvel_max_val = 400,
.int_clk = 4250000,
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
/* 20 elements of 16bits */
.adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts,
- ADIS16495_BURST_MAX_DATA * 2),
+ ADIS16495_BURST_MAX_DATA * 2,
+ 6000000),
+ },
+ [ADIS16545_1] = {
+ .channels = adis16545_channels,
+ .num_channels = ARRAY_SIZE(adis16545_channels),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(125),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
+ .accel_max_scale = 8,
+ .temp_scale = 7000, /* 7 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(360),
+ .deltvel_max_val = 100,
+ .int_clk = 4250000,
+ .max_dec_rate = 4250,
+ .filter_freqs = adis16495_def_filter_freqs,
+ .has_pps_clk_mode = true,
+ .has_burst_delta_data = true,
+ /* 20 elements of 16bits */
+ .adis_data = ADIS16480_DATA(16545, &adis16545_timeouts,
+ ADIS16495_BURST_MAX_DATA * 2,
+ 6500000),
+ },
+ [ADIS16545_2] = {
+ .channels = adis16545_channels,
+ .num_channels = ARRAY_SIZE(adis16545_channels),
+ .gyro_max_val = 18000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
+ .accel_max_scale = 8,
+ .temp_scale = 7000, /* 7 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 100,
+ .int_clk = 4250000,
+ .max_dec_rate = 4250,
+ .filter_freqs = adis16495_def_filter_freqs,
+ .has_pps_clk_mode = true,
+ .has_burst_delta_data = true,
+ /* 20 elements of 16bits */
+ .adis_data = ADIS16480_DATA(16545, &adis16545_timeouts,
+ ADIS16495_BURST_MAX_DATA * 2,
+ 6500000),
+ },
+ [ADIS16545_3] = {
+ .channels = adis16545_channels,
+ .num_channels = ARRAY_SIZE(adis16545_channels),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(2000),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
+ .accel_max_scale = 8,
+ .temp_scale = 7000, /* 7 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2160),
+ .deltvel_max_val = 100,
+ .int_clk = 4250000,
+ .max_dec_rate = 4250,
+ .filter_freqs = adis16495_def_filter_freqs,
+ .has_pps_clk_mode = true,
+ .has_burst_delta_data = true,
+ /* 20 elements of 16bits */
+ .adis_data = ADIS16480_DATA(16545, &adis16545_timeouts,
+ ADIS16495_BURST_MAX_DATA * 2,
+ 6500000),
+ },
+ [ADIS16547_1] = {
+ .channels = adis16545_channels,
+ .num_channels = ARRAY_SIZE(adis16545_channels),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(125),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
+ .accel_max_scale = 40,
+ .temp_scale = 7000, /* 7 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(360),
+ .deltvel_max_val = 400,
+ .int_clk = 4250000,
+ .max_dec_rate = 4250,
+ .filter_freqs = adis16495_def_filter_freqs,
+ .has_pps_clk_mode = true,
+ .has_burst_delta_data = true,
+ /* 20 elements of 16bits */
+ .adis_data = ADIS16480_DATA(16547, &adis16545_timeouts,
+ ADIS16495_BURST_MAX_DATA * 2,
+ 6500000),
+ },
+ [ADIS16547_2] = {
+ .channels = adis16545_channels,
+ .num_channels = ARRAY_SIZE(adis16545_channels),
+ .gyro_max_val = 18000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
+ .accel_max_scale = 40,
+ .temp_scale = 7000, /* 7 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 400,
+ .int_clk = 4250000,
+ .max_dec_rate = 4250,
+ .filter_freqs = adis16495_def_filter_freqs,
+ .has_pps_clk_mode = true,
+ .has_burst_delta_data = true,
+ /* 20 elements of 16bits */
+ .adis_data = ADIS16480_DATA(16547, &adis16545_timeouts,
+ ADIS16495_BURST_MAX_DATA * 2,
+ 6500000),
+ },
+ [ADIS16547_3] = {
+ .channels = adis16545_channels,
+ .num_channels = ARRAY_SIZE(adis16545_channels),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(2000),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
+ .accel_max_scale = 40,
+ .temp_scale = 7000, /* 7 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2160),
+ .deltvel_max_val = 400,
+ .int_clk = 4250000,
+ .max_dec_rate = 4250,
+ .filter_freqs = adis16495_def_filter_freqs,
+ .has_pps_clk_mode = true,
+ .has_burst_delta_data = true,
+ /* 20 elements of 16bits */
+ .adis_data = ADIS16480_DATA(16547, &adis16545_timeouts,
+ ADIS16495_BURST_MAX_DATA * 2,
+ 6500000),
},
};
@@ -1122,41 +1338,38 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
struct adis16480 *st = iio_priv(indio_dev);
struct adis *adis = &st->adis;
struct device *dev = &adis->spi->dev;
- int ret, bit, offset, i = 0;
+ int ret, bit, offset, i = 0, buff_offset = 0;
__be16 *buffer;
u32 crc;
bool valid;
- adis_dev_lock(adis);
- if (adis->current_page != 0) {
- adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
- adis->tx[1] = 0;
- ret = spi_write(adis->spi, adis->tx, 2);
+ adis_dev_auto_scoped_lock(adis) {
+ if (adis->current_page != 0) {
+ adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
+ adis->tx[1] = 0;
+ ret = spi_write(adis->spi, adis->tx, 2);
+ if (ret) {
+ dev_err(dev, "Failed to change device page: %d\n", ret);
+ goto irq_done;
+ }
+
+ adis->current_page = 0;
+ }
+
+ ret = spi_sync(adis->spi, &adis->msg);
if (ret) {
- dev_err(dev, "Failed to change device page: %d\n", ret);
- adis_dev_unlock(adis);
+ dev_err(dev, "Failed to read data: %d\n", ret);
goto irq_done;
}
-
- adis->current_page = 0;
- }
-
- ret = spi_sync(adis->spi, &adis->msg);
- if (ret) {
- dev_err(dev, "Failed to read data: %d\n", ret);
- adis_dev_unlock(adis);
- goto irq_done;
}
- adis_dev_unlock(adis);
-
/*
* After making the burst request, the response can have one or two
* 16-bit responses containing the BURST_ID depending on the sclk. If
* clk > 3.6MHz, then we will have two BURST_ID in a row. If clk < 3MHZ,
* we have only one. To manage that variation, we use the transition from the
- * BURST_ID to the SYS_E_FLAG register, which will not be equal to 0xA5A5. If
- * we not find this variation in the first 4 segments, then the data should
+ * BURST_ID to the SYS_E_FLAG register, which will not be equal to 0xA5A5/0xC3C3.
+ * If we do not find this variation in the first 4 segments, then the data should
* not be valid.
*/
buffer = adis->buffer;
@@ -1164,7 +1377,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
u16 curr = be16_to_cpu(buffer[offset]);
u16 next = be16_to_cpu(buffer[offset + 1]);
- if (curr == ADIS16495_BURST_ID && next != ADIS16495_BURST_ID) {
+ if (curr == st->burst_id && next != st->burst_id) {
offset++;
break;
}
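
A minimal standalone sketch (not kernel code; loop bound and buffer contents are assumed) of the scan described in the comment above: the burst reply may start with one or two BURST_ID words depending on SCLK, so the handler looks within the first four 16-bit segments for the point where a BURST_ID word is followed by something else, which is where the payload (SYS_E_FLAG onwards) begins.

#include <stdint.h>
#include <stdio.h>

static int find_payload_offset(const uint16_t *buf, uint16_t burst_id)
{
	/* only the first 4 segments may legitimately carry the BURST_ID */
	for (int offset = 0; offset < 3; offset++) {
		if (buf[offset] == burst_id && buf[offset + 1] != burst_id)
			return offset + 1;	/* first non-ID word */
	}
	return -1;				/* no transition: invalid burst */
}

int main(void)
{
	/* fast SCLK case: two BURST_ID words, then SYS_E_FLAG, then data */
	const uint16_t buf[] = { 0xA5A5, 0xA5A5, 0x0000, 0x1234 };

	printf("payload starts at segment %d\n", find_payload_offset(buf, 0xA5A5));
	return 0;
}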
@@ -1191,11 +1404,24 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
switch (bit) {
case ADIS16480_SCAN_TEMP:
st->data[i++] = buffer[offset + 1];
+ /*
+ * The temperature channel has 16-bit storage size.
+ * We need to perform the padding to have the buffer
+ * elements naturally aligned in case there are any
+ * 32-bit storage size channels enabled which are added
+ * in the buffer after the temperature data. In case
+ * there is no data being added after the temperature
+ * data, the padding is harmless.
+ */
+ st->data[i++] = 0;
break;
+ case ADIS16480_SCAN_DELTANG_X ... ADIS16480_SCAN_DELTVEL_Z:
+ buff_offset = ADIS16480_SCAN_DELTANG_X;
+ fallthrough;
case ADIS16480_SCAN_GYRO_X ... ADIS16480_SCAN_ACCEL_Z:
/* The lower register data is sequenced first */
- st->data[i++] = buffer[2 * bit + offset + 3];
- st->data[i++] = buffer[2 * bit + offset + 2];
+ st->data[i++] = buffer[2 * (bit - buff_offset) + offset + 3];
+ st->data[i++] = buffer[2 * (bit - buff_offset) + offset + 2];
break;
}
}
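
Illustration only, with invented sample values: the pad word added after the 16-bit temperature sample keeps any 32-bit delta sample that follows it naturally aligned in the pushed scan.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t data[4];
	unsigned int i = 0;

	data[i++] = 0x1234;	/* temperature, 16-bit storage          */
	data[i++] = 0;		/* pad word, as inserted by the handler */
	data[i++] = 0xBEEF;	/* 32-bit delta sample, first 16 bits   */
	data[i++] = 0xCAFE;	/* 32-bit delta sample, second 16 bits  */

	/* the 32-bit sample starts at byte 4, i.e. on its natural boundary */
	printf("32-bit sample starts at byte %u\n", 2u * 2u);
	return 0;
}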
@@ -1207,10 +1433,41 @@ irq_done:
return IRQ_HANDLED;
}
+static const unsigned long adis16545_channel_masks[] = {
+ ADIS16545_BURST_DATA_SEL_0_CHN_MASK | BIT(ADIS16480_SCAN_TEMP) | BIT(17),
+ ADIS16545_BURST_DATA_SEL_1_CHN_MASK | BIT(ADIS16480_SCAN_TEMP) | BIT(17),
+ 0,
+};
+
+static int adis16480_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ u16 en;
+ int ret;
+ struct adis16480 *st = iio_priv(indio_dev);
+
+ if (st->chip_info->has_burst_delta_data) {
+ if (*scan_mask & ADIS16545_BURST_DATA_SEL_0_CHN_MASK) {
+ en = FIELD_PREP(ADIS16545_BURST_DATA_SEL_MASK, 0);
+ st->burst_id = ADIS16495_GYRO_ACCEL_BURST_ID;
+ } else {
+ en = FIELD_PREP(ADIS16545_BURST_DATA_SEL_MASK, 1);
+ st->burst_id = ADIS16545_DELTA_ANG_VEL_BURST_ID;
+ }
+
+ ret = __adis_update_bits(&st->adis, ADIS16480_REG_CONFIG,
+ ADIS16545_BURST_DATA_SEL_MASK, en);
+ if (ret)
+ return ret;
+ }
+
+ return adis_update_scan_mode(indio_dev, scan_mask);
+}
+
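
A standalone sketch of the burst-selection rule that adis16480_update_scan_mode() implements above; the two mask constants mirror ADIS16545_BURST_DATA_SEL_0/1_CHN_MASK (GENMASK(5, 0) and GENMASK(16, 11)) and the burst IDs are the ones defined earlier in this patch.

#include <stdio.h>

#define SEL0_MASK 0x0000003FUL	/* gyro + accel scan bits  */
#define SEL1_MASK 0x0001F800UL	/* delta ang/vel scan bits */

int main(void)
{
	unsigned long scan_mask = 0x7UL;	/* e.g. gyro X/Y/Z enabled */

	if (scan_mask & SEL0_MASK)
		printf("request burst 0xA5A5 (gyro/accel data)\n");
	else if (scan_mask & SEL1_MASK)
		printf("request burst 0xC3C3 (delta angle/velocity data)\n");
	return 0;
}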
static const struct iio_info adis16480_info = {
.read_raw = &adis16480_read_raw,
.write_raw = &adis16480_write_raw,
- .update_scan_mode = adis_update_scan_mode,
+ .update_scan_mode = &adis16480_update_scan_mode,
.debugfs_reg_access = adis_debugfs_reg_access,
};
@@ -1407,6 +1664,8 @@ static int adis16480_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
+ if (st->chip_info->has_burst_delta_data)
+ indio_dev->available_scan_masks = adis16545_channel_masks;
indio_dev->info = &adis16480_info;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -1420,6 +1679,13 @@ static int adis16480_probe(struct spi_device *spi)
if (ret)
return ret;
+ /*
+ * By default, use burst id for gyroscope and accelerometer data.
+ * This is the only option for devices which do not offer delta angle
+ * and delta velocity burst readings.
+ */
+ st->burst_id = ADIS16495_GYRO_ACCEL_BURST_ID;
+
if (st->chip_info->has_sleep_cnt) {
ret = devm_add_action_or_reset(dev, adis16480_stop, indio_dev);
if (ret)
@@ -1493,6 +1759,12 @@ static const struct spi_device_id adis16480_ids[] = {
{ "adis16497-1", ADIS16497_1 },
{ "adis16497-2", ADIS16497_2 },
{ "adis16497-3", ADIS16497_3 },
+ { "adis16545-1", ADIS16545_1 },
+ { "adis16545-2", ADIS16545_2 },
+ { "adis16545-3", ADIS16545_3 },
+ { "adis16547-1", ADIS16547_1 },
+ { "adis16547-2", ADIS16547_2 },
+ { "adis16547-3", ADIS16547_3 },
{ }
};
MODULE_DEVICE_TABLE(spi, adis16480_ids);
@@ -1509,6 +1781,12 @@ static const struct of_device_id adis16480_of_match[] = {
{ .compatible = "adi,adis16497-1" },
{ .compatible = "adi,adis16497-2" },
{ .compatible = "adi,adis16497-3" },
+ { .compatible = "adi,adis16545-1" },
+ { .compatible = "adi,adis16545-2" },
+ { .compatible = "adi,adis16545-3" },
+ { .compatible = "adi,adis16547-1" },
+ { .compatible = "adi,adis16547-2" },
+ { .compatible = "adi,adis16547-3" },
{ },
};
MODULE_DEVICE_TABLE(of, adis16480_of_match);
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index 928933027ae3..b7c1cc04492a 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -126,6 +126,26 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL_NS_GPL(adis_update_scan_mode, IIO_ADISLIB);
+static int adis_paging_trigger_handler(struct adis *adis)
+{
+ int ret;
+
+ guard(mutex)(&adis->state_lock);
+ if (adis->current_page != 0) {
+ adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
+ adis->tx[1] = 0;
+ ret = spi_write(adis->spi, adis->tx, 2);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Failed to change device page: %d\n", ret);
+ return ret;
+ }
+
+ adis->current_page = 0;
+ }
+
+ return spi_sync(adis->spi, &adis->msg);
+}
+
static irqreturn_t adis_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
@@ -133,25 +153,10 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
struct adis *adis = iio_device_get_drvdata(indio_dev);
int ret;
- if (adis->data->has_paging) {
- mutex_lock(&adis->state_lock);
- if (adis->current_page != 0) {
- adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
- adis->tx[1] = 0;
- ret = spi_write(adis->spi, adis->tx, 2);
- if (ret) {
- dev_err(&adis->spi->dev, "Failed to change device page: %d\n", ret);
- mutex_unlock(&adis->state_lock);
- goto irq_done;
- }
-
- adis->current_page = 0;
- }
- }
-
- ret = spi_sync(adis->spi, &adis->msg);
if (adis->data->has_paging)
- mutex_unlock(&adis->state_lock);
+ ret = adis_paging_trigger_handler(adis);
+ else
+ ret = spi_sync(adis->spi, &adis->msg);
if (ret) {
dev_err(&adis->spi->dev, "Failed to read data: %d", ret);
goto irq_done;
@@ -175,31 +180,36 @@ static void adis_buffer_cleanup(void *arg)
}
/**
- * devm_adis_setup_buffer_and_trigger() - Sets up buffer and trigger for
- * the managed adis device
+ * devm_adis_setup_buffer_and_trigger_with_attrs() - Sets up buffer and trigger
+ * for the managed adis device with buffer attributes.
* @adis: The adis device
* @indio_dev: The IIO device
- * @trigger_handler: Optional trigger handler, may be NULL.
+ * @trigger_handler: Trigger handler; should handle the buffer readings.
+ * @ops: Optional buffer setup functions, may be NULL.
+ * @buffer_attrs: Extra buffer attributes.
*
* Returns 0 on success, a negative error code otherwise.
*
- * This function sets up the buffer and trigger for a adis devices. If
- * 'trigger_handler' is NULL the default trigger handler will be used. The
- * default trigger handler will simply read the registers assigned to the
- * currently active channels.
+ * This function sets up the buffer (with the given buffer setup functions
+ * and extra buffer attributes) and the trigger for an adis device.
*/
int
-devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
- irq_handler_t trigger_handler)
+devm_adis_setup_buffer_and_trigger_with_attrs(struct adis *adis, struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler,
+ const struct iio_buffer_setup_ops *ops,
+ const struct iio_dev_attr **buffer_attrs)
{
int ret;
if (!trigger_handler)
trigger_handler = adis_trigger_handler;
- ret = devm_iio_triggered_buffer_setup(&adis->spi->dev, indio_dev,
- &iio_pollfunc_store_time,
- trigger_handler, NULL);
+ ret = devm_iio_triggered_buffer_setup_ext(&adis->spi->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ trigger_handler,
+ IIO_BUFFER_DIRECTION_IN,
+ ops,
+ buffer_attrs);
if (ret)
return ret;
@@ -212,5 +222,4 @@ devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
return devm_add_action_or_reset(&adis->spi->dev, adis_buffer_cleanup,
adis);
}
-EXPORT_SYMBOL_NS_GPL(devm_adis_setup_buffer_and_trigger, IIO_ADISLIB);
-
+EXPORT_SYMBOL_NS_GPL(devm_adis_setup_buffer_and_trigger_with_attrs, IIO_ADISLIB);
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index f890bf842db8..a8740b043cfe 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -34,17 +34,24 @@ static int adis_validate_irq_flag(struct adis *adis)
if (adis->data->unmasked_drdy)
adis->irq_flag |= IRQF_NO_AUTOEN;
/*
- * Typically this devices have data ready either on the rising edge or
- * on the falling edge of the data ready pin. This checks enforces that
- * one of those is set in the drivers... It defaults to
- * IRQF_TRIGGER_RISING for backward compatibility with devices that
- * don't support changing the pin polarity.
+ * Typically adis devices without FIFO have data ready either on the
+ * rising edge or on the falling edge of the data ready pin.
+ * IMU devices with FIFO support have the watermark pin level driven
+ * either high or low when the FIFO is filled with the desired number
+ * of samples.
+ * It defaults to IRQF_TRIGGER_RISING for backward compatibility with
+ * devices that don't support changing the pin polarity.
*/
if (direction == IRQF_TRIGGER_NONE) {
adis->irq_flag |= IRQF_TRIGGER_RISING;
return 0;
} else if (direction != IRQF_TRIGGER_RISING &&
- direction != IRQF_TRIGGER_FALLING) {
+ direction != IRQF_TRIGGER_FALLING && !adis->data->has_fifo) {
+ dev_err(&adis->spi->dev, "Invalid IRQ mask: %08lx\n",
+ adis->irq_flag);
+ return -EINVAL;
+ } else if (direction != IRQF_TRIGGER_HIGH &&
+ direction != IRQF_TRIGGER_LOW && adis->data->has_fifo) {
dev_err(&adis->spi->dev, "Invalid IRQ mask: %08lx\n",
adis->irq_flag);
return -EINVAL;
@@ -77,11 +84,19 @@ int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
if (ret)
return ret;
- ret = devm_request_irq(&adis->spi->dev, adis->spi->irq,
- &iio_trigger_generic_data_rdy_poll,
- adis->irq_flag,
- indio_dev->name,
- adis->trig);
+ if (adis->data->has_fifo)
+ ret = devm_request_threaded_irq(&adis->spi->dev, adis->spi->irq,
+ NULL,
+ &iio_trigger_generic_data_rdy_poll,
+ adis->irq_flag | IRQF_ONESHOT,
+ indio_dev->name,
+ adis->trig);
+ else
+ ret = devm_request_irq(&adis->spi->dev, adis->spi->irq,
+ &iio_trigger_generic_data_rdy_poll,
+ adis->irq_flag,
+ indio_dev->name,
+ adis->trig);
if (ret)
return ret;
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index a77f1a8348ff..90aa04d94da5 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -26,6 +26,7 @@
#include "bmi160.h"
#define BMI160_REG_CHIP_ID 0x00
+#define BMI120_CHIP_ID_VAL 0xD3
#define BMI160_CHIP_ID_VAL 0xD1
#define BMI160_REG_PMU_STATUS 0x03
@@ -112,6 +113,11 @@
.ext_info = bmi160_ext_info, \
}
+static const u8 bmi_chip_ids[] = {
+ BMI120_CHIP_ID_VAL,
+ BMI160_CHIP_ID_VAL,
+};
+
/* scan indexes follow DATA register order */
enum bmi160_scan_axis {
BMI160_SCAN_EXT_MAGN_X = 0,
@@ -704,6 +710,16 @@ static int bmi160_setup_irq(struct iio_dev *indio_dev, int irq,
return bmi160_probe_trigger(indio_dev, irq, irq_type);
}
+static int bmi160_check_chip_id(const u8 chip_id)
+{
+ for (int i = 0; i < ARRAY_SIZE(bmi_chip_ids); i++) {
+ if (chip_id == bmi_chip_ids[i])
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
{
int ret;
@@ -737,12 +753,10 @@ static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
dev_err(dev, "Error reading chip id\n");
goto disable_regulator;
}
- if (val != BMI160_CHIP_ID_VAL) {
- dev_err(dev, "Wrong chip id, got %x expected %x\n",
- val, BMI160_CHIP_ID_VAL);
- ret = -ENODEV;
- goto disable_regulator;
- }
+
+ ret = bmi160_check_chip_id(val);
+ if (ret)
+ dev_warn(dev, "Chip id not found: %x\n", val);
ret = bmi160_set_mode(data, BMI160_ACCEL, true);
if (ret)
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
index a081305254db..3aa5d748f9b6 100644
--- a/drivers/iio/imu/bmi160/bmi160_i2c.c
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -37,7 +37,8 @@ static int bmi160_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id bmi160_i2c_id[] = {
- {"bmi160", 0},
+ { "bmi120" },
+ { "bmi160" },
{}
};
MODULE_DEVICE_TABLE(i2c, bmi160_i2c_id);
@@ -52,12 +53,14 @@ static const struct acpi_device_id bmi160_acpi_match[] = {
* the affected devices are from 2021/2022.
*/
{"10EC5280", 0},
+ {"BMI0120", 0},
{"BMI0160", 0},
{ },
};
MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
static const struct of_device_id bmi160_of_match[] = {
+ { .compatible = "bosch,bmi120" },
{ .compatible = "bosch,bmi160" },
{ },
};
diff --git a/drivers/iio/imu/bmi160/bmi160_spi.c b/drivers/iio/imu/bmi160/bmi160_spi.c
index 8b573ea99af2..9f40500132f7 100644
--- a/drivers/iio/imu/bmi160/bmi160_spi.c
+++ b/drivers/iio/imu/bmi160/bmi160_spi.c
@@ -34,18 +34,21 @@ static int bmi160_spi_probe(struct spi_device *spi)
}
static const struct spi_device_id bmi160_spi_id[] = {
+ {"bmi120", 0},
{"bmi160", 0},
{}
};
MODULE_DEVICE_TABLE(spi, bmi160_spi_id);
static const struct acpi_device_id bmi160_acpi_match[] = {
+ {"BMI0120", 0},
{"BMI0160", 0},
{ },
};
MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
static const struct of_device_id bmi160_of_match[] = {
+ { .compatible = "bosch,bmi120" },
{ .compatible = "bosch,bmi160" },
{ },
};
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
index 5d42ab9b176a..d708d1fe3e42 100644
--- a/drivers/iio/imu/bmi323/bmi323_core.c
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -1391,7 +1391,7 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p)
&data->buffer.channels,
ARRAY_SIZE(data->buffer.channels));
if (ret)
- return IRQ_NONE;
+ goto out;
} else {
for_each_set_bit(bit, indio_dev->active_scan_mask,
BMI323_CHAN_MAX) {
@@ -1400,13 +1400,14 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p)
&data->buffer.channels[index++],
BMI323_BYTES_PER_SAMPLE);
if (ret)
- return IRQ_NONE;
+ goto out;
}
}
iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
iio_get_time_ns(indio_dev));
+out:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
@@ -2083,9 +2084,11 @@ int bmi323_core_probe(struct device *dev)
if (ret)
return -EINVAL;
- ret = iio_read_mount_matrix(dev, &data->orientation);
- if (ret)
- return ret;
+ if (!iio_read_acpi_mount_matrix(dev, &data->orientation, "ROTM")) {
+ ret = iio_read_mount_matrix(dev, &data->orientation);
+ if (ret)
+ return ret;
+ }
indio_dev->name = "bmi323-imu";
indio_dev->info = &bmi323_info;
diff --git a/drivers/iio/imu/bno055/bno055_i2c.c b/drivers/iio/imu/bno055/bno055_i2c.c
index 6ecd750c6b76..cf3dd62a83ba 100644
--- a/drivers/iio/imu/bno055/bno055_i2c.c
+++ b/drivers/iio/imu/bno055/bno055_i2c.c
@@ -30,7 +30,7 @@ static int bno055_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id bno055_i2c_id[] = {
- {"bno055", 0},
+ { "bno055" },
{ }
};
MODULE_DEVICE_TABLE(i2c, bno055_i2c_id);
diff --git a/drivers/iio/imu/fxos8700_i2c.c b/drivers/iio/imu/fxos8700_i2c.c
index e99677ad96a2..2cc4a27a4527 100644
--- a/drivers/iio/imu/fxos8700_i2c.c
+++ b/drivers/iio/imu/fxos8700_i2c.c
@@ -36,7 +36,7 @@ static int fxos8700_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id fxos8700_i2c_id[] = {
- {"fxos8700", 0},
+ { "fxos8700" },
{ }
};
MODULE_DEVICE_TABLE(i2c, fxos8700_i2c_id);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
index c4ac91f6bafe..3a07e43e4cf1 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
@@ -177,11 +177,15 @@ struct inv_icm42600_state {
* struct inv_icm42600_sensor_state - sensor state variables
* @scales: table of scales.
* @scales_len: length (nb of items) of the scales table.
+ * @power_mode: sensor requested power mode (for common frequencies)
+ * @filter: sensor filter.
* @ts: timestamp module states.
*/
struct inv_icm42600_sensor_state {
const int *scales;
size_t scales_len;
+ enum inv_icm42600_sensor_mode power_mode;
+ enum inv_icm42600_filter filter;
struct inv_sensors_timestamp ts;
};
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index 83d8504ebfff..56ac19814250 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -55,8 +55,108 @@ enum inv_icm42600_accel_scan {
INV_ICM42600_ACCEL_SCAN_TIMESTAMP,
};
+static const char * const inv_icm42600_accel_power_mode_items[] = {
+ "low-noise",
+ "low-power",
+};
+static const int inv_icm42600_accel_power_mode_values[] = {
+ INV_ICM42600_SENSOR_MODE_LOW_NOISE,
+ INV_ICM42600_SENSOR_MODE_LOW_POWER,
+};
+static const int inv_icm42600_accel_filter_values[] = {
+ INV_ICM42600_FILTER_BW_ODR_DIV_2,
+ INV_ICM42600_FILTER_AVG_16X,
+};
+
+static int inv_icm42600_accel_power_mode_set(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int idx)
+{
+ struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm42600_sensor_state *accel_st = iio_priv(indio_dev);
+ int power_mode, filter;
+
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ if (idx >= ARRAY_SIZE(inv_icm42600_accel_power_mode_values))
+ return -EINVAL;
+
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ power_mode = inv_icm42600_accel_power_mode_values[idx];
+ filter = inv_icm42600_accel_filter_values[idx];
+
+ guard(mutex)(&st->lock);
+
+ /* prevent change if power mode is not supported by the ODR */
+ switch (power_mode) {
+ case INV_ICM42600_SENSOR_MODE_LOW_NOISE:
+ if (st->conf.accel.odr >= INV_ICM42600_ODR_6_25HZ_LP &&
+ st->conf.accel.odr <= INV_ICM42600_ODR_1_5625HZ_LP)
+ return -EPERM;
+ break;
+ case INV_ICM42600_SENSOR_MODE_LOW_POWER:
+ default:
+ if (st->conf.accel.odr <= INV_ICM42600_ODR_1KHZ_LN)
+ return -EPERM;
+ break;
+ }
+
+ accel_st->power_mode = power_mode;
+ accel_st->filter = filter;
+
+ return 0;
+}
+
+static int inv_icm42600_accel_power_mode_get(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm42600_sensor_state *accel_st = iio_priv(indio_dev);
+ unsigned int idx;
+ int power_mode;
+
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ guard(mutex)(&st->lock);
+
+ /* if the sensor is on, return the actual power mode, not the configured one */
+ switch (st->conf.accel.mode) {
+ case INV_ICM42600_SENSOR_MODE_LOW_POWER:
+ case INV_ICM42600_SENSOR_MODE_LOW_NOISE:
+ power_mode = st->conf.accel.mode;
+ break;
+ default:
+ power_mode = accel_st->power_mode;
+ break;
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(inv_icm42600_accel_power_mode_values); ++idx) {
+ if (power_mode == inv_icm42600_accel_power_mode_values[idx])
+ break;
+ }
+ if (idx >= ARRAY_SIZE(inv_icm42600_accel_power_mode_values))
+ return -EINVAL;
+
+ return idx;
+}
+
+static const struct iio_enum inv_icm42600_accel_power_mode_enum = {
+ .items = inv_icm42600_accel_power_mode_items,
+ .num_items = ARRAY_SIZE(inv_icm42600_accel_power_mode_items),
+ .set = inv_icm42600_accel_power_mode_set,
+ .get = inv_icm42600_accel_power_mode_get,
+};
+
static const struct iio_chan_spec_ext_info inv_icm42600_accel_ext_infos[] = {
IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, inv_icm42600_get_mount_matrix),
+ IIO_ENUM_AVAILABLE("power_mode", IIO_SHARED_BY_TYPE,
+ &inv_icm42600_accel_power_mode_enum),
+ IIO_ENUM("power_mode", IIO_SHARED_BY_TYPE,
+ &inv_icm42600_accel_power_mode_enum),
{},
};
@@ -120,7 +220,8 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
if (*scan_mask & INV_ICM42600_SCAN_MASK_ACCEL_3AXIS) {
/* enable accel sensor */
- conf.mode = INV_ICM42600_SENSOR_MODE_LOW_NOISE;
+ conf.mode = accel_st->power_mode;
+ conf.filter = accel_st->filter;
ret = inv_icm42600_set_accel_conf(st, &conf, &sleep_accel);
if (ret)
goto out_unlock;
@@ -130,10 +231,6 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
/* update data FIFO write */
inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
- if (ret)
- goto out_unlock;
-
- ret = inv_icm42600_buffer_update_watermark(st);
out_unlock:
mutex_unlock(&st->lock);
@@ -144,10 +241,12 @@ out_unlock:
return ret;
}
-static int inv_icm42600_accel_read_sensor(struct inv_icm42600_state *st,
+static int inv_icm42600_accel_read_sensor(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int16_t *val)
{
+ struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm42600_sensor_state *accel_st = iio_priv(indio_dev);
struct device *dev = regmap_get_device(st->map);
struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT;
unsigned int reg;
@@ -175,7 +274,8 @@ static int inv_icm42600_accel_read_sensor(struct inv_icm42600_state *st,
mutex_lock(&st->lock);
/* enable accel sensor */
- conf.mode = INV_ICM42600_SENSOR_MODE_LOW_NOISE;
+ conf.mode = accel_st->power_mode;
+ conf.filter = accel_st->filter;
ret = inv_icm42600_set_accel_conf(st, &conf, NULL);
if (ret)
goto exit;
@@ -277,6 +377,12 @@ static int inv_icm42600_accel_write_scale(struct iio_dev *indio_dev,
/* IIO format int + micro */
static const int inv_icm42600_accel_odr[] = {
+ /* 1.5625Hz */
+ 1, 562500,
+ /* 3.125Hz */
+ 3, 125000,
+ /* 6.25Hz */
+ 6, 250000,
/* 12.5Hz */
12, 500000,
/* 25Hz */
@@ -296,6 +402,9 @@ static const int inv_icm42600_accel_odr[] = {
};
static const int inv_icm42600_accel_odr_conv[] = {
+ INV_ICM42600_ODR_1_5625HZ_LP,
+ INV_ICM42600_ODR_3_125HZ_LP,
+ INV_ICM42600_ODR_6_25HZ_LP,
INV_ICM42600_ODR_12_5HZ,
INV_ICM42600_ODR_25HZ,
INV_ICM42600_ODR_50HZ,
@@ -581,7 +690,7 @@ static int inv_icm42600_accel_read_raw(struct iio_dev *indio_dev,
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
- ret = inv_icm42600_accel_read_sensor(st, chan, &data);
+ ret = inv_icm42600_accel_read_sensor(indio_dev, chan, &data);
iio_device_release_direct_mode(indio_dev);
if (ret)
return ret;
@@ -754,6 +863,9 @@ struct iio_dev *inv_icm42600_accel_init(struct inv_icm42600_state *st)
accel_st->scales_len = ARRAY_SIZE(inv_icm42600_accel_scale);
break;
}
+ /* low-power by default at init */
+ accel_st->power_mode = INV_ICM42600_SENSOR_MODE_LOW_POWER;
+ accel_st->filter = INV_ICM42600_FILTER_AVG_16X;
/*
* clock period is 32kHz (31250ns)
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
index 63b85ec88c13..aae7c56481a3 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
@@ -222,10 +222,15 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st)
latency_accel = period_accel * wm_accel;
/* 0 value for watermark means that the sensor is turned off */
+ if (wm_gyro == 0 && wm_accel == 0)
+ return 0;
+
if (latency_gyro == 0) {
watermark = wm_accel;
+ st->fifo.watermark.eff_accel = wm_accel;
} else if (latency_accel == 0) {
watermark = wm_gyro;
+ st->fifo.watermark.eff_gyro = wm_gyro;
} else {
/* compute the smallest latency that is a multiple of both */
if (latency_gyro <= latency_accel)
@@ -241,6 +246,13 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st)
watermark = latency / period;
if (watermark < 1)
watermark = 1;
+ /* update effective watermark */
+ st->fifo.watermark.eff_gyro = latency / period_gyro;
+ if (st->fifo.watermark.eff_gyro < 1)
+ st->fifo.watermark.eff_gyro = 1;
+ st->fifo.watermark.eff_accel = latency / period_accel;
+ if (st->fifo.watermark.eff_accel < 1)
+ st->fifo.watermark.eff_accel = 1;
}
/* compute watermark value in bytes */
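
Arithmetic sketch with invented rates, mirroring the eff_gyro/eff_accel updates above: once a common FIFO latency has been chosen, each sensor's effective watermark is that latency divided by its own sample period, clamped to at least one sample.

#include <stdio.h>

int main(void)
{
	unsigned int period_gyro = 5000;	/* us, 200 Hz                */
	unsigned int period_accel = 10000;	/* us, 100 Hz                */
	unsigned int latency = 50000;		/* chosen common latency, us */
	unsigned int eff_gyro, eff_accel;

	eff_gyro = latency / period_gyro;
	if (eff_gyro < 1)
		eff_gyro = 1;
	eff_accel = latency / period_accel;
	if (eff_accel < 1)
		eff_accel = 1;

	/* prints eff_gyro=10 eff_accel=5 */
	printf("eff_gyro=%u eff_accel=%u\n", eff_gyro, eff_accel);
	return 0;
}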
@@ -262,9 +274,8 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st)
/* restore watermark interrupt */
if (restore) {
- ret = regmap_update_bits(st->map, INV_ICM42600_REG_INT_SOURCE0,
- INV_ICM42600_INT_SOURCE0_FIFO_THS_INT1_EN,
- INV_ICM42600_INT_SOURCE0_FIFO_THS_INT1_EN);
+ ret = regmap_set_bits(st->map, INV_ICM42600_REG_INT_SOURCE0,
+ INV_ICM42600_INT_SOURCE0_FIFO_THS_INT1_EN);
if (ret)
return ret;
}
@@ -306,9 +317,8 @@ static int inv_icm42600_buffer_postenable(struct iio_dev *indio_dev)
}
/* set FIFO threshold interrupt */
- ret = regmap_update_bits(st->map, INV_ICM42600_REG_INT_SOURCE0,
- INV_ICM42600_INT_SOURCE0_FIFO_THS_INT1_EN,
- INV_ICM42600_INT_SOURCE0_FIFO_THS_INT1_EN);
+ ret = regmap_set_bits(st->map, INV_ICM42600_REG_INT_SOURCE0,
+ INV_ICM42600_INT_SOURCE0_FIFO_THS_INT1_EN);
if (ret)
goto out_unlock;
@@ -363,8 +373,8 @@ static int inv_icm42600_buffer_predisable(struct iio_dev *indio_dev)
goto out_unlock;
/* disable FIFO threshold interrupt */
- ret = regmap_update_bits(st->map, INV_ICM42600_REG_INT_SOURCE0,
- INV_ICM42600_INT_SOURCE0_FIFO_THS_INT1_EN, 0);
+ ret = regmap_clear_bits(st->map, INV_ICM42600_REG_INT_SOURCE0,
+ INV_ICM42600_INT_SOURCE0_FIFO_THS_INT1_EN);
if (ret)
goto out_unlock;
@@ -514,7 +524,7 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st)
/* handle gyroscope timestamp and FIFO data parsing */
if (st->fifo.nb.gyro > 0) {
ts = &gyro_st->ts;
- inv_sensors_timestamp_interrupt(ts, st->fifo.nb.gyro,
+ inv_sensors_timestamp_interrupt(ts, st->fifo.watermark.eff_gyro,
st->timestamp.gyro);
ret = inv_icm42600_gyro_parse_fifo(st->indio_gyro);
if (ret)
@@ -524,7 +534,7 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st)
/* handle accelerometer timestamp and FIFO data parsing */
if (st->fifo.nb.accel > 0) {
ts = &accel_st->ts;
- inv_sensors_timestamp_interrupt(ts, st->fifo.nb.accel,
+ inv_sensors_timestamp_interrupt(ts, st->fifo.watermark.eff_accel,
st->timestamp.accel);
ret = inv_icm42600_accel_parse_fifo(st->indio_accel);
if (ret)
@@ -577,6 +587,9 @@ int inv_icm42600_buffer_init(struct inv_icm42600_state *st)
unsigned int val;
int ret;
+ st->fifo.watermark.eff_gyro = 1;
+ st->fifo.watermark.eff_accel = 1;
+
/*
* Default FIFO configuration (bits 7 to 5)
* - use invalid value
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
index 8b85ee333bf8..f6c85daf42b0 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
@@ -32,6 +32,8 @@ struct inv_icm42600_fifo {
struct {
unsigned int gyro;
unsigned int accel;
+ unsigned int eff_gyro;
+ unsigned int eff_accel;
} watermark;
size_t count;
struct {
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index 96116a68ab29..c3924cc6190e 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -34,12 +34,56 @@ static const struct regmap_range_cfg inv_icm42600_regmap_ranges[] = {
},
};
+static const struct regmap_range inv_icm42600_regmap_volatile_yes_ranges[] = {
+ /* Sensor data registers */
+ regmap_reg_range(0x001D, 0x002A),
+ /* INT status, FIFO, APEX data */
+ regmap_reg_range(0x002D, 0x0038),
+ /* Signal path reset */
+ regmap_reg_range(0x004B, 0x004B),
+ /* FIFO lost packets */
+ regmap_reg_range(0x006C, 0x006D),
+ /* Timestamp value */
+ regmap_reg_range(0x1062, 0x1064),
+};
+
+static const struct regmap_range inv_icm42600_regmap_volatile_no_ranges[] = {
+ regmap_reg_range(0x0000, 0x001C),
+ regmap_reg_range(0x006E, 0x1061),
+ regmap_reg_range(0x1065, 0x4FFF),
+};
+
+static const struct regmap_access_table inv_icm42600_regmap_volatile_accesses[] = {
+ {
+ .yes_ranges = inv_icm42600_regmap_volatile_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(inv_icm42600_regmap_volatile_yes_ranges),
+ .no_ranges = inv_icm42600_regmap_volatile_no_ranges,
+ .n_no_ranges = ARRAY_SIZE(inv_icm42600_regmap_volatile_no_ranges),
+ },
+};
+
+static const struct regmap_range inv_icm42600_regmap_rd_noinc_no_ranges[] = {
+ regmap_reg_range(0x0000, INV_ICM42600_REG_FIFO_DATA - 1),
+ regmap_reg_range(INV_ICM42600_REG_FIFO_DATA + 1, 0x4FFF),
+};
+
+static const struct regmap_access_table inv_icm42600_regmap_rd_noinc_accesses[] = {
+ {
+ .no_ranges = inv_icm42600_regmap_rd_noinc_no_ranges,
+ .n_no_ranges = ARRAY_SIZE(inv_icm42600_regmap_rd_noinc_no_ranges),
+ },
+};
+
const struct regmap_config inv_icm42600_regmap_config = {
+ .name = "inv_icm42600",
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x4FFF,
.ranges = inv_icm42600_regmap_ranges,
.num_ranges = ARRAY_SIZE(inv_icm42600_regmap_ranges),
+ .volatile_table = inv_icm42600_regmap_volatile_accesses,
+ .rd_noinc_table = inv_icm42600_regmap_rd_noinc_accesses,
+ .cache_type = REGCACHE_RBTREE,
};
EXPORT_SYMBOL_NS_GPL(inv_icm42600_regmap_config, IIO_ICM42600);
@@ -248,6 +292,23 @@ int inv_icm42600_set_accel_conf(struct inv_icm42600_state *st,
if (conf->filter < 0)
conf->filter = oldconf->filter;
+ /* force power mode against ODR when sensor is on */
+ switch (conf->mode) {
+ case INV_ICM42600_SENSOR_MODE_LOW_POWER:
+ case INV_ICM42600_SENSOR_MODE_LOW_NOISE:
+ if (conf->odr <= INV_ICM42600_ODR_1KHZ_LN) {
+ conf->mode = INV_ICM42600_SENSOR_MODE_LOW_NOISE;
+ conf->filter = INV_ICM42600_FILTER_BW_ODR_DIV_2;
+ } else if (conf->odr >= INV_ICM42600_ODR_6_25HZ_LP &&
+ conf->odr <= INV_ICM42600_ODR_1_5625HZ_LP) {
+ conf->mode = INV_ICM42600_SENSOR_MODE_LOW_POWER;
+ conf->filter = INV_ICM42600_FILTER_AVG_16X;
+ }
+ break;
+ default:
+ break;
+ }
+
/* set ACCEL_CONFIG0 register (accel fullscale & odr) */
if (conf->fs != oldconf->fs || conf->odr != oldconf->odr) {
val = INV_ICM42600_ACCEL_CONFIG0_FS(conf->fs) |
@@ -435,9 +496,18 @@ static int inv_icm42600_setup(struct inv_icm42600_state *st,
return ret;
/* sensor data in big-endian (default) */
- ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG0,
- INV_ICM42600_INTF_CONFIG0_SENSOR_DATA_ENDIAN,
- INV_ICM42600_INTF_CONFIG0_SENSOR_DATA_ENDIAN);
+ ret = regmap_set_bits(st->map, INV_ICM42600_REG_INTF_CONFIG0,
+ INV_ICM42600_INTF_CONFIG0_SENSOR_DATA_ENDIAN);
+ if (ret)
+ return ret;
+
+ /*
+ * Use RC clock for accel low-power to fix glitches when switching
+ * gyro on/off while accel low-power is on.
+ */
+ ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG1,
+ INV_ICM42600_INTF_CONFIG1_ACCEL_LP_CLK_RC,
+ INV_ICM42600_INTF_CONFIG1_ACCEL_LP_CLK_RC);
if (ret)
return ret;
@@ -532,11 +602,12 @@ static int inv_icm42600_irq_init(struct inv_icm42600_state *st, int irq,
return ret;
/* Deassert async reset for proper INT pin operation (cf datasheet) */
- ret = regmap_update_bits(st->map, INV_ICM42600_REG_INT_CONFIG1,
- INV_ICM42600_INT_CONFIG1_ASYNC_RESET, 0);
+ ret = regmap_clear_bits(st->map, INV_ICM42600_REG_INT_CONFIG1,
+ INV_ICM42600_INT_CONFIG1_ASYNC_RESET);
if (ret)
return ret;
+ irq_type |= IRQF_ONESHOT;
return devm_request_threaded_irq(dev, irq, inv_icm42600_irq_timestamp,
inv_icm42600_irq_handler, irq_type,
"inv_icm42600", st);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index e6f8de80128c..938af5b640b0 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -130,10 +130,6 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
/* update data FIFO write */
inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
- if (ret)
- goto out_unlock;
-
- ret = inv_icm42600_buffer_update_watermark(st);
out_unlock:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
index 8d33504d770f..ebb31b385881 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
@@ -28,8 +28,8 @@ static int inv_icm42600_i2c_bus_setup(struct inv_icm42600_state *st)
INV_ICM42600_INTF_CONFIG6_MASK,
INV_ICM42600_INTF_CONFIG6_I3C_EN);
- ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG4,
- INV_ICM42600_INTF_CONFIG4_I3C_BUS_ONLY, 0);
+ ret = regmap_clear_bits(st->map, INV_ICM42600_REG_INTF_CONFIG4,
+ INV_ICM42600_INTF_CONFIG4_I3C_BUS_ONLY);
if (ret)
return ret;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
index cc2bf1799a46..eae5ff7a3cc1 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
@@ -27,8 +27,8 @@ static int inv_icm42600_spi_bus_setup(struct inv_icm42600_state *st)
if (ret)
return ret;
- ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG4,
- INV_ICM42600_INTF_CONFIG4_I3C_BUS_ONLY, 0);
+ ret = regmap_clear_bits(st->map, INV_ICM42600_REG_INTF_CONFIG4,
+ INV_ICM42600_INTF_CONFIG4_I3C_BUS_ONLY);
if (ret)
return ret;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 0dc0f22a5582..3d3b27f28c9d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -100,8 +100,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
goto end_session;
/* Each FIFO data contains all sensors, so same number for FIFO and sensor data */
fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
- inv_sensors_timestamp_interrupt(&st->timestamp, nb, pf->timestamp);
- inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, nb, 0);
+ inv_sensors_timestamp_interrupt(&st->timestamp, 1, pf->timestamp);
+ inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, 1, 0);
/* clear internal data buffer for avoiding kernel data leak */
memset(data, 0, sizeof(data));
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 1b603567ccc8..84273660ca2e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -300,6 +300,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type)
if (!st->trig)
return -ENOMEM;
+ irq_type |= IRQF_ONESHOT;
ret = devm_request_threaded_irq(&indio_dev->dev, st->irq,
&inv_mpu6050_interrupt_timestamp,
&inv_mpu6050_interrupt_handle,
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 7d3e061f3046..d37eca5ef761 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1505,7 +1505,7 @@ static const struct acpi_device_id kmx61_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, kmx61_acpi_match);
static const struct i2c_device_id kmx61_id[] = {
- {"kmx611021", 0},
+ { "kmx611021" },
{}
};
diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
index 929aff4040ed..efe05be284b6 100644
--- a/drivers/iio/industrialio-backend.c
+++ b/drivers/iio/industrialio-backend.c
@@ -561,11 +561,9 @@ struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
}
fwnode = fwnode_find_reference(dev_fwnode(dev), "io-backends", index);
- if (IS_ERR(fwnode)) {
- dev_err_probe(dev, PTR_ERR(fwnode),
- "Cannot get Firmware reference\n");
- return ERR_CAST(fwnode);
- }
+ if (IS_ERR(fwnode))
+ return dev_err_cast_probe(dev, fwnode,
+ "Cannot get Firmware reference\n");
guard(mutex)(&iio_back_lock);
list_for_each_entry(back, &iio_back_list, entry) {
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index cec58a604d73..d6fe105d2f40 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -9,15 +9,20 @@
* - Better memory allocation techniques?
* - Alternative access techniques?
*/
+#include <linux/atomic.h>
#include <linux/anon_inodes.h>
#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-resv.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
@@ -29,6 +34,34 @@
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
+#define DMABUF_ENQUEUE_TIMEOUT_MS 5000
+
+MODULE_IMPORT_NS(DMA_BUF);
+
+struct iio_dmabuf_priv {
+ struct list_head entry;
+ struct kref ref;
+
+ struct iio_buffer *buffer;
+ struct iio_dma_buffer_block *block;
+
+ u64 context;
+
+ /* Spinlock used for locking the dma_fence */
+ spinlock_t lock;
+
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+ atomic_t seqno;
+};
+
+struct iio_dma_fence {
+ struct dma_fence base;
+ struct iio_dmabuf_priv *priv;
+ struct work_struct work;
+};
+
static const char * const iio_endian_prefix[] = {
[IIO_BE] = "be",
[IIO_LE] = "le",
@@ -333,6 +366,8 @@ void iio_buffer_init(struct iio_buffer *buffer)
{
INIT_LIST_HEAD(&buffer->demux_list);
INIT_LIST_HEAD(&buffer->buffer_list);
+ INIT_LIST_HEAD(&buffer->dmabufs);
+ mutex_init(&buffer->dmabufs_mutex);
init_waitqueue_head(&buffer->pollq);
kref_init(&buffer->ref);
if (!buffer->watermark)
@@ -365,8 +400,16 @@ static ssize_t iio_show_fixed_type(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- u8 type = this_attr->c->scan_type.endianness;
+ const struct iio_scan_type *scan_type;
+ u8 type;
+
+ scan_type = iio_get_current_scan_type(indio_dev, this_attr->c);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ type = scan_type->endianness;
if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
@@ -375,21 +418,21 @@ static ssize_t iio_show_fixed_type(struct device *dev,
type = IIO_BE;
#endif
}
- if (this_attr->c->scan_type.repeat > 1)
+ if (scan_type->repeat > 1)
return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
iio_endian_prefix[type],
- this_attr->c->scan_type.sign,
- this_attr->c->scan_type.realbits,
- this_attr->c->scan_type.storagebits,
- this_attr->c->scan_type.repeat,
- this_attr->c->scan_type.shift);
+ scan_type->sign,
+ scan_type->realbits,
+ scan_type->storagebits,
+ scan_type->repeat,
+ scan_type->shift);
else
return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
iio_endian_prefix[type],
- this_attr->c->scan_type.sign,
- this_attr->c->scan_type.realbits,
- this_attr->c->scan_type.storagebits,
- this_attr->c->scan_type.shift);
+ scan_type->sign,
+ scan_type->realbits,
+ scan_type->storagebits,
+ scan_type->shift);
}
static ssize_t iio_scan_el_show(struct device *dev,
@@ -690,20 +733,27 @@ static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
}
-static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
- unsigned int scan_index)
+static int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
+ unsigned int scan_index)
{
const struct iio_chan_spec *ch;
+ const struct iio_scan_type *scan_type;
unsigned int bytes;
ch = iio_find_channel_from_si(indio_dev, scan_index);
- bytes = ch->scan_type.storagebits / 8;
- if (ch->scan_type.repeat > 1)
- bytes *= ch->scan_type.repeat;
+ scan_type = iio_get_current_scan_type(indio_dev, ch);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ bytes = scan_type->storagebits / 8;
+
+ if (scan_type->repeat > 1)
+ bytes *= scan_type->repeat;
+
return bytes;
}
-static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
+static int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
@@ -721,6 +771,9 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
for_each_set_bit(i, mask,
indio_dev->masklength) {
length = iio_storage_bytes_for_si(indio_dev, i);
+ if (length < 0)
+ return length;
+
bytes = ALIGN(bytes, length);
bytes += length;
largest = max(largest, length);
@@ -728,6 +781,9 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
if (timestamp) {
length = iio_storage_bytes_for_timestamp(indio_dev);
+ if (length < 0)
+ return length;
+
bytes = ALIGN(bytes, length);
bytes += length;
largest = max(largest, length);
@@ -1007,14 +1063,22 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
indio_dev->masklength,
in_ind + 1);
while (in_ind != out_ind) {
- length = iio_storage_bytes_for_si(indio_dev, in_ind);
+ ret = iio_storage_bytes_for_si(indio_dev, in_ind);
+ if (ret < 0)
+ goto error_clear_mux_table;
+
+ length = ret;
/* Make sure we are aligned */
in_loc = roundup(in_loc, length) + length;
in_ind = find_next_bit(indio_dev->active_scan_mask,
indio_dev->masklength,
in_ind + 1);
}
- length = iio_storage_bytes_for_si(indio_dev, in_ind);
+ ret = iio_storage_bytes_for_si(indio_dev, in_ind);
+ if (ret < 0)
+ goto error_clear_mux_table;
+
+ length = ret;
out_loc = roundup(out_loc, length);
in_loc = roundup(in_loc, length);
ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
@@ -1025,7 +1089,11 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
}
/* Relies on scan_timestamp being last */
if (buffer->scan_timestamp) {
- length = iio_storage_bytes_for_timestamp(indio_dev);
+ ret = iio_storage_bytes_for_timestamp(indio_dev);
+ if (ret < 0)
+ goto error_clear_mux_table;
+
+ length = ret;
out_loc = roundup(out_loc, length);
in_loc = roundup(in_loc, length);
ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
@@ -1493,14 +1561,55 @@ static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
}
+static void iio_buffer_dmabuf_release(struct kref *ref)
+{
+ struct iio_dmabuf_priv *priv = container_of(ref, struct iio_dmabuf_priv, ref);
+ struct dma_buf_attachment *attach = priv->attach;
+ struct iio_buffer *buffer = priv->buffer;
+ struct dma_buf *dmabuf = attach->dmabuf;
+
+ dma_resv_lock(dmabuf->resv, NULL);
+ dma_buf_unmap_attachment(attach, priv->sgt, priv->dir);
+ dma_resv_unlock(dmabuf->resv);
+
+ buffer->access->detach_dmabuf(buffer, priv->block);
+
+ dma_buf_detach(attach->dmabuf, attach);
+ dma_buf_put(dmabuf);
+ kfree(priv);
+}
+
+static void iio_buffer_dmabuf_get(struct dma_buf_attachment *attach)
+{
+ struct iio_dmabuf_priv *priv = attach->importer_priv;
+
+ kref_get(&priv->ref);
+}
+
+static void iio_buffer_dmabuf_put(struct dma_buf_attachment *attach)
+{
+ struct iio_dmabuf_priv *priv = attach->importer_priv;
+
+ kref_put(&priv->ref, iio_buffer_dmabuf_release);
+}
+
static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
{
struct iio_dev_buffer_pair *ib = filep->private_data;
struct iio_dev *indio_dev = ib->indio_dev;
struct iio_buffer *buffer = ib->buffer;
+ struct iio_dmabuf_priv *priv, *tmp;
wake_up(&buffer->pollq);
+ guard(mutex)(&buffer->dmabufs_mutex);
+
+ /* Close all attached DMABUFs */
+ list_for_each_entry_safe(priv, tmp, &buffer->dmabufs, entry) {
+ list_del_init(&priv->entry);
+ iio_buffer_dmabuf_put(priv->attach);
+ }
+
kfree(ib);
clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
iio_device_put(indio_dev);
@@ -1508,11 +1617,393 @@ static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
return 0;
}
+static int iio_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock)
+{
+ if (!nonblock)
+ return dma_resv_lock_interruptible(dmabuf->resv, NULL);
+
+ if (!dma_resv_trylock(dmabuf->resv))
+ return -EBUSY;
+
+ return 0;
+}
+
+static struct dma_buf_attachment *
+iio_buffer_find_attachment(struct iio_dev_buffer_pair *ib,
+ struct dma_buf *dmabuf, bool nonblock)
+{
+ struct device *dev = ib->indio_dev->dev.parent;
+ struct iio_buffer *buffer = ib->buffer;
+ struct dma_buf_attachment *attach = NULL;
+ struct iio_dmabuf_priv *priv;
+
+ guard(mutex)(&buffer->dmabufs_mutex);
+
+ list_for_each_entry(priv, &buffer->dmabufs, entry) {
+ if (priv->attach->dev == dev
+ && priv->attach->dmabuf == dmabuf) {
+ attach = priv->attach;
+ break;
+ }
+ }
+
+ if (attach)
+ iio_buffer_dmabuf_get(attach);
+
+ return attach ?: ERR_PTR(-EPERM);
+}
+
+static int iio_buffer_attach_dmabuf(struct iio_dev_buffer_pair *ib,
+ int __user *user_fd, bool nonblock)
+{
+ struct iio_dev *indio_dev = ib->indio_dev;
+ struct iio_buffer *buffer = ib->buffer;
+ struct dma_buf_attachment *attach;
+ struct iio_dmabuf_priv *priv, *each;
+ struct dma_buf *dmabuf;
+ int err, fd;
+
+ if (!buffer->access->attach_dmabuf
+ || !buffer->access->detach_dmabuf
+ || !buffer->access->enqueue_dmabuf)
+ return -EPERM;
+
+ if (copy_from_user(&fd, user_fd, sizeof(fd)))
+ return -EFAULT;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ priv->context = dma_fence_context_alloc(1);
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf)) {
+ err = PTR_ERR(dmabuf);
+ goto err_free_priv;
+ }
+
+ attach = dma_buf_attach(dmabuf, indio_dev->dev.parent);
+ if (IS_ERR(attach)) {
+ err = PTR_ERR(attach);
+ goto err_dmabuf_put;
+ }
+
+ err = iio_dma_resv_lock(dmabuf, nonblock);
+ if (err)
+ goto err_dmabuf_detach;
+
+ priv->dir = buffer->direction == IIO_BUFFER_DIRECTION_IN
+ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ priv->sgt = dma_buf_map_attachment(attach, priv->dir);
+ if (IS_ERR(priv->sgt)) {
+ err = PTR_ERR(priv->sgt);
+ dev_err(&indio_dev->dev, "Unable to map attachment: %d\n", err);
+ goto err_resv_unlock;
+ }
+
+ kref_init(&priv->ref);
+ priv->buffer = buffer;
+ priv->attach = attach;
+ attach->importer_priv = priv;
+
+ priv->block = buffer->access->attach_dmabuf(buffer, attach);
+ if (IS_ERR(priv->block)) {
+ err = PTR_ERR(priv->block);
+ goto err_dmabuf_unmap_attachment;
+ }
+
+ dma_resv_unlock(dmabuf->resv);
+
+ mutex_lock(&buffer->dmabufs_mutex);
+
+ /*
+ * Check whether we already have an attachment for this driver/DMABUF
+ * combo. If we do, refuse to attach.
+ */
+ list_for_each_entry(each, &buffer->dmabufs, entry) {
+ if (each->attach->dev == indio_dev->dev.parent
+ && each->attach->dmabuf == dmabuf) {
+ /*
+ * We unlocked the reservation object, so going through
+ * the cleanup code would mean re-locking it first.
+ * At this stage it is simpler to free the attachment
+			 * using iio_buffer_dmabuf_put().
+ */
+ mutex_unlock(&buffer->dmabufs_mutex);
+ iio_buffer_dmabuf_put(attach);
+ return -EBUSY;
+ }
+ }
+
+ /* Otherwise, add the new attachment to our dmabufs list. */
+ list_add(&priv->entry, &buffer->dmabufs);
+ mutex_unlock(&buffer->dmabufs_mutex);
+
+ return 0;
+
+err_dmabuf_unmap_attachment:
+ dma_buf_unmap_attachment(attach, priv->sgt, priv->dir);
+err_resv_unlock:
+ dma_resv_unlock(dmabuf->resv);
+err_dmabuf_detach:
+ dma_buf_detach(dmabuf, attach);
+err_dmabuf_put:
+ dma_buf_put(dmabuf);
+err_free_priv:
+ kfree(priv);
+
+ return err;
+}
+
+static int iio_buffer_detach_dmabuf(struct iio_dev_buffer_pair *ib,
+ int __user *user_req, bool nonblock)
+{
+ struct iio_buffer *buffer = ib->buffer;
+ struct iio_dev *indio_dev = ib->indio_dev;
+ struct iio_dmabuf_priv *priv;
+ struct dma_buf *dmabuf;
+ int dmabuf_fd, ret = -EPERM;
+
+ if (copy_from_user(&dmabuf_fd, user_req, sizeof(dmabuf_fd)))
+ return -EFAULT;
+
+ dmabuf = dma_buf_get(dmabuf_fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ guard(mutex)(&buffer->dmabufs_mutex);
+
+ list_for_each_entry(priv, &buffer->dmabufs, entry) {
+ if (priv->attach->dev == indio_dev->dev.parent
+ && priv->attach->dmabuf == dmabuf) {
+ list_del(&priv->entry);
+
+ /* Unref the reference from iio_buffer_attach_dmabuf() */
+ iio_buffer_dmabuf_put(priv->attach);
+ ret = 0;
+ break;
+ }
+ }
+
+ dma_buf_put(dmabuf);
+
+ return ret;
+}
+
+static const char *
+iio_buffer_dma_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "iio";
+}
+
+static void iio_buffer_dma_fence_release(struct dma_fence *fence)
+{
+ struct iio_dma_fence *iio_fence =
+ container_of(fence, struct iio_dma_fence, base);
+
+ kfree(iio_fence);
+}
+
+static const struct dma_fence_ops iio_buffer_dma_fence_ops = {
+ .get_driver_name = iio_buffer_dma_fence_get_driver_name,
+ .get_timeline_name = iio_buffer_dma_fence_get_driver_name,
+ .release = iio_buffer_dma_fence_release,
+};
+
+static int iio_buffer_enqueue_dmabuf(struct iio_dev_buffer_pair *ib,
+ struct iio_dmabuf __user *iio_dmabuf_req,
+ bool nonblock)
+{
+ struct iio_buffer *buffer = ib->buffer;
+ struct iio_dmabuf iio_dmabuf;
+ struct dma_buf_attachment *attach;
+ struct iio_dmabuf_priv *priv;
+ struct iio_dma_fence *fence;
+ struct dma_buf *dmabuf;
+ unsigned long timeout;
+ bool cookie, cyclic, dma_to_ram;
+ long retl;
+ u32 seqno;
+ int ret;
+
+ if (copy_from_user(&iio_dmabuf, iio_dmabuf_req, sizeof(iio_dmabuf)))
+ return -EFAULT;
+
+ if (iio_dmabuf.flags & ~IIO_BUFFER_DMABUF_SUPPORTED_FLAGS)
+ return -EINVAL;
+
+ cyclic = iio_dmabuf.flags & IIO_BUFFER_DMABUF_CYCLIC;
+
+ /* Cyclic flag is only supported on output buffers */
+ if (cyclic && buffer->direction != IIO_BUFFER_DIRECTION_OUT)
+ return -EINVAL;
+
+ dmabuf = dma_buf_get(iio_dmabuf.fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ if (!iio_dmabuf.bytes_used || iio_dmabuf.bytes_used > dmabuf->size) {
+ ret = -EINVAL;
+ goto err_dmabuf_put;
+ }
+
+ attach = iio_buffer_find_attachment(ib, dmabuf, nonblock);
+ if (IS_ERR(attach)) {
+ ret = PTR_ERR(attach);
+ goto err_dmabuf_put;
+ }
+
+ priv = attach->importer_priv;
+
+ fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto err_attachment_put;
+ }
+
+ fence->priv = priv;
+
+ seqno = atomic_add_return(1, &priv->seqno);
+
+ /*
+ * The transfers are guaranteed to be processed in the order they are
+ * enqueued, so we can use a simple incrementing sequence number for
+ * the dma_fence.
+ */
+ dma_fence_init(&fence->base, &iio_buffer_dma_fence_ops,
+ &priv->lock, priv->context, seqno);
+
+ ret = iio_dma_resv_lock(dmabuf, nonblock);
+ if (ret)
+ goto err_fence_put;
+
+ timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS);
+ dma_to_ram = buffer->direction == IIO_BUFFER_DIRECTION_IN;
+
+ /* Make sure we don't have writers */
+ retl = dma_resv_wait_timeout(dmabuf->resv,
+ dma_resv_usage_rw(dma_to_ram),
+ true, timeout);
+ if (retl == 0)
+ retl = -EBUSY;
+ if (retl < 0) {
+ ret = (int)retl;
+ goto err_resv_unlock;
+ }
+
+ if (buffer->access->lock_queue)
+ buffer->access->lock_queue(buffer);
+
+ ret = dma_resv_reserve_fences(dmabuf->resv, 1);
+ if (ret)
+ goto err_queue_unlock;
+
+ dma_resv_add_fence(dmabuf->resv, &fence->base,
+ dma_to_ram ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
+ dma_resv_unlock(dmabuf->resv);
+
+ cookie = dma_fence_begin_signalling();
+
+ ret = buffer->access->enqueue_dmabuf(buffer, priv->block, &fence->base,
+ priv->sgt, iio_dmabuf.bytes_used,
+ cyclic);
+ if (ret) {
+ /*
+ * DMABUF enqueue failed, but we already added the fence.
+ * Signal the error through the fence completion mechanism.
+ */
+ iio_buffer_signal_dmabuf_done(&fence->base, ret);
+ }
+
+ if (buffer->access->unlock_queue)
+ buffer->access->unlock_queue(buffer);
+
+ dma_fence_end_signalling(cookie);
+ dma_buf_put(dmabuf);
+
+ return ret;
+
+err_queue_unlock:
+ if (buffer->access->unlock_queue)
+ buffer->access->unlock_queue(buffer);
+err_resv_unlock:
+ dma_resv_unlock(dmabuf->resv);
+err_fence_put:
+ dma_fence_put(&fence->base);
+err_attachment_put:
+ iio_buffer_dmabuf_put(attach);
+err_dmabuf_put:
+ dma_buf_put(dmabuf);
+
+ return ret;
+}
+
+static void iio_buffer_cleanup(struct work_struct *work)
+{
+ struct iio_dma_fence *fence =
+ container_of(work, struct iio_dma_fence, work);
+ struct iio_dmabuf_priv *priv = fence->priv;
+ struct dma_buf_attachment *attach = priv->attach;
+
+ dma_fence_put(&fence->base);
+ iio_buffer_dmabuf_put(attach);
+}
+
+void iio_buffer_signal_dmabuf_done(struct dma_fence *fence, int ret)
+{
+ struct iio_dma_fence *iio_fence =
+ container_of(fence, struct iio_dma_fence, base);
+ bool cookie = dma_fence_begin_signalling();
+
+ /*
+ * Get a reference to the fence, so that it's not freed as soon as
+ * it's signaled.
+ */
+ dma_fence_get(fence);
+
+ fence->error = ret;
+ dma_fence_signal(fence);
+ dma_fence_end_signalling(cookie);
+
+ /*
+ * The fence will be unref'd in iio_buffer_cleanup.
+ * It can't be done here, as the unref functions might try to lock the
+ * resv object, which can deadlock.
+ */
+ INIT_WORK(&iio_fence->work, iio_buffer_cleanup);
+ schedule_work(&iio_fence->work);
+}
+EXPORT_SYMBOL_GPL(iio_buffer_signal_dmabuf_done);
+
+static long iio_buffer_chrdev_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct iio_dev_buffer_pair *ib = filp->private_data;
+ void __user *_arg = (void __user *)arg;
+ bool nonblock = filp->f_flags & O_NONBLOCK;
+
+ switch (cmd) {
+ case IIO_BUFFER_DMABUF_ATTACH_IOCTL:
+ return iio_buffer_attach_dmabuf(ib, _arg, nonblock);
+ case IIO_BUFFER_DMABUF_DETACH_IOCTL:
+ return iio_buffer_detach_dmabuf(ib, _arg, nonblock);
+ case IIO_BUFFER_DMABUF_ENQUEUE_IOCTL:
+ return iio_buffer_enqueue_dmabuf(ib, _arg, nonblock);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct file_operations iio_buffer_chrdev_fileops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.read = iio_buffer_read,
.write = iio_buffer_write,
+ .unlocked_ioctl = iio_buffer_chrdev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.poll = iio_buffer_poll,
.release = iio_buffer_chrdev_release,
};
@@ -1592,6 +2083,22 @@ static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp
}
}
+static int iio_channel_validate_scan_type(struct device *dev, int ch,
+ const struct iio_scan_type *scan_type)
+{
+ /* Verify that sample bits fit into storage */
+ if (scan_type->storagebits < scan_type->realbits + scan_type->shift) {
+ dev_err(dev,
+ "Channel %d storagebits (%d) < shifted realbits (%d + %d)\n",
+ ch, scan_type->storagebits,
+ scan_type->realbits,
+ scan_type->shift);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
struct iio_dev *indio_dev,
int index)
@@ -1616,20 +2123,38 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
if (channels) {
/* new magic */
for (i = 0; i < indio_dev->num_channels; i++) {
+ const struct iio_scan_type *scan_type;
+
if (channels[i].scan_index < 0)
continue;
- /* Verify that sample bits fit into storage */
- if (channels[i].scan_type.storagebits <
- channels[i].scan_type.realbits +
- channels[i].scan_type.shift) {
- dev_err(&indio_dev->dev,
- "Channel %d storagebits (%d) < shifted realbits (%d + %d)\n",
- i, channels[i].scan_type.storagebits,
- channels[i].scan_type.realbits,
- channels[i].scan_type.shift);
- ret = -EINVAL;
- goto error_cleanup_dynamic;
+ if (channels[i].has_ext_scan_type) {
+ int j;
+
+ /*
+ * get_current_scan_type is required when using
+ * extended scan types.
+ */
+ if (!indio_dev->info->get_current_scan_type) {
+ ret = -EINVAL;
+ goto error_cleanup_dynamic;
+ }
+
+ for (j = 0; j < channels[i].num_ext_scan_type; j++) {
+ scan_type = &channels[i].ext_scan_type[j];
+
+ ret = iio_channel_validate_scan_type(
+ &indio_dev->dev, i, scan_type);
+ if (ret)
+ goto error_cleanup_dynamic;
+ }
+ } else {
+ scan_type = &channels[i].scan_type;
+
+ ret = iio_channel_validate_scan_type(
+ &indio_dev->dev, i, scan_type);
+ if (ret)
+ goto error_cleanup_dynamic;
}
ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
@@ -1927,6 +2452,7 @@ static void iio_buffer_release(struct kref *ref)
{
struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
+ mutex_destroy(&buffer->dmabufs_mutex);
buffer->access->release(buffer);
}
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index fa7cc051b4c4..0f6cda7ffe45 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -727,22 +727,27 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
}
EXPORT_SYMBOL_GPL(iio_format_value);
-static ssize_t iio_read_channel_label(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ssize_t do_iio_read_channel_label(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *c,
+ char *buf)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
if (indio_dev->info->read_label)
- return indio_dev->info->read_label(indio_dev, this_attr->c, buf);
+ return indio_dev->info->read_label(indio_dev, c, buf);
- if (this_attr->c->extend_name)
- return sysfs_emit(buf, "%s\n", this_attr->c->extend_name);
+ if (c->extend_name)
+ return sysfs_emit(buf, "%s\n", c->extend_name);
return -EINVAL;
}
+static ssize_t iio_read_channel_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return do_iio_read_channel_label(dev_to_iio_dev(dev),
+ to_iio_dev_attr(attr)->c, buf);
+}
+
static ssize_t iio_read_channel_info(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -758,9 +763,11 @@ static ssize_t iio_read_channel_info(struct device *dev,
INDIO_MAX_RAW_ELEMENTS,
vals, &val_len,
this_attr->address);
- else
+ else if (indio_dev->info->read_raw)
ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
&vals[0], &vals[1], this_attr->address);
+ else
+ return -EINVAL;
if (ret < 0)
return ret;
@@ -842,6 +849,9 @@ static ssize_t iio_read_channel_info_avail(struct device *dev,
int length;
int type;
+ if (!indio_dev->info->read_avail)
+ return -EINVAL;
+
ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
&vals, &type, &length,
this_attr->address);
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 910c1f14abd5..db06501b0e61 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -285,6 +285,9 @@ static ssize_t iio_ev_state_store(struct device *dev,
if (ret < 0)
return ret;
+ if (!indio_dev->info->write_event_config)
+ return -EINVAL;
+
ret = indio_dev->info->write_event_config(indio_dev,
this_attr->c, iio_ev_attr_type(this_attr),
iio_ev_attr_dir(this_attr), val);
@@ -300,6 +303,9 @@ static ssize_t iio_ev_state_show(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int val;
+ if (!indio_dev->info->read_event_config)
+ return -EINVAL;
+
val = indio_dev->info->read_event_config(indio_dev,
this_attr->c, iio_ev_attr_type(this_attr),
iio_ev_attr_dir(this_attr));
@@ -318,6 +324,9 @@ static ssize_t iio_ev_value_show(struct device *dev,
int val, val2, val_arr[2];
int ret;
+ if (!indio_dev->info->read_event_value)
+ return -EINVAL;
+
ret = indio_dev->info->read_event_value(indio_dev,
this_attr->c, iio_ev_attr_type(this_attr),
iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
@@ -572,8 +581,8 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
iio_check_for_dynamic_events(indio_dev)))
return 0;
- ev_int = kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
- if (ev_int == NULL)
+ ev_int = kzalloc(sizeof(*ev_int), GFP_KERNEL);
+ if (!ev_int)
return -ENOMEM;
iio_dev_opaque->event_interface = ev_int;
diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
index b51eb6cb766f..59d7615c0f56 100644
--- a/drivers/iio/industrialio-gts-helper.c
+++ b/drivers/iio/industrialio-gts-helper.c
@@ -362,17 +362,20 @@ static int iio_gts_build_avail_time_table(struct iio_gts *gts)
for (i = gts->num_itime - 1; i >= 0; i--) {
int new = gts->itime_table[i].time_us;
- if (times[idx] < new) {
+ if (idx == 0 || times[idx - 1] < new) {
times[idx++] = new;
continue;
}
- for (j = 0; j <= idx; j++) {
+ for (j = 0; j < idx; j++) {
+ if (times[j] == new)
+ break;
if (times[j] > new) {
memmove(&times[j + 1], &times[j],
(idx - j) * sizeof(int));
times[j] = new;
idx++;
+ break;
}
}
}
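[Editor's note] The industrialio-gts-helper.c hunk above corrects the insertion used when building the table of available integration times: a value is appended only when it is larger than the last stored entry, duplicates are skipped, and the shift-and-insert loop now stops once the value has been placed. A standalone sketch of that corrected insert (illustration only, not kernel code; the helper name is hypothetical):

#include <string.h>

/* Insert 'new' into the ascending array times[0..idx-1], skipping
 * duplicates; returns the new element count.
 */
static int gts_insert_time(int *times, int idx, int new)
{
	int j;

	/* Append when strictly larger than everything stored so far. */
	if (idx == 0 || times[idx - 1] < new) {
		times[idx] = new;
		return idx + 1;
	}

	for (j = 0; j < idx; j++) {
		if (times[j] == new)	/* duplicate: table unchanged */
			return idx;
		if (times[j] > new) {	/* shift the tail, insert in place */
			memmove(&times[j + 1], &times[j],
				(idx - j) * sizeof(int));
			times[j] = new;
			return idx + 1;
		}
	}

	return idx;
}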
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 52d773261828..9f484c94bc6e 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -543,6 +543,7 @@ EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
enum iio_chan_info_enum info)
{
+ const struct iio_info *iio_info = chan->indio_dev->info;
int unused;
int vals[INDIO_MAX_RAW_ELEMENTS];
int ret;
@@ -554,15 +555,18 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
if (!iio_channel_has_info(chan->channel, info))
return -EINVAL;
- if (chan->indio_dev->info->read_raw_multi) {
- ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
- chan->channel, INDIO_MAX_RAW_ELEMENTS,
- vals, &val_len, info);
+ if (iio_info->read_raw_multi) {
+ ret = iio_info->read_raw_multi(chan->indio_dev,
+ chan->channel,
+ INDIO_MAX_RAW_ELEMENTS,
+ vals, &val_len, info);
*val = vals[0];
*val2 = vals[1];
+ } else if (iio_info->read_raw) {
+ ret = iio_info->read_raw(chan->indio_dev,
+ chan->channel, val, val2, info);
} else {
- ret = chan->indio_dev->info->read_raw(chan->indio_dev,
- chan->channel, val, val2, info);
+ return -EINVAL;
}
return ret;
@@ -721,7 +725,7 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
return ret;
*val *= scale;
- return 0;
+ return ret;
} else {
ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
if (ret < 0)
@@ -750,11 +754,15 @@ static int iio_channel_read_avail(struct iio_channel *chan,
const int **vals, int *type, int *length,
enum iio_chan_info_enum info)
{
+ const struct iio_info *iio_info = chan->indio_dev->info;
+
if (!iio_channel_has_available(chan->channel, info))
return -EINVAL;
- return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
- vals, type, length, info);
+ if (iio_info->read_avail)
+ return iio_info->read_avail(chan->indio_dev, chan->channel,
+ vals, type, length, info);
+ return -EINVAL;
}
int iio_read_avail_channel_attribute(struct iio_channel *chan,
@@ -917,8 +925,12 @@ EXPORT_SYMBOL_GPL(iio_get_channel_type);
static int iio_channel_write(struct iio_channel *chan, int val, int val2,
enum iio_chan_info_enum info)
{
- return chan->indio_dev->info->write_raw(chan->indio_dev,
- chan->channel, val, val2, info);
+ const struct iio_info *iio_info = chan->indio_dev->info;
+
+ if (iio_info->write_raw)
+ return iio_info->write_raw(chan->indio_dev,
+ chan->channel, val, val2, info);
+ return -EINVAL;
}
int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
@@ -998,3 +1010,9 @@ ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
+
+ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf)
+{
+ return do_iio_read_channel_label(chan->indio_dev, chan->channel, buf);
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_label);
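[Editor's note] The inkern.c change above exports a consumer-facing iio_read_channel_label() built on the refactored do_iio_read_channel_label(). A hedged sketch of how a hypothetical in-kernel consumer might use it; the function, device pointer and channel handle around it are assumptions for illustration:

#include <linux/device.h>
#include <linux/iio/consumer.h>
#include <linux/slab.h>

static void example_log_channel_label(struct device *dev,
				      struct iio_channel *chan)
{
	ssize_t len;
	char *buf;

	/* Page-sized buffer, since the label helpers emit sysfs-style output. */
	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	len = iio_read_channel_label(chan, buf);
	if (len > 0)
		dev_info(dev, "channel label: %s", buf);
	else
		dev_dbg(dev, "no label available (%zd)\n", len);

	kfree(buf);
}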
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 9a587d403118..b68dcc1fbaca 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -666,6 +666,17 @@ config VEML6030
To compile this driver as a module, choose M here: the
module will be called veml6030.
+config VEML6040
+ tristate "VEML6040 RGBW light sensor"
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Vishay VEML6040
+ RGBW light sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called veml6040.
+
config VEML6070
tristate "VEML6070 UV A light sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index a30f906e91ba..1a071a8e9f8e 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_US5182D) += us5182d.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
obj-$(CONFIG_VCNL4035) += vcnl4035.o
obj-$(CONFIG_VEML6030) += veml6030.o
+obj-$(CONFIG_VEML6040) += veml6040.o
obj-$(CONFIG_VEML6070) += veml6070.o
obj-$(CONFIG_VEML6075) += veml6075.o
obj-$(CONFIG_VL6180) += vl6180.o
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 5fd775a20176..5169f12c3eba 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -261,7 +261,7 @@ static int adjd_s311_probe(struct i2c_client *client)
}
static const struct i2c_device_id adjd_s311_id[] = {
- { "adjd_s311", 0 },
+ { "adjd_s311" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adjd_s311_id);
diff --git a/drivers/iio/light/adux1020.c b/drivers/iio/light/adux1020.c
index aa4a6c78f0aa..2e0170be077a 100644
--- a/drivers/iio/light/adux1020.c
+++ b/drivers/iio/light/adux1020.c
@@ -539,9 +539,8 @@ static int adux1020_write_event_config(struct iio_dev *indio_dev,
* Trigger proximity interrupt when the intensity is above
* or below threshold
*/
- ret = regmap_update_bits(data->regmap, ADUX1020_REG_PROX_TYPE,
- ADUX1020_PROX_TYPE,
- ADUX1020_PROX_TYPE);
+ ret = regmap_set_bits(data->regmap, ADUX1020_REG_PROX_TYPE,
+ ADUX1020_PROX_TYPE);
if (ret < 0)
goto fail;
@@ -748,8 +747,8 @@ static int adux1020_chip_init(struct adux1020_data *data)
dev_dbg(&client->dev, "Detected ADUX1020 with chip id: 0x%04x\n", val);
- ret = regmap_update_bits(data->regmap, ADUX1020_REG_SW_RESET,
- ADUX1020_SW_RESET, ADUX1020_SW_RESET);
+ ret = regmap_set_bits(data->regmap, ADUX1020_REG_SW_RESET,
+ ADUX1020_SW_RESET);
if (ret < 0)
return ret;
@@ -764,8 +763,8 @@ static int adux1020_chip_init(struct adux1020_data *data)
return ret;
/* Use LED_IREF for proximity mode */
- ret = regmap_update_bits(data->regmap, ADUX1020_REG_LED_CURRENT,
- ADUX1020_LED_PIREF_EN, 0);
+ ret = regmap_clear_bits(data->regmap, ADUX1020_REG_LED_CURRENT,
+ ADUX1020_LED_PIREF_EN);
if (ret < 0)
return ret;
@@ -821,7 +820,7 @@ static int adux1020_probe(struct i2c_client *client)
}
static const struct i2c_device_id adux1020_id[] = {
- { "adux1020", 0 },
+ { "adux1020" },
{}
};
MODULE_DEVICE_TABLE(i2c, adux1020_id);
diff --git a/drivers/iio/light/al3320a.c b/drivers/iio/light/al3320a.c
index 105f379b9b41..497ea3fe3377 100644
--- a/drivers/iio/light/al3320a.c
+++ b/drivers/iio/light/al3320a.c
@@ -236,7 +236,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(al3320a_pm_ops, al3320a_suspend,
al3320a_resume);
static const struct i2c_device_id al3320a_id[] = {
- {"al3320a", 0},
+ { "al3320a" },
{}
};
MODULE_DEVICE_TABLE(i2c, al3320a_id);
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
index 0f978b30a232..11f2ab4ca261 100644
--- a/drivers/iio/light/apds9300.c
+++ b/drivers/iio/light/apds9300.c
@@ -493,7 +493,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(apds9300_pm_ops, apds9300_suspend,
apds9300_resume);
static const struct i2c_device_id apds9300_id[] = {
- { APDS9300_DRV_NAME, 0 },
+ { APDS9300_DRV_NAME },
{ }
};
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 1065a340b12b..e9e65130b6f9 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -1107,7 +1107,7 @@ static const struct dev_pm_ops apds9960_pm_ops = {
};
static const struct i2c_device_id apds9960_id[] = {
- { "apds9960", 0 },
+ { "apds9960" },
{}
};
MODULE_DEVICE_TABLE(i2c, apds9960_id);
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index b84166c5fa06..475f44954f61 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -256,8 +256,8 @@ static DEFINE_RUNTIME_DEV_PM_OPS(bh1780_dev_pm_ops, bh1780_runtime_suspend,
bh1780_runtime_resume, NULL);
static const struct i2c_device_id bh1780_id[] = {
- { "bh1780", 0 },
- { },
+ { "bh1780" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, bh1780_id);
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index d48a70efca69..b6288dd25bbf 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -368,7 +368,7 @@ static void cm3232_remove(struct i2c_client *client)
}
static const struct i2c_device_id cm3232_id[] = {
- {"cm3232", 0},
+ { "cm3232" },
{}
};
diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c
index 35d20207a648..79a64e2ff812 100644
--- a/drivers/iio/light/cm3323.c
+++ b/drivers/iio/light/cm3323.c
@@ -250,7 +250,7 @@ static int cm3323_probe(struct i2c_client *client)
}
static const struct i2c_device_id cm3323_id[] = {
- {"cm3323", 0},
+ { "cm3323" },
{}
};
MODULE_DEVICE_TABLE(i2c, cm3323_id);
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 97e559acba2b..a4a1505534c0 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -713,7 +713,7 @@ static void cm36651_remove(struct i2c_client *client)
}
static const struct i2c_device_id cm36651_id[] = {
- { "cm36651", 0 },
+ { "cm36651" },
{ }
};
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index fec10d5e037e..7125e011a38a 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -692,8 +692,8 @@ static DEFINE_RUNTIME_DEV_PM_OPS(gp2ap002_dev_pm_ops, gp2ap002_runtime_suspend,
gp2ap002_runtime_resume, NULL);
static const struct i2c_device_id gp2ap002_id_table[] = {
- { "gp2ap002", 0 },
- { },
+ { "gp2ap002" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, gp2ap002_id_table);
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index 9f41724819b6..757383456da6 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -237,7 +237,6 @@ enum gp2ap020a00f_thresh_val_id {
};
struct gp2ap020a00f_data {
- const struct gp2ap020a00f_platform_data *pdata;
struct i2c_client *client;
struct mutex lock;
char *buffer;
@@ -1592,7 +1591,7 @@ static void gp2ap020a00f_remove(struct i2c_client *client)
}
static const struct i2c_device_id gp2ap020a00f_id[] = {
- { GP2A_I2C_NAME, 0 },
+ { GP2A_I2C_NAME },
{ }
};
diff --git a/drivers/iio/light/iqs621-als.c b/drivers/iio/light/iqs621-als.c
index 004ea890a4b2..6de33feada3a 100644
--- a/drivers/iio/light/iqs621-als.c
+++ b/drivers/iio/light/iqs621-als.c
@@ -86,8 +86,8 @@ static int iqs621_als_init(struct iqs621_als_private *iqs621_als)
if (iqs621_als->prox_en)
event_mask |= iqs62x->dev_desc->ir_mask;
- return regmap_update_bits(iqs62x->regmap, IQS620_GLBL_EVENT_MASK,
- event_mask, 0);
+ return regmap_clear_bits(iqs62x->regmap, IQS620_GLBL_EVENT_MASK,
+ event_mask);
}
static int iqs621_als_notifier(struct notifier_block *notifier,
diff --git a/drivers/iio/light/isl29018.c b/drivers/iio/light/isl29018.c
index 43484c18b101..8dfc750e68c0 100644
--- a/drivers/iio/light/isl29018.c
+++ b/drivers/iio/light/isl29018.c
@@ -550,9 +550,9 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
return -ENODEV;
/* Clear brownout bit */
- status = regmap_update_bits(chip->regmap,
- ISL29035_REG_DEVICE_ID,
- ISL29035_BOUT_MASK, 0);
+ status = regmap_clear_bits(chip->regmap,
+ ISL29035_REG_DEVICE_ID,
+ ISL29035_BOUT_MASK);
if (status < 0)
return status;
}
diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c
index 5694683389be..95bfb3ffa519 100644
--- a/drivers/iio/light/isl29028.c
+++ b/drivers/iio/light/isl29028.c
@@ -678,8 +678,8 @@ static DEFINE_RUNTIME_DEV_PM_OPS(isl29028_pm_ops, isl29028_suspend,
isl29028_resume, NULL);
static const struct i2c_device_id isl29028_id[] = {
- {"isl29028", 0},
- {"isl29030", 0},
+ { "isl29028" },
+ { "isl29030" },
{}
};
MODULE_DEVICE_TABLE(i2c, isl29028_id);
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index f1d3356d3369..59329546df58 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -327,7 +327,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(isl29125_pm_ops, isl29125_suspend,
isl29125_resume);
static const struct i2c_device_id isl29125_id[] = {
- { "isl29125", 0 },
+ { "isl29125" },
{ }
};
MODULE_DEVICE_TABLE(i2c, isl29125_id);
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index 869196746045..e7ba934c8e69 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -429,7 +429,7 @@ static const struct acpi_device_id jsa1212_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, jsa1212_acpi_match);
static const struct i2c_device_id jsa1212_id[] = {
- { JSA1212_DRIVER_NAME, 0 },
+ { JSA1212_DRIVER_NAME },
{ }
};
MODULE_DEVICE_TABLE(i2c, jsa1212_id);
diff --git a/drivers/iio/light/lv0104cs.c b/drivers/iio/light/lv0104cs.c
index a5445d58fddf..916109ec3217 100644
--- a/drivers/iio/light/lv0104cs.c
+++ b/drivers/iio/light/lv0104cs.c
@@ -510,7 +510,7 @@ static int lv0104cs_probe(struct i2c_client *client)
}
static const struct i2c_device_id lv0104cs_id[] = {
- { "lv0104cs", 0 },
+ { "lv0104cs" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lv0104cs_id);
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index 26b464b1b650..b935976871a6 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -598,7 +598,7 @@ static int max44000_probe(struct i2c_client *client)
}
static const struct i2c_device_id max44000_id[] = {
- {"max44000", 0},
+ { "max44000" },
{ }
};
MODULE_DEVICE_TABLE(i2c, max44000_id);
diff --git a/drivers/iio/light/max44009.c b/drivers/iio/light/max44009.c
index 61ce276e86f7..3b92362675dc 100644
--- a/drivers/iio/light/max44009.c
+++ b/drivers/iio/light/max44009.c
@@ -534,7 +534,7 @@ static const struct of_device_id max44009_of_match[] = {
MODULE_DEVICE_TABLE(of, max44009_of_match);
static const struct i2c_device_id max44009_id[] = {
- { "max44009", 0 },
+ { "max44009" },
{ }
};
MODULE_DEVICE_TABLE(i2c, max44009_id);
diff --git a/drivers/iio/light/noa1305.c b/drivers/iio/light/noa1305.c
index 1574310020e3..596cc48c4c34 100644
--- a/drivers/iio/light/noa1305.c
+++ b/drivers/iio/light/noa1305.c
@@ -268,7 +268,7 @@ static const struct of_device_id noa1305_of_match[] = {
MODULE_DEVICE_TABLE(of, noa1305_of_match);
static const struct i2c_device_id noa1305_ids[] = {
- { "noa1305", 0 },
+ { "noa1305" },
{ }
};
MODULE_DEVICE_TABLE(i2c, noa1305_ids);
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index cb41e5ee8ec1..887c4b776a86 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -822,7 +822,7 @@ static void opt3001_remove(struct i2c_client *client)
}
static const struct i2c_device_id opt3001_id[] = {
- { "opt3001", 0 },
+ { "opt3001" },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(i2c, opt3001_id);
diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c
index 636432c45651..b920bf82c102 100644
--- a/drivers/iio/light/pa12203001.c
+++ b/drivers/iio/light/pa12203001.c
@@ -462,7 +462,7 @@ static const struct acpi_device_id pa12203001_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, pa12203001_acpi_match);
static const struct i2c_device_id pa12203001_id[] = {
- { "txcpa122", 0 },
+ { "txcpa122" },
{}
};
diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c
index bf3de853a811..4937bf6fa046 100644
--- a/drivers/iio/light/rohm-bu27034.c
+++ b/drivers/iio/light/rohm-bu27034.c
@@ -223,12 +223,6 @@ struct bu27034_data {
} scan;
};
-struct bu27034_result {
- u16 ch0;
- u16 ch1;
- u16 ch2;
-};
-
static const struct regmap_range bu27034_volatile_ranges[] = {
{
.range_min = BU27034_REG_SYSTEM_CONTROL,
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index 40d5732b5e32..78c08e0bd077 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -1109,7 +1109,7 @@ static const struct acpi_device_id rpr0521_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, rpr0521_acpi_match);
static const struct i2c_device_id rpr0521_id[] = {
- {"rpr0521", 0},
+ { "rpr0521" },
{ }
};
diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c
index ea2c437199c0..eeff6cc792f2 100644
--- a/drivers/iio/light/si1133.c
+++ b/drivers/iio/light/si1133.c
@@ -1055,7 +1055,7 @@ static int si1133_probe(struct i2c_client *client)
}
static const struct i2c_device_id si1133_ids[] = {
- { "si1133", 0 },
+ { "si1133" },
{ }
};
MODULE_DEVICE_TABLE(i2c, si1133_ids);
diff --git a/drivers/iio/light/st_uvis25_core.c b/drivers/iio/light/st_uvis25_core.c
index d4e17079b2f4..fba3997574bb 100644
--- a/drivers/iio/light/st_uvis25_core.c
+++ b/drivers/iio/light/st_uvis25_core.c
@@ -330,8 +330,8 @@ static int st_uvis25_suspend(struct device *dev)
struct iio_dev *iio_dev = dev_get_drvdata(dev);
struct st_uvis25_hw *hw = iio_priv(iio_dev);
- return regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL1_ADDR,
- ST_UVIS25_REG_ODR_MASK, 0);
+ return regmap_clear_bits(hw->regmap, ST_UVIS25_REG_CTRL1_ADDR,
+ ST_UVIS25_REG_ODR_MASK);
}
static int st_uvis25_resume(struct device *dev)
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 08d471438175..e3470d6743ef 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -37,6 +37,8 @@
#define STK3310_CHIP_ID_VAL 0x13
#define STK3311_CHIP_ID_VAL 0x1D
+#define STK3311A_CHIP_ID_VAL 0x15
+#define STK3311S34_CHIP_ID_VAL 0x1E
#define STK3311X_CHIP_ID_VAL 0x12
#define STK3335_CHIP_ID_VAL 0x51
#define STK3310_PSINT_EN 0x01
@@ -81,6 +83,15 @@ static const struct reg_field stk3310_reg_field_flag_psint =
static const struct reg_field stk3310_reg_field_flag_nf =
REG_FIELD(STK3310_REG_FLAG, 0, 0);
+static const u8 stk3310_chip_ids[] = {
+ STK3310_CHIP_ID_VAL,
+ STK3311A_CHIP_ID_VAL,
+ STK3311S34_CHIP_ID_VAL,
+ STK3311X_CHIP_ID_VAL,
+ STK3311_CHIP_ID_VAL,
+ STK3335_CHIP_ID_VAL,
+};
+
/* Estimate maximum proximity values with regard to measurement scale. */
static const int stk3310_ps_max[4] = {
STK3310_PS_MAX_VAL / 640,
@@ -197,6 +208,16 @@ static const struct attribute_group stk3310_attribute_group = {
.attrs = stk3310_attributes
};
+static int stk3310_check_chip_id(const u8 chip_id)
+{
+ for (int i = 0; i < ARRAY_SIZE(stk3310_chip_ids); i++) {
+ if (chip_id == stk3310_chip_ids[i])
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
static int stk3310_get_index(const int table[][2], int table_size,
int val, int val2)
{
@@ -473,13 +494,9 @@ static int stk3310_init(struct iio_dev *indio_dev)
if (ret < 0)
return ret;
- if (chipid != STK3310_CHIP_ID_VAL &&
- chipid != STK3311_CHIP_ID_VAL &&
- chipid != STK3311X_CHIP_ID_VAL &&
- chipid != STK3335_CHIP_ID_VAL) {
- dev_err(&client->dev, "invalid chip id: 0x%x\n", chipid);
- return -ENODEV;
- }
+ ret = stk3310_check_chip_id(chipid);
+ if (ret < 0)
+ dev_warn(&client->dev, "unknown chip id: 0x%x\n", chipid);
state = STK3310_STATE_EN_ALS | STK3310_STATE_EN_PS;
ret = stk3310_set_state(data, state);
@@ -683,9 +700,9 @@ static DEFINE_SIMPLE_DEV_PM_OPS(stk3310_pm_ops, stk3310_suspend,
stk3310_resume);
static const struct i2c_device_id stk3310_i2c_id[] = {
- {"STK3310", 0},
- {"STK3311", 0},
- {"STK3335", 0},
+ { "STK3310" },
+ { "STK3311" },
+ { "STK3335" },
{}
};
MODULE_DEVICE_TABLE(i2c, stk3310_i2c_id);
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index dcdd85b006be..c9566615b964 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -363,7 +363,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(tcs3414_pm_ops, tcs3414_suspend,
tcs3414_resume);
static const struct i2c_device_id tcs3414_id[] = {
- { "tcs3414", 0 },
+ { "tcs3414" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tcs3414_id);
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 75fcf2c93717..89384dba83dd 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -599,7 +599,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(tcs3472_pm_ops, tcs3472_suspend,
tcs3472_resume);
static const struct i2c_device_id tcs3472_id[] = {
- { "tcs3472", 0 },
+ { "tcs3472" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tcs3472_id);
diff --git a/drivers/iio/light/tsl4531.c b/drivers/iio/light/tsl4531.c
index 4da7d78906d4..a5788c09ad02 100644
--- a/drivers/iio/light/tsl4531.c
+++ b/drivers/iio/light/tsl4531.c
@@ -227,7 +227,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(tsl4531_pm_ops, tsl4531_suspend,
tsl4531_resume);
static const struct i2c_device_id tsl4531_id[] = {
- { "tsl4531", 0 },
+ { "tsl4531" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tsl4531_id);
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 9189a1d4d7e1..de6967ac3b0b 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -955,7 +955,7 @@ static const struct acpi_device_id us5182d_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, us5182d_acpi_match);
static const struct i2c_device_id us5182d_id[] = {
- { "usd5182", 0 },
+ { "usd5182" },
{}
};
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index 56bbefbc0ae6..337a1332c2c6 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -653,7 +653,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(vcnl4035_pm_ops, vcnl4035_runtime_suspend,
vcnl4035_runtime_resume, NULL);
static const struct i2c_device_id vcnl4035_id[] = {
- { "vcnl4035", 0 },
+ { "vcnl4035" },
{ }
};
MODULE_DEVICE_TABLE(i2c, vcnl4035_id);
diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
index 043f233d9bdb..2e86d310952e 100644
--- a/drivers/iio/light/veml6030.c
+++ b/drivers/iio/light/veml6030.c
@@ -144,8 +144,8 @@ static const struct attribute_group veml6030_event_attr_group = {
static int veml6030_als_pwr_on(struct veml6030_data *data)
{
- return regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
- VEML6030_ALS_SD, 0);
+ return regmap_clear_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_SD);
}
static int veml6030_als_shut_down(struct veml6030_data *data)
@@ -881,7 +881,7 @@ static const struct of_device_id veml6030_of_match[] = {
MODULE_DEVICE_TABLE(of, veml6030_of_match);
static const struct i2c_device_id veml6030_id[] = {
- { "veml6030", 0 },
+ { "veml6030" },
{ }
};
MODULE_DEVICE_TABLE(i2c, veml6030_id);
diff --git a/drivers/iio/light/veml6040.c b/drivers/iio/light/veml6040.c
new file mode 100644
index 000000000000..216e271001a8
--- /dev/null
+++ b/drivers/iio/light/veml6040.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Vishay VEML6040 RGBW light sensor driver
+ *
+ * Copyright (C) 2024 Sentec AG
+ * Author: Arthur Becker <arthur.becker@sentec.com>
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+/* VEML6040 Configuration Registers
+ *
+ * SD: Shutdown
+ * AF: Auto / Force Mode (Auto Measurements On:0, Off:1)
+ * TR: Trigger Measurement (when AF Bit is set)
+ * IT: Integration Time
+ */
+#define VEML6040_CONF_REG 0x000
+#define VEML6040_CONF_SD_MSK BIT(0)
+#define VEML6040_CONF_AF_MSK BIT(1)
+#define VEML6040_CONF_TR_MSK BIT(2)
+#define VEML6040_CONF_IT_MSK GENMASK(6, 4)
+#define VEML6040_CONF_IT_40_MS 0
+#define VEML6040_CONF_IT_80_MS 1
+#define VEML6040_CONF_IT_160_MS 2
+#define VEML6040_CONF_IT_320_MS 3
+#define VEML6040_CONF_IT_640_MS 4
+#define VEML6040_CONF_IT_1280_MS 5
+
+/* VEML6040 Read Only Registers */
+#define VEML6040_REG_R 0x08
+#define VEML6040_REG_G 0x09
+#define VEML6040_REG_B 0x0A
+#define VEML6040_REG_W 0x0B
+
+static const int veml6040_it_ms[] = { 40, 80, 160, 320, 640, 1280 };
+
+enum veml6040_chan {
+ CH_RED,
+ CH_GREEN,
+ CH_BLUE,
+ CH_WHITE,
+};
+
+struct veml6040_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+};
+
+static const struct regmap_config veml6040_regmap_config = {
+ .name = "veml6040_regmap",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = VEML6040_REG_W,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static int veml6040_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret, reg, it_index;
+ struct veml6040_data *data = iio_priv(indio_dev);
+ struct regmap *regmap = data->regmap;
+ struct device *dev = &data->client->dev;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(regmap, chan->address, &reg);
+ if (ret) {
+ dev_err(dev, "Data read failed: %d\n", ret);
+ return ret;
+ }
+ *val = reg;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_INT_TIME:
+ ret = regmap_read(regmap, VEML6040_CONF_REG, &reg);
+ if (ret) {
+ dev_err(dev, "Data read failed: %d\n", ret);
+ return ret;
+ }
+ it_index = FIELD_GET(VEML6040_CONF_IT_MSK, reg);
+ if (it_index >= ARRAY_SIZE(veml6040_it_ms)) {
+			dev_err(dev, "Invalid Integration Time Set\n");
+ return -EINVAL;
+ }
+ *val = veml6040_it_ms[it_index];
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6040_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct veml6040_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ for (int i = 0; i < ARRAY_SIZE(veml6040_it_ms); i++) {
+ if (veml6040_it_ms[i] != val)
+ continue;
+
+ return regmap_update_bits(data->regmap,
+ VEML6040_CONF_REG,
+ VEML6040_CONF_IT_MSK,
+ FIELD_PREP(VEML6040_CONF_IT_MSK, i));
+ }
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6040_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ *length = ARRAY_SIZE(veml6040_it_ms);
+ *vals = veml6040_it_ms;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info veml6040_info = {
+ .read_raw = veml6040_read_raw,
+ .write_raw = veml6040_write_raw,
+ .read_avail = veml6040_read_avail,
+};
+
+static const struct iio_chan_spec veml6040_channels[] = {
+ {
+ .type = IIO_INTENSITY,
+ .address = VEML6040_REG_R,
+ .channel = CH_RED,
+ .channel2 = IIO_MOD_LIGHT_RED,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_shared_by_type_available =
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ },
+ {
+ .type = IIO_INTENSITY,
+ .address = VEML6040_REG_G,
+ .channel = CH_GREEN,
+ .channel2 = IIO_MOD_LIGHT_GREEN,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_shared_by_type_available =
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ },
+ {
+ .type = IIO_INTENSITY,
+ .address = VEML6040_REG_B,
+ .channel = CH_BLUE,
+ .channel2 = IIO_MOD_LIGHT_BLUE,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_shared_by_type_available =
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ },
+ {
+ .type = IIO_INTENSITY,
+ .address = VEML6040_REG_W,
+ .channel = CH_WHITE,
+ .channel2 = IIO_MOD_LIGHT_CLEAR,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_shared_by_type_available =
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ }
+};
+
+static void veml6040_shutdown_action(void *data)
+{
+ struct veml6040_data *veml6040_data = data;
+
+ regmap_update_bits(veml6040_data->regmap, VEML6040_CONF_REG,
+ VEML6040_CONF_SD_MSK, VEML6040_CONF_SD_MSK);
+}
+
+static int veml6040_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct veml6040_data *data;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ const int init_config =
+ FIELD_PREP(VEML6040_CONF_IT_MSK, VEML6040_CONF_IT_40_MS) |
+ FIELD_PREP(VEML6040_CONF_AF_MSK, 0) |
+ FIELD_PREP(VEML6040_CONF_SD_MSK, 0);
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return dev_err_probe(dev, -EOPNOTSUPP,
+ "I2C adapter doesn't support plain I2C\n");
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return dev_err_probe(dev, -ENOMEM,
+ "IIO device allocation failed\n");
+
+ regmap = devm_regmap_init_i2c(client, &veml6040_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Regmap setup failed\n");
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ data->regmap = regmap;
+
+ indio_dev->name = "veml6040";
+ indio_dev->info = &veml6040_info;
+ indio_dev->channels = veml6040_channels;
+ indio_dev->num_channels = ARRAY_SIZE(veml6040_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, VEML6040_CONF_REG, init_config);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Could not set initial config\n");
+
+ ret = devm_add_action_or_reset(dev, veml6040_shutdown_action, data);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct i2c_device_id veml6040_id_table[] = {
+ {"veml6040"},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, veml6040_id_table);
+
+static const struct of_device_id veml6040_of_match[] = {
+ {.compatible = "vishay,veml6040"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, veml6040_of_match);
+
+static struct i2c_driver veml6040_driver = {
+ .probe = veml6040_probe,
+ .id_table = veml6040_id_table,
+ .driver = {
+ .name = "veml6040",
+ .of_match_table = veml6040_of_match,
+ },
+};
+module_i2c_driver(veml6040_driver);
+
+MODULE_DESCRIPTION("veml6040 RGBW light sensor driver");
+MODULE_AUTHOR("Arthur Becker <arthur.becker@sentec.com>");
+MODULE_LICENSE("GPL");
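[Editor's note] The new veml6040 driver above registers four modified intensity channels, so userspace sees per-colour *_raw attributes plus a shared integration time. A minimal userspace sketch reading the red channel through sysfs; the device index iio:device0 is an assumption for illustration:

#include <stdio.h>

int main(void)
{
	/* Attribute name follows from IIO_INTENSITY + IIO_MOD_LIGHT_RED. */
	const char *path =
		"/sys/bus/iio/devices/iio:device0/in_intensity_red_raw";
	unsigned int red;
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &red) == 1)
		printf("red = %u\n", red);
	fclose(f);
	return 0;
}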
diff --git a/drivers/iio/light/veml6070.c b/drivers/iio/light/veml6070.c
index d99bf3ae0fe8..f8321d346d77 100644
--- a/drivers/iio/light/veml6070.c
+++ b/drivers/iio/light/veml6070.c
@@ -189,7 +189,7 @@ static void veml6070_remove(struct i2c_client *client)
}
static const struct i2c_device_id veml6070_id[] = {
- { "veml6070", 0 },
+ { "veml6070" },
{ }
};
MODULE_DEVICE_TABLE(i2c, veml6070_id);
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
index dcadf6428a87..a1b2b3c0b4c8 100644
--- a/drivers/iio/light/vl6180.c
+++ b/drivers/iio/light/vl6180.c
@@ -527,7 +527,7 @@ static const struct of_device_id vl6180_of_match[] = {
MODULE_DEVICE_TABLE(of, vl6180_of_match);
static const struct i2c_device_id vl6180_id[] = {
- { "vl6180", 0 },
+ { "vl6180" },
{ }
};
MODULE_DEVICE_TABLE(i2c, vl6180_id);
diff --git a/drivers/iio/light/zopt2201.c b/drivers/iio/light/zopt2201.c
index d370193a4742..327f94e447af 100644
--- a/drivers/iio/light/zopt2201.c
+++ b/drivers/iio/light/zopt2201.c
@@ -545,7 +545,7 @@ static int zopt2201_probe(struct i2c_client *client)
}
static const struct i2c_device_id zopt2201_id[] = {
- { "zopt2201", 0 },
+ { "zopt2201" },
{ }
};
MODULE_DEVICE_TABLE(i2c, zopt2201_id);
diff --git a/drivers/iio/magnetometer/af8133j.c b/drivers/iio/magnetometer/af8133j.c
index 742bbdf25f08..d81d89af6283 100644
--- a/drivers/iio/magnetometer/af8133j.c
+++ b/drivers/iio/magnetometer/af8133j.c
@@ -505,7 +505,7 @@ static const struct of_device_id af8133j_of_match[] = {
MODULE_DEVICE_TABLE(of, af8133j_of_match);
static const struct i2c_device_id af8133j_id[] = {
- { "af8133j", 0 },
+ { "af8133j" },
{ }
};
MODULE_DEVICE_TABLE(i2c, af8133j_id);
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index c74d11943ec7..961b1e0bfb13 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -327,10 +327,7 @@ static int ak8974_trigmeas(struct ak8974 *ak8974)
}
/* Force a measurement */
- return regmap_update_bits(ak8974->map,
- AK8974_CTRL3,
- AK8974_CTRL3_FORCE,
- AK8974_CTRL3_FORCE);
+ return regmap_set_bits(ak8974->map, AK8974_CTRL3, AK8974_CTRL3_FORCE);
}
static int ak8974_await_drdy(struct ak8974 *ak8974)
@@ -438,10 +435,7 @@ static int ak8974_selftest(struct ak8974 *ak8974)
}
/* Trigger self-test */
- ret = regmap_update_bits(ak8974->map,
- AK8974_CTRL3,
- AK8974_CTRL3_SELFTEST,
- AK8974_CTRL3_SELFTEST);
+ ret = regmap_set_bits(ak8974->map, AK8974_CTRL3, AK8974_CTRL3_SELFTEST);
if (ret) {
dev_err(dev, "could not write CTRL3\n");
return ret;
@@ -1025,10 +1019,10 @@ static DEFINE_RUNTIME_DEV_PM_OPS(ak8974_dev_pm_ops, ak8974_runtime_suspend,
ak8974_runtime_resume, NULL);
static const struct i2c_device_id ak8974_id[] = {
- {"ami305", 0 },
- {"ami306", 0 },
- {"ak8974", 0 },
- {"hscdtd008a", 0 },
+ { "ami305" },
+ { "ami306" },
+ { "ak8974" },
+ { "hscdtd008a" },
{}
};
MODULE_DEVICE_TABLE(i2c, ak8974_id);
diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c
index 48d9c698f520..a28d46d59875 100644
--- a/drivers/iio/magnetometer/bmc150_magn_i2c.c
+++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c
@@ -47,9 +47,9 @@ static const struct acpi_device_id bmc150_magn_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
static const struct i2c_device_id bmc150_magn_i2c_id[] = {
- {"bmc150_magn", 0},
- {"bmc156_magn", 0},
- {"bmm150_magn", 0},
+ { "bmc150_magn" },
+ { "bmc156_magn" },
+ { "bmm150_magn" },
{}
};
MODULE_DEVICE_TABLE(i2c, bmc150_magn_i2c_id);
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index deffe3ca9004..5295dc0100e4 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -624,7 +624,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(mag3110_pm_ops, mag3110_suspend,
mag3110_resume);
static const struct i2c_device_id mag3110_id[] = {
- { "mag3110", 0 },
+ { "mag3110" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mag3110_id);
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index 6b9f4b056191..dd480a4a5f98 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -186,9 +186,8 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set)
* Recharge the capacitor at VCAP pin, requested to be issued
* before a SET/RESET command.
*/
- ret = regmap_update_bits(data->regmap, MMC35240_REG_CTRL0,
- MMC35240_CTRL0_REFILL_BIT,
- MMC35240_CTRL0_REFILL_BIT);
+ ret = regmap_set_bits(data->regmap, MMC35240_REG_CTRL0,
+ MMC35240_CTRL0_REFILL_BIT);
if (ret < 0)
return ret;
usleep_range(MMC35240_WAIT_CHARGE_PUMP, MMC35240_WAIT_CHARGE_PUMP + 1);
@@ -198,8 +197,7 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set)
else
coil_bit = MMC35240_CTRL0_RESET_BIT;
- return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0,
- coil_bit, coil_bit);
+ return regmap_set_bits(data->regmap, MMC35240_REG_CTRL0, coil_bit);
}
@@ -563,7 +561,7 @@ static const struct acpi_device_id mmc35240_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, mmc35240_acpi_match);
static const struct i2c_device_id mmc35240_id[] = {
- {"mmc35240", 0},
+ { "mmc35240" },
{}
};
MODULE_DEVICE_TABLE(i2c, mmc35240_id);
diff --git a/drivers/iio/magnetometer/tmag5273.c b/drivers/iio/magnetometer/tmag5273.c
index 218b1ce076c1..4187abe12784 100644
--- a/drivers/iio/magnetometer/tmag5273.c
+++ b/drivers/iio/magnetometer/tmag5273.c
@@ -118,11 +118,9 @@ struct tmag5273_data {
unsigned int version;
char name[16];
unsigned int conv_avg;
- unsigned int scale;
enum tmag5273_scale_index scale_index;
unsigned int angle_measurement;
struct regmap *map;
- struct regulator *vcc;
/*
* Locks the sensor for exclusive use during a measurement (which
diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c
index edd8c69f6d2e..2953403bef53 100644
--- a/drivers/iio/multiplexer/iio-mux.c
+++ b/drivers/iio/multiplexer/iio-mux.c
@@ -30,7 +30,6 @@ struct mux {
int cached_state;
struct mux_control *control;
struct iio_channel *parent;
- struct iio_dev *indio_dev;
struct iio_chan_spec *chan;
struct iio_chan_spec_ext_info *ext_info;
struct mux_child *child;
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index bd0f2c3bf2f2..c2c6b2b29867 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -405,8 +405,8 @@ static const struct of_device_id lmp91000_of_match[] = {
MODULE_DEVICE_TABLE(of, lmp91000_of_match);
static const struct i2c_device_id lmp91000_id[] = {
- { "lmp91000", 0 },
- { "lmp91002", 0 },
+ { "lmp91000" },
+ { "lmp91002" },
{}
};
MODULE_DEVICE_TABLE(i2c, lmp91000_id);
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 09f53d987c7d..49081b729618 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -27,6 +27,7 @@
#include <linux/bitops.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
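The <linux/cleanup.h> include added here provides the guard(mutex)() construct used later in this file (bmp280_read_raw_impl(), bmp280_write_raw_impl() and the bmp580 NVM helpers): the lock is taken where the guard is declared and released automatically when it goes out of scope, which is why the explicit mutex_unlock() calls and the ret bookkeeping disappear from those paths. A minimal sketch of the pattern, with a made-up helper for illustration:

static int example_locked_read(struct bmp280_data *data, int *val)
{
	guard(mutex)(&data->lock);	/* mutex_lock() happens here */

	if (!data->chip_info->read_temp)
		return -EOPNOTSUPP;	/* unlocked automatically on return */

	*val = 0;			/* device access would go here */
	return 0;			/* unlocked automatically here too */
}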
@@ -52,7 +53,6 @@
*/
enum { AC1, AC2, AC3, AC4, AC5, AC6, B1, B2, MB, MC, MD };
-
enum bmp380_odr {
BMP380_ODR_200HZ,
BMP380_ODR_100HZ,
@@ -181,18 +181,19 @@ static int bmp280_read_calib(struct bmp280_data *data)
struct bmp280_calib *calib = &data->calib.bmp280;
int ret;
-
/* Read temperature and pressure calibration values. */
ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
- data->bmp280_cal_buf, sizeof(data->bmp280_cal_buf));
- if (ret < 0) {
+ data->bmp280_cal_buf,
+ sizeof(data->bmp280_cal_buf));
+ if (ret) {
dev_err(data->dev,
- "failed to read temperature and pressure calibration parameters\n");
+ "failed to read calibration parameters\n");
return ret;
}
- /* Toss the temperature and pressure calibration data into the entropy pool */
- add_device_randomness(data->bmp280_cal_buf, sizeof(data->bmp280_cal_buf));
+ /* Toss calibration data into the entropy pool */
+ add_device_randomness(data->bmp280_cal_buf,
+ sizeof(data->bmp280_cal_buf));
/* Parse temperature calibration values. */
calib->T1 = le16_to_cpu(data->bmp280_cal_buf[T1]);
@@ -222,10 +223,8 @@ static int bme280_read_calib(struct bmp280_data *data)
/* Load shared calibration params with bmp280 first */
ret = bmp280_read_calib(data);
- if (ret < 0) {
- dev_err(dev, "failed to read common bmp280 calibration parameters\n");
+ if (ret)
return ret;
- }
/*
* Read humidity calibration values.
@@ -235,47 +234,47 @@ static int bme280_read_calib(struct bmp280_data *data)
* Humidity data is only available on BME280.
*/
- ret = regmap_read(data->regmap, BMP280_REG_COMP_H1, &tmp);
- if (ret < 0) {
+ ret = regmap_read(data->regmap, BME280_REG_COMP_H1, &tmp);
+ if (ret) {
dev_err(dev, "failed to read H1 comp value\n");
return ret;
}
calib->H1 = tmp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2,
+ ret = regmap_bulk_read(data->regmap, BME280_REG_COMP_H2,
&data->le16, sizeof(data->le16));
- if (ret < 0) {
+ if (ret) {
dev_err(dev, "failed to read H2 comp value\n");
return ret;
}
calib->H2 = sign_extend32(le16_to_cpu(data->le16), 15);
- ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &tmp);
- if (ret < 0) {
+ ret = regmap_read(data->regmap, BME280_REG_COMP_H3, &tmp);
+ if (ret) {
dev_err(dev, "failed to read H3 comp value\n");
return ret;
}
calib->H3 = tmp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4,
+ ret = regmap_bulk_read(data->regmap, BME280_REG_COMP_H4,
&data->be16, sizeof(data->be16));
- if (ret < 0) {
+ if (ret) {
dev_err(dev, "failed to read H4 comp value\n");
return ret;
}
calib->H4 = sign_extend32(((be16_to_cpu(data->be16) >> 4) & 0xff0) |
(be16_to_cpu(data->be16) & 0xf), 11);
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5,
+ ret = regmap_bulk_read(data->regmap, BME280_REG_COMP_H5,
&data->le16, sizeof(data->le16));
- if (ret < 0) {
+ if (ret) {
dev_err(dev, "failed to read H5 comp value\n");
return ret;
}
- calib->H5 = sign_extend32(FIELD_GET(BMP280_COMP_H5_MASK, le16_to_cpu(data->le16)), 11);
+ calib->H5 = sign_extend32(FIELD_GET(BME280_COMP_H5_MASK, le16_to_cpu(data->le16)), 11);
- ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp);
- if (ret < 0) {
+ ret = regmap_read(data->regmap, BME280_REG_COMP_H6, &tmp);
+ if (ret) {
dev_err(dev, "failed to read H6 comp value\n");
return ret;
}
@@ -283,20 +282,43 @@ static int bme280_read_calib(struct bmp280_data *data)
return 0;
}
+
+static int bme280_read_humid_adc(struct bmp280_data *data, u16 *adc_humidity)
+{
+ u16 value_humidity;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, BME280_REG_HUMIDITY_MSB,
+ &data->be16, sizeof(data->be16));
+ if (ret) {
+ dev_err(data->dev, "failed to read humidity\n");
+ return ret;
+ }
+
+ value_humidity = be16_to_cpu(data->be16);
+ if (value_humidity == BMP280_HUMIDITY_SKIPPED) {
+ dev_err(data->dev, "reading humidity skipped\n");
+ return -EIO;
+ }
+ *adc_humidity = value_humidity;
+
+ return 0;
+}
+
/*
* Returns humidity in percent, resolution is 0.01 percent. Output value of
* "47445" represents 47445/1024 = 46.333 %RH.
*
* Taken from BME280 datasheet, Section 4.2.3, "Compensation formula".
*/
-static u32 bmp280_compensate_humidity(struct bmp280_data *data,
- s32 adc_humidity)
+static u32 bme280_compensate_humidity(struct bmp280_data *data,
+ u16 adc_humidity, s32 t_fine)
{
struct bmp280_calib *calib = &data->calib.bmp280;
s32 var;
- var = ((s32)data->t_fine) - (s32)76800;
- var = ((((adc_humidity << 14) - (calib->H4 << 20) - (calib->H5 * var))
+ var = t_fine - (s32)76800;
+ var = (((((s32)adc_humidity << 14) - (calib->H4 << 20) - (calib->H5 * var))
+ (s32)16384) >> 15) * (((((((var * calib->H6) >> 10)
* (((var * (s32)calib->H3) >> 11) + (s32)32768)) >> 10)
+ (s32)2097152) * calib->H2 + 8192) >> 14);
@@ -305,7 +327,29 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
var = clamp_val(var, 0, 419430400);
return var >> 12;
-};
+}
+
+static int bmp280_read_temp_adc(struct bmp280_data *data, u32 *adc_temp)
+{
+ u32 value_temp;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
+ data->buf, sizeof(data->buf));
+ if (ret) {
+ dev_err(data->dev, "failed to read temperature\n");
+ return ret;
+ }
+
+ value_temp = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(data->buf));
+ if (value_temp == BMP280_TEMP_SKIPPED) {
+ dev_err(data->dev, "reading temperature skipped\n");
+ return -EIO;
+ }
+ *adc_temp = value_temp;
+
+ return 0;
+}
/*
* Returns temperature in DegC, resolution is 0.01 DegC. Output value of
@@ -314,20 +358,58 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
*
* Taken from datasheet, Section 3.11.3, "Compensation formula".
*/
-static s32 bmp280_compensate_temp(struct bmp280_data *data,
- s32 adc_temp)
+static s32 bmp280_calc_t_fine(struct bmp280_data *data, u32 adc_temp)
{
struct bmp280_calib *calib = &data->calib.bmp280;
s32 var1, var2;
- var1 = (((adc_temp >> 3) - ((s32)calib->T1 << 1)) *
+ var1 = (((((s32)adc_temp) >> 3) - ((s32)calib->T1 << 1)) *
((s32)calib->T2)) >> 11;
- var2 = (((((adc_temp >> 4) - ((s32)calib->T1)) *
- ((adc_temp >> 4) - ((s32)calib->T1))) >> 12) *
- ((s32)calib->T3)) >> 14;
- data->t_fine = var1 + var2;
+ var2 = (((((((s32)adc_temp) >> 4) - ((s32)calib->T1)) *
+ ((((s32)adc_temp >> 4) - ((s32)calib->T1))) >> 12) *
+ ((s32)calib->T3))) >> 14;
+ return var1 + var2; /* t_fine = var1 + var2 */
+}
+
+static int bmp280_get_t_fine(struct bmp280_data *data, s32 *t_fine)
+{
+ u32 adc_temp;
+ int ret;
+
+ ret = bmp280_read_temp_adc(data, &adc_temp);
+ if (ret)
+ return ret;
+
+ *t_fine = bmp280_calc_t_fine(data, adc_temp);
+
+ return 0;
+}
- return (data->t_fine * 5 + 128) >> 8;
+static s32 bmp280_compensate_temp(struct bmp280_data *data, u32 adc_temp)
+{
+ return (bmp280_calc_t_fine(data, adc_temp) * 5 + 128) / 256;
+}
+
+static int bmp280_read_press_adc(struct bmp280_data *data, u32 *adc_press)
+{
+ u32 value_press;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
+ data->buf, sizeof(data->buf));
+ if (ret) {
+ dev_err(data->dev, "failed to read pressure\n");
+ return ret;
+ }
+
+ value_press = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(data->buf));
+ if (value_press == BMP280_PRESS_SKIPPED) {
+ dev_err(data->dev, "reading pressure skipped\n");
+ return -EIO;
+ }
+ *adc_press = value_press;
+
+ return 0;
}
/*
@@ -338,12 +420,12 @@ static s32 bmp280_compensate_temp(struct bmp280_data *data,
* Taken from datasheet, Section 3.11.3, "Compensation formula".
*/
static u32 bmp280_compensate_press(struct bmp280_data *data,
- s32 adc_press)
+ u32 adc_press, s32 t_fine)
{
struct bmp280_calib *calib = &data->calib.bmp280;
s64 var1, var2, p;
- var1 = ((s64)data->t_fine) - 128000;
+ var1 = ((s64)t_fine) - 128000;
var2 = var1 * var1 * (s64)calib->P6;
var2 += (var1 * (s64)calib->P5) << 17;
var2 += ((s64)calib->P4) << 35;
@@ -354,7 +436,7 @@ static u32 bmp280_compensate_press(struct bmp280_data *data,
if (var1 == 0)
return 0;
- p = ((((s64)1048576 - adc_press) << 31) - var2) * 3125;
+ p = ((((s64)1048576 - (s32)adc_press) << 31) - var2) * 3125;
p = div64_s64(p, var1);
var1 = (((s64)calib->P9) * (p >> 13) * (p >> 13)) >> 25;
var2 = ((s64)(calib->P8) * p) >> 19;
@@ -366,62 +448,35 @@ static u32 bmp280_compensate_press(struct bmp280_data *data,
static int bmp280_read_temp(struct bmp280_data *data,
int *val, int *val2)
{
- s32 adc_temp, comp_temp;
+ s32 comp_temp;
+ u32 adc_temp;
int ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
- data->buf, sizeof(data->buf));
- if (ret < 0) {
- dev_err(data->dev, "failed to read temperature\n");
+ ret = bmp280_read_temp_adc(data, &adc_temp);
+ if (ret)
return ret;
- }
- adc_temp = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(data->buf));
- if (adc_temp == BMP280_TEMP_SKIPPED) {
- /* reading was skipped */
- dev_err(data->dev, "reading temperature skipped\n");
- return -EIO;
- }
comp_temp = bmp280_compensate_temp(data, adc_temp);
- /*
- * val might be NULL if we're called by the read_press routine,
- * who only cares about the carry over t_fine value.
- */
- if (val) {
- *val = comp_temp * 10;
- return IIO_VAL_INT;
- }
-
- return 0;
+ *val = comp_temp * 10;
+ return IIO_VAL_INT;
}
static int bmp280_read_press(struct bmp280_data *data,
int *val, int *val2)
{
- u32 comp_press;
- s32 adc_press;
+ u32 comp_press, adc_press;
+ s32 t_fine;
int ret;
- /* Read and compensate temperature so we get a reading of t_fine. */
- ret = bmp280_read_temp(data, NULL, NULL);
- if (ret < 0)
+ ret = bmp280_get_t_fine(data, &t_fine);
+ if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
- data->buf, sizeof(data->buf));
- if (ret < 0) {
- dev_err(data->dev, "failed to read pressure\n");
+ ret = bmp280_read_press_adc(data, &adc_press);
+ if (ret)
return ret;
- }
- adc_press = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(data->buf));
- if (adc_press == BMP280_PRESS_SKIPPED) {
- /* reading was skipped */
- dev_err(data->dev, "reading pressure skipped\n");
- return -EIO;
- }
- comp_press = bmp280_compensate_press(data, adc_press);
+ comp_press = bmp280_compensate_press(data, adc_press, t_fine);
*val = comp_press;
*val2 = 256000;
@@ -429,116 +484,97 @@ static int bmp280_read_press(struct bmp280_data *data,
return IIO_VAL_FRACTIONAL;
}
-static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
+static int bme280_read_humid(struct bmp280_data *data, int *val, int *val2)
{
u32 comp_humidity;
- s32 adc_humidity;
+ u16 adc_humidity;
+ s32 t_fine;
int ret;
- /* Read and compensate temperature so we get a reading of t_fine. */
- ret = bmp280_read_temp(data, NULL, NULL);
- if (ret < 0)
+ ret = bmp280_get_t_fine(data, &t_fine);
+ if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB,
- &data->be16, sizeof(data->be16));
- if (ret < 0) {
- dev_err(data->dev, "failed to read humidity\n");
+ ret = bme280_read_humid_adc(data, &adc_humidity);
+ if (ret)
return ret;
- }
- adc_humidity = be16_to_cpu(data->be16);
- if (adc_humidity == BMP280_HUMIDITY_SKIPPED) {
- /* reading was skipped */
- dev_err(data->dev, "reading humidity skipped\n");
- return -EIO;
- }
- comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
+ comp_humidity = bme280_compensate_humidity(data, adc_humidity, t_fine);
*val = comp_humidity * 1000 / 1024;
return IIO_VAL_INT;
}
-static int bmp280_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
+static int bmp280_read_raw_impl(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
{
struct bmp280_data *data = iio_priv(indio_dev);
- int ret;
- pm_runtime_get_sync(data->dev);
- mutex_lock(&data->lock);
+ guard(mutex)(&data->lock);
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
case IIO_HUMIDITYRELATIVE:
- ret = data->chip_info->read_humid(data, val, val2);
- break;
+ return data->chip_info->read_humid(data, val, val2);
case IIO_PRESSURE:
- ret = data->chip_info->read_press(data, val, val2);
- break;
+ return data->chip_info->read_press(data, val, val2);
case IIO_TEMP:
- ret = data->chip_info->read_temp(data, val, val2);
- break;
+ return data->chip_info->read_temp(data, val, val2);
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
switch (chan->type) {
case IIO_HUMIDITYRELATIVE:
*val = 1 << data->oversampling_humid;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_PRESSURE:
*val = 1 << data->oversampling_press;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_TEMP:
*val = 1 << data->oversampling_temp;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- break;
case IIO_CHAN_INFO_SAMP_FREQ:
- if (!data->chip_info->sampling_freq_avail) {
- ret = -EINVAL;
- break;
- }
+ if (!data->chip_info->sampling_freq_avail)
+ return -EINVAL;
*val = data->chip_info->sampling_freq_avail[data->sampling_freq][0];
*val2 = data->chip_info->sampling_freq_avail[data->sampling_freq][1];
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
+ return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- if (!data->chip_info->iir_filter_coeffs_avail) {
- ret = -EINVAL;
- break;
- }
+ if (!data->chip_info->iir_filter_coeffs_avail)
+ return -EINVAL;
*val = (1 << data->iir_filter_coeff) - 1;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
+}
- mutex_unlock(&data->lock);
+static int bmp280_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct bmp280_data *data = iio_priv(indio_dev);
+ int ret;
+
+ pm_runtime_get_sync(data->dev);
+ ret = bmp280_read_raw_impl(indio_dev, chan, val, val2, mask);
pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
}
-static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data,
- int val)
+static int bme280_write_oversampling_ratio_humid(struct bmp280_data *data,
+ int val)
{
const int *avail = data->chip_info->oversampling_humid_avail;
const int n = data->chip_info->num_oversampling_humid_avail;
@@ -563,7 +599,7 @@ static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data,
}
static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
- int val)
+ int val)
{
const int *avail = data->chip_info->oversampling_temp_avail;
const int n = data->chip_info->num_oversampling_temp_avail;
@@ -588,7 +624,7 @@ static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
}
static int bmp280_write_oversampling_ratio_press(struct bmp280_data *data,
- int val)
+ int val)
{
const int *avail = data->chip_info->oversampling_press_avail;
const int n = data->chip_info->num_oversampling_press_avail;
@@ -662,12 +698,13 @@ static int bmp280_write_iir_filter_coeffs(struct bmp280_data *data, int val)
return -EINVAL;
}
-static int bmp280_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
+static int bmp280_write_raw_impl(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
{
struct bmp280_data *data = iio_priv(indio_dev);
- int ret = 0;
+
+ guard(mutex)(&data->lock);
/*
* Helper functions to update sensor running configuration.
@@ -677,45 +714,36 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
*/
switch (mask) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- pm_runtime_get_sync(data->dev);
- mutex_lock(&data->lock);
switch (chan->type) {
case IIO_HUMIDITYRELATIVE:
- ret = bmp280_write_oversampling_ratio_humid(data, val);
- break;
+ return bme280_write_oversampling_ratio_humid(data, val);
case IIO_PRESSURE:
- ret = bmp280_write_oversampling_ratio_press(data, val);
- break;
+ return bmp280_write_oversampling_ratio_press(data, val);
case IIO_TEMP:
- ret = bmp280_write_oversampling_ratio_temp(data, val);
- break;
+ return bmp280_write_oversampling_ratio_temp(data, val);
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- mutex_unlock(&data->lock);
- pm_runtime_mark_last_busy(data->dev);
- pm_runtime_put_autosuspend(data->dev);
- break;
case IIO_CHAN_INFO_SAMP_FREQ:
- pm_runtime_get_sync(data->dev);
- mutex_lock(&data->lock);
- ret = bmp280_write_sampling_frequency(data, val, val2);
- mutex_unlock(&data->lock);
- pm_runtime_mark_last_busy(data->dev);
- pm_runtime_put_autosuspend(data->dev);
- break;
+ return bmp280_write_sampling_frequency(data, val, val2);
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- pm_runtime_get_sync(data->dev);
- mutex_lock(&data->lock);
- ret = bmp280_write_iir_filter_coeffs(data, val);
- mutex_unlock(&data->lock);
- pm_runtime_mark_last_busy(data->dev);
- pm_runtime_put_autosuspend(data->dev);
- break;
+ return bmp280_write_iir_filter_coeffs(data, val);
default:
return -EINVAL;
}
+}
+
+static int bmp280_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bmp280_data *data = iio_priv(indio_dev);
+ int ret;
+
+ pm_runtime_get_sync(data->dev);
+ ret = bmp280_write_raw_impl(indio_dev, chan, val, val2, mask);
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
return ret;
}
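Splitting read_raw/write_raw into an *_impl() plus a thin wrapper keeps the runtime-PM reference balanced even though the guard()-based implementation returns early from many branches: the wrapper alone touches the PM counters, the implementation alone holds the lock. The same shape is reused for the bmp580 NVM callbacks further down; roughly (names hypothetical):

static int example_op_impl(struct bmp280_data *data)
{
	guard(mutex)(&data->lock);	/* early returns are fine in here */
	return 0;
}

static int example_op(struct bmp280_data *data)
{
	int ret;

	pm_runtime_get_sync(data->dev);		/* wake the sensor */
	ret = example_op_impl(data);
	pm_runtime_mark_last_busy(data->dev);
	pm_runtime_put_autosuspend(data->dev);	/* allow autosuspend again */

	return ret;
}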
@@ -772,22 +800,20 @@ static int bmp280_chip_config(struct bmp280_data *data)
int ret;
ret = regmap_write_bits(data->regmap, BMP280_REG_CTRL_MEAS,
- BMP280_OSRS_TEMP_MASK |
- BMP280_OSRS_PRESS_MASK |
- BMP280_MODE_MASK,
- osrs | BMP280_MODE_NORMAL);
- if (ret < 0) {
- dev_err(data->dev,
- "failed to write ctrl_meas register\n");
+ BMP280_OSRS_TEMP_MASK |
+ BMP280_OSRS_PRESS_MASK |
+ BMP280_MODE_MASK,
+ osrs | BMP280_MODE_NORMAL);
+ if (ret) {
+ dev_err(data->dev, "failed to write ctrl_meas register\n");
return ret;
}
ret = regmap_update_bits(data->regmap, BMP280_REG_CONFIG,
BMP280_FILTER_MASK,
BMP280_FILTER_4X);
- if (ret < 0) {
- dev_err(data->dev,
- "failed to write config register\n");
+ if (ret) {
+ dev_err(data->dev, "failed to write config register\n");
return ret;
}
@@ -833,18 +859,19 @@ EXPORT_SYMBOL_NS(bmp280_chip_info, IIO_BMP280);
static int bme280_chip_config(struct bmp280_data *data)
{
- u8 osrs = FIELD_PREP(BMP280_OSRS_HUMIDITY_MASK, data->oversampling_humid + 1);
+ u8 osrs = FIELD_PREP(BME280_OSRS_HUMIDITY_MASK, data->oversampling_humid + 1);
int ret;
/*
* Oversampling of humidity must be set before oversampling of
* temperature/pressure is set to become effective.
*/
- ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY,
- BMP280_OSRS_HUMIDITY_MASK, osrs);
-
- if (ret < 0)
+ ret = regmap_update_bits(data->regmap, BME280_REG_CTRL_HUMIDITY,
+ BME280_OSRS_HUMIDITY_MASK, osrs);
+ if (ret) {
+ dev_err(data->dev, "failed to set humidity oversampling");
return ret;
+ }
return bmp280_chip_config(data);
}
@@ -870,12 +897,12 @@ const struct bmp280_chip_info bme280_chip_info = {
.oversampling_humid_avail = bmp280_oversampling_avail,
.num_oversampling_humid_avail = ARRAY_SIZE(bmp280_oversampling_avail),
- .oversampling_humid_default = BMP280_OSRS_HUMIDITY_16X - 1,
+ .oversampling_humid_default = BME280_OSRS_HUMIDITY_16X - 1,
.chip_config = bme280_chip_config,
.read_temp = bmp280_read_temp,
.read_press = bmp280_read_press,
- .read_humid = bmp280_read_humid,
+ .read_humid = bme280_read_humid,
.read_calib = bme280_read_calib,
};
EXPORT_SYMBOL_NS(bme280_chip_info, IIO_BMP280);
@@ -925,16 +952,38 @@ static int bmp380_cmd(struct bmp280_data *data, u8 cmd)
return 0;
}
+static int bmp380_read_temp_adc(struct bmp280_data *data, u32 *adc_temp)
+{
+ u32 value_temp;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP380_REG_TEMP_XLSB,
+ data->buf, sizeof(data->buf));
+ if (ret) {
+ dev_err(data->dev, "failed to read temperature\n");
+ return ret;
+ }
+
+ value_temp = get_unaligned_le24(data->buf);
+ if (value_temp == BMP380_TEMP_SKIPPED) {
+ dev_err(data->dev, "reading temperature skipped\n");
+ return -EIO;
+ }
+ *adc_temp = value_temp;
+
+ return 0;
+}
+
/*
- * Returns temperature in Celsius degrees, resolution is 0.01º C. Output value of
- * "5123" equals 51.2º C. t_fine carries fine temperature as global value.
+ * Returns temperature in Celsius degrees, resolution is 0.01º C. Output value
+ * of "5123" equals 51.2º C. t_fine carries fine temperature as global value.
*
* Taken from datasheet, Section Appendix 9, "Compensation formula" and repo
* https://github.com/BoschSensortec/BMP3-Sensor-API.
*/
-static s32 bmp380_compensate_temp(struct bmp280_data *data, u32 adc_temp)
+static s32 bmp380_calc_t_fine(struct bmp280_data *data, u32 adc_temp)
{
- s64 var1, var2, var3, var4, var5, var6, comp_temp;
+ s64 var1, var2, var3, var4, var5, var6;
struct bmp380_calib *calib = &data->calib.bmp380;
var1 = ((s64) adc_temp) - (((s64) calib->T1) << 8);
@@ -943,13 +992,57 @@ static s32 bmp380_compensate_temp(struct bmp280_data *data, u32 adc_temp)
var4 = var3 * ((s64) calib->T3);
var5 = (var2 << 18) + var4;
var6 = var5 >> 32;
- data->t_fine = (s32) var6;
+ return (s32)var6; /* t_fine = var6 */
+}
+
+static int bmp380_get_t_fine(struct bmp280_data *data, s32 *t_fine)
+{
+ u32 adc_temp;
+ int ret;
+
+ ret = bmp380_read_temp_adc(data, &adc_temp);
+ if (ret)
+ return ret;
+
+ *t_fine = bmp380_calc_t_fine(data, adc_temp);
+
+ return 0;
+}
+
+static int bmp380_compensate_temp(struct bmp280_data *data, u32 adc_temp)
+{
+ s64 comp_temp;
+ s32 var6;
+
+ var6 = bmp380_calc_t_fine(data, adc_temp);
comp_temp = (var6 * 25) >> 14;
comp_temp = clamp_val(comp_temp, BMP380_MIN_TEMP, BMP380_MAX_TEMP);
return (s32) comp_temp;
}
+static int bmp380_read_press_adc(struct bmp280_data *data, u32 *adc_press)
+{
+ u32 value_press;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP380_REG_PRESS_XLSB,
+ data->buf, sizeof(data->buf));
+ if (ret) {
+ dev_err(data->dev, "failed to read pressure\n");
+ return ret;
+ }
+
+ value_press = get_unaligned_le24(data->buf);
+ if (value_press == BMP380_PRESS_SKIPPED) {
+ dev_err(data->dev, "reading pressure skipped\n");
+ return -EIO;
+ }
+ *adc_press = value_press;
+
+ return 0;
+}
+
/*
* Returns pressure in Pa as an unsigned 32 bit integer in fractional Pascal.
* Output value of "9528709" represents 9528709/100 = 95287.09 Pa = 952.8709 hPa.
@@ -957,27 +1050,28 @@ static s32 bmp380_compensate_temp(struct bmp280_data *data, u32 adc_temp)
* Taken from datasheet, Section 9.3. "Pressure compensation" and repository
* https://github.com/BoschSensortec/BMP3-Sensor-API.
*/
-static u32 bmp380_compensate_press(struct bmp280_data *data, u32 adc_press)
+static u32 bmp380_compensate_press(struct bmp280_data *data,
+ u32 adc_press, s32 t_fine)
{
s64 var1, var2, var3, var4, var5, var6, offset, sensitivity;
struct bmp380_calib *calib = &data->calib.bmp380;
u32 comp_press;
- var1 = (s64)data->t_fine * (s64)data->t_fine;
+ var1 = (s64)t_fine * (s64)t_fine;
var2 = var1 >> 6;
- var3 = (var2 * ((s64) data->t_fine)) >> 8;
+ var3 = (var2 * ((s64)t_fine)) >> 8;
var4 = ((s64)calib->P8 * var3) >> 5;
var5 = ((s64)calib->P7 * var1) << 4;
- var6 = ((s64)calib->P6 * (s64)data->t_fine) << 22;
+ var6 = ((s64)calib->P6 * (s64)t_fine) << 22;
offset = ((s64)calib->P5 << 47) + var4 + var5 + var6;
var2 = ((s64)calib->P4 * var3) >> 5;
var4 = ((s64)calib->P3 * var1) << 2;
var5 = ((s64)calib->P2 - ((s64)1 << 14)) *
- ((s64)data->t_fine << 21);
+ ((s64)t_fine << 21);
sensitivity = (((s64) calib->P1 - ((s64) 1 << 14)) << 46) +
var2 + var4 + var5;
var1 = (sensitivity >> 24) * (s64)adc_press;
- var2 = (s64)calib->P10 * (s64)data->t_fine;
+ var2 = (s64)calib->P10 * (s64)t_fine;
var3 = var2 + ((s64)calib->P9 << 16);
var4 = (var3 * (s64)adc_press) >> 13;
@@ -1003,60 +1097,32 @@ static int bmp380_read_temp(struct bmp280_data *data, int *val, int *val2)
u32 adc_temp;
int ret;
- ret = regmap_bulk_read(data->regmap, BMP380_REG_TEMP_XLSB,
- data->buf, sizeof(data->buf));
- if (ret) {
- dev_err(data->dev, "failed to read temperature\n");
+ ret = bmp380_read_temp_adc(data, &adc_temp);
+ if (ret)
return ret;
- }
- adc_temp = get_unaligned_le24(data->buf);
- if (adc_temp == BMP380_TEMP_SKIPPED) {
- dev_err(data->dev, "reading temperature skipped\n");
- return -EIO;
- }
comp_temp = bmp380_compensate_temp(data, adc_temp);
- /*
- * Val might be NULL if we're called by the read_press routine,
- * who only cares about the carry over t_fine value.
- */
- if (val) {
- /* IIO reports temperatures in milli Celsius */
- *val = comp_temp * 10;
- return IIO_VAL_INT;
- }
-
- return 0;
+ *val = comp_temp * 10;
+ return IIO_VAL_INT;
}
static int bmp380_read_press(struct bmp280_data *data, int *val, int *val2)
{
- s32 comp_press;
- u32 adc_press;
+ u32 adc_press, comp_press;
+ s32 t_fine;
int ret;
- /* Read and compensate for temperature so we get a reading of t_fine */
- ret = bmp380_read_temp(data, NULL, NULL);
+ ret = bmp380_get_t_fine(data, &t_fine);
if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP380_REG_PRESS_XLSB,
- data->buf, sizeof(data->buf));
- if (ret) {
- dev_err(data->dev, "failed to read pressure\n");
+ ret = bmp380_read_press_adc(data, &adc_press);
+ if (ret)
return ret;
- }
- adc_press = get_unaligned_le24(data->buf);
- if (adc_press == BMP380_PRESS_SKIPPED) {
- dev_err(data->dev, "reading pressure skipped\n");
- return -EIO;
- }
- comp_press = bmp380_compensate_press(data, adc_press);
+ comp_press = bmp380_compensate_press(data, adc_press, t_fine);
*val = comp_press;
- /* Compensated pressure is in cPa (centipascals) */
*val2 = 100000;
return IIO_VAL_FRACTIONAL;
@@ -1069,15 +1135,17 @@ static int bmp380_read_calib(struct bmp280_data *data)
/* Read temperature and pressure calibration data */
ret = regmap_bulk_read(data->regmap, BMP380_REG_CALIB_TEMP_START,
- data->bmp380_cal_buf, sizeof(data->bmp380_cal_buf));
+ data->bmp380_cal_buf,
+ sizeof(data->bmp380_cal_buf));
if (ret) {
dev_err(data->dev,
- "failed to read temperature calibration parameters\n");
+ "failed to read calibration parameters\n");
return ret;
}
/* Toss the temperature calibration data into the entropy pool */
- add_device_randomness(data->bmp380_cal_buf, sizeof(data->bmp380_cal_buf));
+ add_device_randomness(data->bmp380_cal_buf,
+ sizeof(data->bmp380_cal_buf));
/* Parse calibration values */
calib->T1 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_T1]);
@@ -1159,7 +1227,8 @@ static int bmp380_chip_config(struct bmp280_data *data)
/* Configure output data rate */
ret = regmap_update_bits_check(data->regmap, BMP380_REG_ODR,
- BMP380_ODRS_MASK, data->sampling_freq, &aux);
+ BMP380_ODRS_MASK, data->sampling_freq,
+ &aux);
if (ret) {
dev_err(data->dev, "failed to write ODR selection register\n");
return ret;
@@ -1178,12 +1247,13 @@ static int bmp380_chip_config(struct bmp280_data *data)
if (change) {
/*
- * The configurations errors are detected on the fly during a measurement
- * cycle. If the sampling frequency is too low, it's faster to reset
- * the measurement loop than wait until the next measurement is due.
+ * The configuration errors are detected on the fly during a
+ * measurement cycle. If the sampling frequency is too low, it's
+ * faster to reset the measurement loop than wait until the next
+ * measurement is due.
*
- * Resets sensor measurement loop toggling between sleep and normal
- * operating modes.
+ * Resets sensor measurement loop toggling between sleep and
+ * normal operating modes.
*/
ret = regmap_write_bits(data->regmap, BMP380_REG_POWER_CONTROL,
BMP380_MODE_MASK,
@@ -1201,22 +1271,21 @@ static int bmp380_chip_config(struct bmp280_data *data)
return ret;
}
/*
- * Waits for measurement before checking configuration error flag.
- * Selected longest measure time indicated in section 3.9.1
- * in the datasheet.
+ * Waits for measurement before checking configuration error
+ * flag. Selected longest measure time indicated in
+ * section 3.9.1 in the datasheet.
*/
msleep(80);
/* Check config error flag */
ret = regmap_read(data->regmap, BMP380_REG_ERROR, &tmp);
if (ret) {
- dev_err(data->dev,
- "failed to read error register\n");
+ dev_err(data->dev, "failed to read error register\n");
return ret;
}
if (tmp & BMP380_ERR_CONF_MASK) {
dev_warn(data->dev,
- "sensor flagged configuration as incompatible\n");
+ "sensor flagged configuration as incompatible\n");
return -EINVAL;
}
}
@@ -1317,9 +1386,11 @@ static int bmp580_nvm_operation(struct bmp280_data *data, bool is_write)
}
/* Start NVM operation sequence */
- ret = regmap_write(data->regmap, BMP580_REG_CMD, BMP580_CMD_NVM_OP_SEQ_0);
+ ret = regmap_write(data->regmap, BMP580_REG_CMD,
+ BMP580_CMD_NVM_OP_SEQ_0);
if (ret) {
- dev_err(data->dev, "failed to send nvm operation's first sequence\n");
+ dev_err(data->dev,
+ "failed to send nvm operation's first sequence\n");
return ret;
}
if (is_write) {
@@ -1327,7 +1398,8 @@ static int bmp580_nvm_operation(struct bmp280_data *data, bool is_write)
ret = regmap_write(data->regmap, BMP580_REG_CMD,
BMP580_CMD_NVM_WRITE_SEQ_1);
if (ret) {
- dev_err(data->dev, "failed to send nvm write sequence\n");
+ dev_err(data->dev,
+ "failed to send nvm write sequence\n");
return ret;
}
/* Datasheet says on 4.8.1.2 it takes approximately 10ms */
@@ -1338,17 +1410,14 @@ static int bmp580_nvm_operation(struct bmp280_data *data, bool is_write)
ret = regmap_write(data->regmap, BMP580_REG_CMD,
BMP580_CMD_NVM_READ_SEQ_1);
if (ret) {
- dev_err(data->dev, "failed to send nvm read sequence\n");
+ dev_err(data->dev,
+ "failed to send nvm read sequence\n");
return ret;
}
/* Datasheet says on 4.8.1.1 it takes approximately 200us */
poll = 50;
timeout = 400;
}
- if (ret) {
- dev_err(data->dev, "failed to write command sequence\n");
- return -EIO;
- }
/* Wait until NVM is ready again */
ret = regmap_read_poll_timeout(data->regmap, BMP580_REG_STATUS, reg,
@@ -1394,12 +1463,12 @@ static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2)
/*
* Temperature is returned in Celsius degrees in fractional
- * form down 2^16. We rescale by x1000 to return milli Celsius
- * to respect IIO ABI.
+ * form down 2^16. We rescale by x1000 to return millidegrees
+ * Celsius to respect IIO ABI.
*/
- *val = raw_temp * 1000;
- *val2 = 16;
- return IIO_VAL_FRACTIONAL_LOG2;
+ raw_temp = sign_extend32(raw_temp, 23);
+ *val = ((s64)raw_temp * 1000) / (1 << 16);
+ return IIO_VAL_INT;
}
static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2)
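The BMP580 temperature register is a signed 24-bit value with 2^16 counts per degree Celsius, so after sign extension the millidegree value the IIO ABI expects is simply raw * 1000 / 65536; e.g. 0x190000 gives 25000 m°C (25.0 °C) and 0xFF8000 gives -500 m°C (-0.5 °C). A hypothetical helper mirroring the conversion introduced above:

static int bmp580_raw_to_millicelsius(u32 raw)
{
	s32 t = sign_extend32(raw, 23);		/* 24-bit two's complement */

	return ((s64)t * 1000) / (1 << 16);	/* 2^16 counts per degree */
}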
@@ -1465,15 +1534,14 @@ static const int bmp580_odr_table[][2] = {
static const int bmp580_nvmem_addrs[] = { 0x20, 0x21, 0x22 };
-static int bmp580_nvmem_read(void *priv, unsigned int offset, void *val,
- size_t bytes)
+static int bmp580_nvmem_read_impl(void *priv, unsigned int offset, void *val,
+ size_t bytes)
{
struct bmp280_data *data = priv;
u16 *dst = val;
int ret, addr;
- pm_runtime_get_sync(data->dev);
- mutex_lock(&data->lock);
+ guard(mutex)(&data->lock);
/* Set sensor in standby mode */
ret = regmap_update_bits(data->regmap, BMP580_REG_ODR_CONFIG,
@@ -1501,8 +1569,8 @@ static int bmp580_nvmem_read(void *priv, unsigned int offset, void *val,
if (ret)
goto exit;
- ret = regmap_bulk_read(data->regmap, BMP580_REG_NVM_DATA_LSB, &data->le16,
- sizeof(data->le16));
+ ret = regmap_bulk_read(data->regmap, BMP580_REG_NVM_DATA_LSB,
+ &data->le16, sizeof(data->le16));
if (ret) {
dev_err(data->dev, "error reading nvm data regs\n");
goto exit;
@@ -1515,21 +1583,31 @@ static int bmp580_nvmem_read(void *priv, unsigned int offset, void *val,
exit:
/* Restore chip config */
data->chip_info->chip_config(data);
- mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int bmp580_nvmem_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct bmp280_data *data = priv;
+ int ret;
+
+ pm_runtime_get_sync(data->dev);
+ ret = bmp580_nvmem_read_impl(priv, offset, val, bytes);
pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
+
return ret;
}
-static int bmp580_nvmem_write(void *priv, unsigned int offset, void *val,
- size_t bytes)
+static int bmp580_nvmem_write_impl(void *priv, unsigned int offset, void *val,
+ size_t bytes)
{
struct bmp280_data *data = priv;
u16 *buf = val;
int ret, addr;
- pm_runtime_get_sync(data->dev);
- mutex_lock(&data->lock);
+ guard(mutex)(&data->lock);
/* Set sensor in standby mode */
ret = regmap_update_bits(data->regmap, BMP580_REG_ODR_CONFIG,
@@ -1546,7 +1624,8 @@ static int bmp580_nvmem_write(void *priv, unsigned int offset, void *val,
while (bytes >= sizeof(*buf)) {
addr = bmp580_nvmem_addrs[offset / sizeof(*buf)];
- ret = regmap_write(data->regmap, BMP580_REG_NVM_ADDR, BMP580_NVM_PROG_EN |
+ ret = regmap_write(data->regmap, BMP580_REG_NVM_ADDR,
+ BMP580_NVM_PROG_EN |
FIELD_PREP(BMP580_NVM_ROW_ADDR_MASK, addr));
if (ret) {
dev_err(data->dev, "error writing nvm address\n");
@@ -1554,8 +1633,8 @@ static int bmp580_nvmem_write(void *priv, unsigned int offset, void *val,
}
data->le16 = cpu_to_le16(*buf++);
- ret = regmap_bulk_write(data->regmap, BMP580_REG_NVM_DATA_LSB, &data->le16,
- sizeof(data->le16));
+ ret = regmap_bulk_write(data->regmap, BMP580_REG_NVM_DATA_LSB,
+ &data->le16, sizeof(data->le16));
if (ret) {
dev_err(data->dev, "error writing LSB NVM data regs\n");
goto exit;
@@ -1566,8 +1645,8 @@ static int bmp580_nvmem_write(void *priv, unsigned int offset, void *val,
goto exit;
/* Disable programming mode bit */
- ret = regmap_update_bits(data->regmap, BMP580_REG_NVM_ADDR,
- BMP580_NVM_PROG_EN, 0);
+ ret = regmap_clear_bits(data->regmap, BMP580_REG_NVM_ADDR,
+ BMP580_NVM_PROG_EN);
if (ret) {
dev_err(data->dev, "error resetting nvm write\n");
goto exit;
@@ -1579,9 +1658,20 @@ static int bmp580_nvmem_write(void *priv, unsigned int offset, void *val,
exit:
/* Restore chip config */
data->chip_info->chip_config(data);
- mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int bmp580_nvmem_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct bmp280_data *data = priv;
+ int ret;
+
+ pm_runtime_get_sync(data->dev);
+ ret = bmp580_nvmem_write_impl(priv, offset, val, bytes);
pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
+
return ret;
}
@@ -1607,20 +1697,24 @@ static int bmp580_preinit(struct bmp280_data *data)
/* Post powerup sequence */
ret = regmap_read(data->regmap, BMP580_REG_CHIP_ID, &reg);
- if (ret)
+ if (ret) {
+ dev_err(data->dev, "failed to establish comms with the chip\n");
return ret;
+ }
/* Print warn message if we don't know the chip id */
if (reg != BMP580_CHIP_ID && reg != BMP580_CHIP_ID_ALT)
- dev_warn(data->dev, "preinit: unexpected chip_id\n");
+ dev_warn(data->dev, "unexpected chip_id\n");
ret = regmap_read(data->regmap, BMP580_REG_STATUS, &reg);
- if (ret)
+ if (ret) {
+ dev_err(data->dev, "failed to read nvm status\n");
return ret;
+ }
/* Check nvm status */
if (!(reg & BMP580_STATUS_NVM_RDY_MASK) || (reg & BMP580_STATUS_NVM_ERR_MASK)) {
- dev_err(data->dev, "preinit: nvm error on powerup sequence\n");
+ dev_err(data->dev, "nvm error on powerup sequence\n");
return -EIO;
}
@@ -1655,6 +1749,10 @@ static int bmp580_chip_config(struct bmp280_data *data)
BMP580_DSP_COMP_MASK |
BMP580_DSP_SHDW_IIR_TEMP_EN |
BMP580_DSP_SHDW_IIR_PRESS_EN, reg_val);
+ if (ret) {
+ dev_err(data->dev, "failed to change DSP mode settings\n");
+ return ret;
+ }
/* Configure oversampling */
reg_val = FIELD_PREP(BMP580_OSR_TEMP_MASK, data->oversampling_temp) |
@@ -1662,7 +1760,8 @@ static int bmp580_chip_config(struct bmp280_data *data)
BMP580_OSR_PRESS_EN;
ret = regmap_update_bits_check(data->regmap, BMP580_REG_OSR_CONFIG,
- BMP580_OSR_TEMP_MASK | BMP580_OSR_PRESS_MASK |
+ BMP580_OSR_TEMP_MASK |
+ BMP580_OSR_PRESS_MASK |
BMP580_OSR_PRESS_EN,
reg_val, &aux);
if (ret) {
@@ -1713,7 +1812,8 @@ static int bmp580_chip_config(struct bmp280_data *data)
*/
ret = regmap_read(data->regmap, BMP580_REG_EFF_OSR, &tmp);
if (ret) {
- dev_err(data->dev, "error reading effective OSR register\n");
+ dev_err(data->dev,
+ "error reading effective OSR register\n");
return ret;
}
if (!(tmp & BMP580_EFF_OSR_VALID_ODR)) {
@@ -1763,7 +1863,7 @@ const struct bmp280_chip_info bmp580_chip_info = {
};
EXPORT_SYMBOL_NS(bmp580_chip_info, IIO_BMP280);
-static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
+static int bmp180_wait_for_eoc(struct bmp280_data *data, u8 ctrl_meas)
{
const int conversion_time_max[] = { 4500, 7500, 13500, 25500 };
unsigned int delay_us;
@@ -1774,8 +1874,10 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
reinit_completion(&data->done);
ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas);
- if (ret)
+ if (ret) {
+ dev_err(data->dev, "failed to write crtl_meas register\n");
return ret;
+ }
if (data->use_eoc) {
/*
@@ -1798,32 +1900,38 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
}
ret = regmap_read(data->regmap, BMP280_REG_CTRL_MEAS, &ctrl);
- if (ret)
+ if (ret) {
+ dev_err(data->dev, "failed to read ctrl_meas register\n");
return ret;
+ }
/* The value of this bit reset to "0" after conversion is complete */
- if (ctrl & BMP180_MEAS_SCO)
+ if (ctrl & BMP180_MEAS_SCO) {
+ dev_err(data->dev, "conversion didn't complete\n");
return -EIO;
+ }
return 0;
}
-static int bmp180_read_adc_temp(struct bmp280_data *data, int *val)
+static int bmp180_read_temp_adc(struct bmp280_data *data, u32 *adc_temp)
{
int ret;
- ret = bmp180_measure(data,
- FIELD_PREP(BMP180_MEAS_CTRL_MASK, BMP180_MEAS_TEMP) |
- BMP180_MEAS_SCO);
+ ret = bmp180_wait_for_eoc(data,
+ FIELD_PREP(BMP180_MEAS_CTRL_MASK, BMP180_MEAS_TEMP) |
+ BMP180_MEAS_SCO);
if (ret)
return ret;
ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB,
&data->be16, sizeof(data->be16));
- if (ret)
+ if (ret) {
+ dev_err(data->dev, "failed to read temperature\n");
return ret;
+ }
- *val = be16_to_cpu(data->be16);
+ *adc_temp = be16_to_cpu(data->be16);
return 0;
}
@@ -1836,9 +1944,10 @@ static int bmp180_read_calib(struct bmp280_data *data)
ret = regmap_bulk_read(data->regmap, BMP180_REG_CALIB_START,
data->bmp180_cal_buf, sizeof(data->bmp180_cal_buf));
-
- if (ret < 0)
+ if (ret) {
+ dev_err(data->dev, "failed to read calibration parameters\n");
return ret;
+ }
/* None of the words has the value 0 or 0xFFFF */
for (i = 0; i < ARRAY_SIZE(data->bmp180_cal_buf); i++) {
@@ -1848,7 +1957,8 @@ static int bmp180_read_calib(struct bmp280_data *data)
}
/* Toss the calibration data into the entropy pool */
- add_device_randomness(data->bmp180_cal_buf, sizeof(data->bmp180_cal_buf));
+ add_device_randomness(data->bmp180_cal_buf,
+ sizeof(data->bmp180_cal_buf));
calib->AC1 = be16_to_cpu(data->bmp180_cal_buf[AC1]);
calib->AC2 = be16_to_cpu(data->bmp180_cal_buf[AC2]);
@@ -1871,59 +1981,72 @@ static int bmp180_read_calib(struct bmp280_data *data)
*
* Taken from datasheet, Section 3.5, "Calculating pressure and temperature".
*/
-static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp)
+
+static s32 bmp180_calc_t_fine(struct bmp280_data *data, u32 adc_temp)
{
struct bmp180_calib *calib = &data->calib.bmp180;
s32 x1, x2;
- x1 = ((adc_temp - calib->AC6) * calib->AC5) >> 15;
+ x1 = ((((s32)adc_temp) - calib->AC6) * calib->AC5) >> 15;
x2 = (calib->MC << 11) / (x1 + calib->MD);
- data->t_fine = x1 + x2;
+ return x1 + x2; /* t_fine = x1 + x2; */
+}
+
+static int bmp180_get_t_fine(struct bmp280_data *data, s32 *t_fine)
+{
+ u32 adc_temp;
+ int ret;
+
+ ret = bmp180_read_temp_adc(data, &adc_temp);
+ if (ret)
+ return ret;
+
+ *t_fine = bmp180_calc_t_fine(data, adc_temp);
+
+ return 0;
+}
- return (data->t_fine + 8) >> 4;
+static s32 bmp180_compensate_temp(struct bmp280_data *data, u32 adc_temp)
+{
+ return (bmp180_calc_t_fine(data, adc_temp) + 8) / 16;
}
static int bmp180_read_temp(struct bmp280_data *data, int *val, int *val2)
{
- s32 adc_temp, comp_temp;
+ s32 comp_temp;
+ u32 adc_temp;
int ret;
- ret = bmp180_read_adc_temp(data, &adc_temp);
+ ret = bmp180_read_temp_adc(data, &adc_temp);
if (ret)
return ret;
comp_temp = bmp180_compensate_temp(data, adc_temp);
- /*
- * val might be NULL if we're called by the read_press routine,
- * who only cares about the carry over t_fine value.
- */
- if (val) {
- *val = comp_temp * 100;
- return IIO_VAL_INT;
- }
-
- return 0;
+ *val = comp_temp * 100;
+ return IIO_VAL_INT;
}
-static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
+static int bmp180_read_press_adc(struct bmp280_data *data, u32 *adc_press)
{
u8 oss = data->oversampling_press;
int ret;
- ret = bmp180_measure(data,
- FIELD_PREP(BMP180_MEAS_CTRL_MASK, BMP180_MEAS_PRESS) |
- FIELD_PREP(BMP180_OSRS_PRESS_MASK, oss) |
- BMP180_MEAS_SCO);
+ ret = bmp180_wait_for_eoc(data,
+ FIELD_PREP(BMP180_MEAS_CTRL_MASK, BMP180_MEAS_PRESS) |
+ FIELD_PREP(BMP180_OSRS_PRESS_MASK, oss) |
+ BMP180_MEAS_SCO);
if (ret)
return ret;
ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB,
data->buf, sizeof(data->buf));
- if (ret)
+ if (ret) {
+ dev_err(data->dev, "failed to read pressure\n");
return ret;
+ }
- *val = get_unaligned_be24(data->buf) >> (8 - oss);
+ *adc_press = get_unaligned_be24(data->buf) >> (8 - oss);
return 0;
}
@@ -1933,7 +2056,8 @@ static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
*
* Taken from datasheet, Section 3.5, "Calculating pressure and temperature".
*/
-static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
+static u32 bmp180_compensate_press(struct bmp280_data *data, u32 adc_press,
+ s32 t_fine)
{
struct bmp180_calib *calib = &data->calib.bmp180;
s32 oss = data->oversampling_press;
@@ -1941,7 +2065,7 @@ static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
s32 b3, b6;
u32 b4, b7;
- b6 = data->t_fine - 4000;
+ b6 = t_fine - 4000;
x1 = (calib->B2 * (b6 * b6 >> 12)) >> 11;
x2 = calib->AC2 * b6 >> 11;
x3 = x1 + x2;
@@ -1950,7 +2074,7 @@ static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
x2 = (calib->B1 * ((b6 * b6) >> 12)) >> 16;
x3 = (x1 + x2 + 2) >> 2;
b4 = calib->AC4 * (u32)(x3 + 32768) >> 15;
- b7 = ((u32)adc_press - b3) * (50000 >> oss);
+ b7 = (adc_press - b3) * (50000 >> oss);
if (b7 < 0x80000000)
p = (b7 * 2) / b4;
else
@@ -1963,23 +2087,21 @@ static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
return p + ((x1 + x2 + 3791) >> 4);
}
-static int bmp180_read_press(struct bmp280_data *data,
- int *val, int *val2)
+static int bmp180_read_press(struct bmp280_data *data, int *val, int *val2)
{
- u32 comp_press;
- s32 adc_press;
+ u32 comp_press, adc_press;
+ s32 t_fine;
int ret;
- /* Read and compensate temperature so we get a reading of t_fine. */
- ret = bmp180_read_temp(data, NULL, NULL);
+ ret = bmp180_get_t_fine(data, &t_fine);
if (ret)
return ret;
- ret = bmp180_read_adc_press(data, &adc_press);
+ ret = bmp180_read_press_adc(data, &adc_press);
if (ret)
return ret;
- comp_press = bmp180_compensate_press(data, adc_press);
+ comp_press = bmp180_compensate_press(data, adc_press, t_fine);
*val = comp_press;
*val2 = 1000;
@@ -2154,8 +2276,10 @@ int bmp280_common_probe(struct device *dev,
data->regmap = regmap;
ret = regmap_read(regmap, data->chip_info->id_reg, &chip_id);
- if (ret < 0)
+ if (ret) {
+ dev_err(data->dev, "failed to read chip id\n");
return ret;
+ }
for (i = 0; i < data->chip_info->num_chip_id; i++) {
if (chip_id == data->chip_info->chip_id[i]) {
@@ -2175,7 +2299,7 @@ int bmp280_common_probe(struct device *dev,
}
ret = data->chip_info->chip_config(data);
- if (ret < 0)
+ if (ret)
return ret;
dev_set_drvdata(dev, indio_dev);
@@ -2188,7 +2312,7 @@ int bmp280_common_probe(struct device *dev,
if (data->chip_info->read_calib) {
ret = data->chip_info->read_calib(data);
- if (ret < 0)
+ if (ret)
return dev_err_probe(data->dev, ret,
"failed to read calibration coefficients\n");
}
@@ -2241,6 +2365,7 @@ static int bmp280_runtime_resume(struct device *dev)
ret = regulator_bulk_enable(BMP280_NUM_SUPPLIES, data->supplies);
if (ret)
return ret;
+
usleep_range(data->start_up_time, data->start_up_time + 100);
return data->chip_info->chip_config(data);
}
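With the cached data->t_fine field gone (see the bmp280.h hunk below), every pressure and humidity read now derives t_fine from a fresh temperature conversion and passes it down explicitly, so readers no longer communicate through hidden per-device state. A sketch of the resulting BMP280 pressure path, mirroring bmp280_read_press() above:

static int example_press_read(struct bmp280_data *data, u32 *press_256pa)
{
	u32 adc_press;
	s32 t_fine;
	int ret;

	ret = bmp280_get_t_fine(data, &t_fine);		/* temperature ADC -> t_fine */
	if (ret)
		return ret;

	ret = bmp280_read_press_adc(data, &adc_press);	/* raw pressure ADC */
	if (ret)
		return ret;

	/* compensated pressure in 1/256 Pa, as consumed by the IIO layer */
	*press_256pa = bmp280_compensate_press(data, adc_press, t_fine);
	return 0;
}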
diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c
index 3ee56720428c..fa52839474b1 100644
--- a/drivers/iio/pressure/bmp280-regmap.c
+++ b/drivers/iio/pressure/bmp280-regmap.c
@@ -45,7 +45,7 @@ static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case BMP280_REG_CONFIG:
- case BMP280_REG_CTRL_HUMIDITY:
+ case BME280_REG_CTRL_HUMIDITY:
case BMP280_REG_CTRL_MEAS:
case BMP280_REG_RESET:
return true;
@@ -57,8 +57,8 @@ static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case BMP280_REG_HUMIDITY_LSB:
- case BMP280_REG_HUMIDITY_MSB:
+ case BME280_REG_HUMIDITY_LSB:
+ case BME280_REG_HUMIDITY_MSB:
case BMP280_REG_TEMP_XLSB:
case BMP280_REG_TEMP_LSB:
case BMP280_REG_TEMP_MSB:
@@ -167,7 +167,7 @@ const struct regmap_config bmp280_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = BMP280_REG_HUMIDITY_LSB,
+ .max_register = BME280_REG_HUMIDITY_LSB,
.cache_type = REGCACHE_RBTREE,
.writeable_reg = bmp280_is_writeable_reg,
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 4e19ea0b4d39..62b4e58104cf 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -13,7 +13,7 @@
#include "bmp280.h"
static int bmp280_regmap_spi_write(void *context, const void *data,
- size_t count)
+ size_t count)
{
struct spi_device *spi = to_spi_device(context);
u8 buf[2];
@@ -29,7 +29,7 @@ static int bmp280_regmap_spi_write(void *context, const void *data,
}
static int bmp280_regmap_spi_read(void *context, const void *reg,
- size_t reg_size, void *val, size_t val_size)
+ size_t reg_size, void *val, size_t val_size)
{
struct spi_device *spi = to_spi_device(context);
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 5812a344ed8e..7c30e4d523be 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -192,8 +192,6 @@
#define BMP380_PRESS_SKIPPED 0x800000
/* BMP280 specific registers */
-#define BMP280_REG_HUMIDITY_LSB 0xFE
-#define BMP280_REG_HUMIDITY_MSB 0xFD
#define BMP280_REG_TEMP_XLSB 0xFC
#define BMP280_REG_TEMP_LSB 0xFB
#define BMP280_REG_TEMP_MSB 0xFA
@@ -207,15 +205,6 @@
#define BMP280_REG_CONFIG 0xF5
#define BMP280_REG_CTRL_MEAS 0xF4
#define BMP280_REG_STATUS 0xF3
-#define BMP280_REG_CTRL_HUMIDITY 0xF2
-
-/* Due to non linear mapping, and data sizes we can't do a bulk read */
-#define BMP280_REG_COMP_H1 0xA1
-#define BMP280_REG_COMP_H2 0xE1
-#define BMP280_REG_COMP_H3 0xE3
-#define BMP280_REG_COMP_H4 0xE4
-#define BMP280_REG_COMP_H5 0xE5
-#define BMP280_REG_COMP_H6 0xE7
#define BMP280_REG_COMP_TEMP_START 0x88
#define BMP280_COMP_TEMP_REG_COUNT 6
@@ -223,8 +212,6 @@
#define BMP280_REG_COMP_PRESS_START 0x8E
#define BMP280_COMP_PRESS_REG_COUNT 18
-#define BMP280_COMP_H5_MASK GENMASK(15, 4)
-
#define BMP280_CONTIGUOUS_CALIB_REGS (BMP280_COMP_TEMP_REG_COUNT + \
BMP280_COMP_PRESS_REG_COUNT)
@@ -235,14 +222,6 @@
#define BMP280_FILTER_8X 3
#define BMP280_FILTER_16X 4
-#define BMP280_OSRS_HUMIDITY_MASK GENMASK(2, 0)
-#define BMP280_OSRS_HUMIDITY_SKIP 0
-#define BMP280_OSRS_HUMIDITY_1X 1
-#define BMP280_OSRS_HUMIDITY_2X 2
-#define BMP280_OSRS_HUMIDITY_4X 3
-#define BMP280_OSRS_HUMIDITY_8X 4
-#define BMP280_OSRS_HUMIDITY_16X 5
-
#define BMP280_OSRS_TEMP_MASK GENMASK(7, 5)
#define BMP280_OSRS_TEMP_SKIP 0
#define BMP280_OSRS_TEMP_1X 1
@@ -264,6 +243,30 @@
#define BMP280_MODE_FORCED 1
#define BMP280_MODE_NORMAL 3
+/* BME280 specific registers */
+#define BME280_REG_HUMIDITY_LSB 0xFE
+#define BME280_REG_HUMIDITY_MSB 0xFD
+
+#define BME280_REG_CTRL_HUMIDITY 0xF2
+
+/* Due to non linear mapping, and data sizes we can't do a bulk read */
+#define BME280_REG_COMP_H1 0xA1
+#define BME280_REG_COMP_H2 0xE1
+#define BME280_REG_COMP_H3 0xE3
+#define BME280_REG_COMP_H4 0xE4
+#define BME280_REG_COMP_H5 0xE5
+#define BME280_REG_COMP_H6 0xE7
+
+#define BME280_COMP_H5_MASK GENMASK(15, 4)
+
+#define BME280_OSRS_HUMIDITY_MASK GENMASK(2, 0)
+#define BME280_OSRS_HUMIDITY_SKIP 0
+#define BME280_OSRS_HUMIDITY_1X 1
+#define BME280_OSRS_HUMIDITY_2X 2
+#define BME280_OSRS_HUMIDITY_4X 3
+#define BME280_OSRS_HUMIDITY_8X 4
+#define BME280_OSRS_HUMIDITY_16X 5
+
/* BMP180 specific registers */
#define BMP180_REG_OUT_XLSB 0xF8
#define BMP180_REG_OUT_LSB 0xF7
@@ -395,12 +398,6 @@ struct bmp280_data {
int sampling_freq;
/*
- * Carryover value from temperature conversion, used in pressure
- * calculation.
- */
- s32 t_fine;
-
- /*
* DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
@@ -449,12 +446,12 @@ struct bmp280_chip_info {
int num_sampling_freq_avail;
int sampling_freq_default;
- int (*chip_config)(struct bmp280_data *);
- int (*read_temp)(struct bmp280_data *, int *, int *);
- int (*read_press)(struct bmp280_data *, int *, int *);
- int (*read_humid)(struct bmp280_data *, int *, int *);
- int (*read_calib)(struct bmp280_data *);
- int (*preinit)(struct bmp280_data *);
+ int (*chip_config)(struct bmp280_data *data);
+ int (*read_temp)(struct bmp280_data *data, int *val, int *val2);
+ int (*read_press)(struct bmp280_data *data, int *val, int *val2);
+ int (*read_humid)(struct bmp280_data *data, int *val, int *val2);
+ int (*read_calib)(struct bmp280_data *data);
+ int (*preinit)(struct bmp280_data *data);
};
/* Chip infos for each variant */
@@ -473,7 +470,7 @@ extern const struct regmap_config bmp580_regmap_config;
/* Probe called from different transports */
int bmp280_common_probe(struct device *dev,
struct regmap *regmap,
- const struct bmp280_chip_info *,
+ const struct bmp280_chip_info *chip_info,
const char *name,
int irq);
diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c
index 7d882e15e556..c6f44f0f4d2e 100644
--- a/drivers/iio/pressure/dps310.c
+++ b/drivers/iio/pressure/dps310.c
@@ -887,7 +887,7 @@ static int dps310_probe(struct i2c_client *client)
}
static const struct i2c_device_id dps310_id[] = {
- { DPS310_DEV_NAME, 0 },
+ { DPS310_DEV_NAME },
{}
};
MODULE_DEVICE_TABLE(i2c, dps310_id);
diff --git a/drivers/iio/pressure/hp03.c b/drivers/iio/pressure/hp03.c
index 8bdb279129fa..6f7a16787143 100644
--- a/drivers/iio/pressure/hp03.c
+++ b/drivers/iio/pressure/hp03.c
@@ -266,8 +266,8 @@ static int hp03_probe(struct i2c_client *client)
}
static const struct i2c_device_id hp03_id[] = {
- { "hp03", 0 },
- { },
+ { "hp03" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, hp03_id);
diff --git a/drivers/iio/pressure/icp10100.c b/drivers/iio/pressure/icp10100.c
index 2086f3ef338f..3e0bf5d31ad7 100644
--- a/drivers/iio/pressure/icp10100.c
+++ b/drivers/iio/pressure/icp10100.c
@@ -637,7 +637,7 @@ static const struct of_device_id icp10100_of_match[] = {
MODULE_DEVICE_TABLE(of, icp10100_of_match);
static const struct i2c_device_id icp10100_id[] = {
- { "icp10100", 0 },
+ { "icp10100" },
{ }
};
MODULE_DEVICE_TABLE(i2c, icp10100_id);
diff --git a/drivers/iio/pressure/mpl115_i2c.c b/drivers/iio/pressure/mpl115_i2c.c
index fcbdadf4a511..0c51dc02478e 100644
--- a/drivers/iio/pressure/mpl115_i2c.c
+++ b/drivers/iio/pressure/mpl115_i2c.c
@@ -45,7 +45,7 @@ static int mpl115_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id mpl115_i2c_id[] = {
- { "mpl115", 0 },
+ { "mpl115" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mpl115_i2c_id);
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 7aa19584c340..71ded2eee060 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -318,7 +318,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(mpl3115_pm_ops, mpl3115_suspend,
mpl3115_resume);
static const struct i2c_device_id mpl3115_id[] = {
- { "mpl3115", 0 },
+ { "mpl3115" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mpl3115_id);
diff --git a/drivers/iio/pressure/t5403.c b/drivers/iio/pressure/t5403.c
index a6463c06975e..c7cb0fd816d3 100644
--- a/drivers/iio/pressure/t5403.c
+++ b/drivers/iio/pressure/t5403.c
@@ -251,7 +251,7 @@ static int t5403_probe(struct i2c_client *client)
}
static const struct i2c_device_id t5403_id[] = {
- { "t5403", 0 },
+ { "t5403" },
{ }
};
MODULE_DEVICE_TABLE(i2c, t5403_id);
diff --git a/drivers/iio/pressure/zpa2326_i2c.c b/drivers/iio/pressure/zpa2326_i2c.c
index c7fffbe7c788..4833e525c393 100644
--- a/drivers/iio/pressure/zpa2326_i2c.c
+++ b/drivers/iio/pressure/zpa2326_i2c.c
@@ -59,8 +59,8 @@ static void zpa2326_remove_i2c(struct i2c_client *client)
}
static const struct i2c_device_id zpa2326_i2c_ids[] = {
- { "zpa2326", 0 },
- { },
+ { "zpa2326" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, zpa2326_i2c_ids);
diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
index 4982686fb4c3..dc66ca9bba6b 100644
--- a/drivers/iio/proximity/isl29501.c
+++ b/drivers/iio/proximity/isl29501.c
@@ -989,7 +989,7 @@ static int isl29501_probe(struct i2c_client *client)
}
static const struct i2c_device_id isl29501_id[] = {
- {"isl29501", 0},
+ { "isl29501" },
{}
};
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 2913d5e0fe4f..5c959730aecd 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -322,9 +322,9 @@ static void lidar_remove(struct i2c_client *client)
}
static const struct i2c_device_id lidar_id[] = {
- {"lidar-lite-v2", 0},
- {"lidar-lite-v3", 0},
- { },
+ { "lidar-lite-v2" },
+ { "lidar-lite-v3" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, lidar_id);
diff --git a/drivers/iio/proximity/rfd77402.c b/drivers/iio/proximity/rfd77402.c
index f02e83e3f15f..aff60a3c1a6f 100644
--- a/drivers/iio/proximity/rfd77402.c
+++ b/drivers/iio/proximity/rfd77402.c
@@ -308,7 +308,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(rfd77402_pm_ops, rfd77402_suspend,
rfd77402_resume);
static const struct i2c_device_id rfd77402_id[] = {
- { "rfd77402", 0 },
+ { "rfd77402" },
{ }
};
MODULE_DEVICE_TABLE(i2c, rfd77402_id);
diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c
index aa0d14a49d5e..629f83c37d59 100644
--- a/drivers/iio/proximity/sx9324.c
+++ b/drivers/iio/proximity/sx9324.c
@@ -835,9 +835,8 @@ static int sx9324_init_compensation(struct iio_dev *indio_dev)
int ret;
/* run the compensation phase on all channels */
- ret = regmap_update_bits(data->regmap, SX9324_REG_STAT2,
- SX9324_REG_STAT2_COMPSTAT_MASK,
- SX9324_REG_STAT2_COMPSTAT_MASK);
+ ret = regmap_set_bits(data->regmap, SX9324_REG_STAT2,
+ SX9324_REG_STAT2_COMPSTAT_MASK);
if (ret)
return ret;
diff --git a/drivers/iio/proximity/sx9360.c b/drivers/iio/proximity/sx9360.c
index 75a1c29f14eb..2b90bf45a201 100644
--- a/drivers/iio/proximity/sx9360.c
+++ b/drivers/iio/proximity/sx9360.c
@@ -672,9 +672,8 @@ static int sx9360_init_compensation(struct iio_dev *indio_dev)
int ret;
/* run the compensation phase on all channels */
- ret = regmap_update_bits(data->regmap, SX9360_REG_STAT,
- SX9360_REG_STAT_COMPSTAT_MASK,
- SX9360_REG_STAT_COMPSTAT_MASK);
+ ret = regmap_set_bits(data->regmap, SX9360_REG_STAT,
+ SX9360_REG_STAT_COMPSTAT_MASK);
if (ret)
return ret;
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 550e7d3cd5ee..92630812ece2 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -209,7 +209,7 @@ static int sx9500_inc_users(struct sx9500_data *data, int *counter,
/* Bit is already active, nothing to do. */
return 0;
- return regmap_update_bits(data->regmap, reg, bitmask, bitmask);
+ return regmap_set_bits(data->regmap, reg, bitmask);
}
static int sx9500_dec_users(struct sx9500_data *data, int *counter,
@@ -220,7 +220,7 @@ static int sx9500_dec_users(struct sx9500_data *data, int *counter,
/* There are more users, do not deactivate. */
return 0;
- return regmap_update_bits(data->regmap, reg, bitmask, 0);
+ return regmap_clear_bits(data->regmap, reg, bitmask);
}
static int sx9500_inc_chan_users(struct sx9500_data *data, int chan)
@@ -795,8 +795,8 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev)
int i, ret;
unsigned int val;
- ret = regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0,
- SX9500_CHAN_MASK, SX9500_CHAN_MASK);
+ ret = regmap_set_bits(data->regmap, SX9500_REG_PROX_CTRL0,
+ SX9500_CHAN_MASK);
if (ret < 0)
return ret;
@@ -815,8 +815,8 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev)
}
out:
- regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0,
- SX9500_CHAN_MASK, 0);
+ regmap_clear_bits(data->regmap, SX9500_REG_PROX_CTRL0,
+ SX9500_CHAN_MASK);
return ret;
}
@@ -1043,8 +1043,8 @@ static const struct of_device_id sx9500_of_match[] = {
MODULE_DEVICE_TABLE(of, sx9500_of_match);
static const struct i2c_device_id sx9500_id[] = {
- {"sx9500", 0},
- { },
+ { "sx9500" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, sx9500_id);
diff --git a/drivers/iio/proximity/sx_common.c b/drivers/iio/proximity/sx_common.c
index fe07d1444ac3..a95e9814aaf2 100644
--- a/drivers/iio/proximity/sx_common.c
+++ b/drivers/iio/proximity/sx_common.c
@@ -111,17 +111,16 @@ static int sx_common_enable_irq(struct sx_common_data *data, unsigned int irq)
{
if (!data->client->irq)
return 0;
- return regmap_update_bits(data->regmap, data->chip_info->reg_irq_msk,
- irq << data->chip_info->irq_msk_offset,
- irq << data->chip_info->irq_msk_offset);
+ return regmap_set_bits(data->regmap, data->chip_info->reg_irq_msk,
+ irq << data->chip_info->irq_msk_offset);
}
static int sx_common_disable_irq(struct sx_common_data *data, unsigned int irq)
{
if (!data->client->irq)
return 0;
- return regmap_update_bits(data->regmap, data->chip_info->reg_irq_msk,
- irq << data->chip_info->irq_msk_offset, 0);
+ return regmap_clear_bits(data->regmap, data->chip_info->reg_irq_msk,
+ irq << data->chip_info->irq_msk_offset);
}
static int sx_common_update_chan_en(struct sx_common_data *data,
diff --git a/drivers/iio/proximity/vl53l0x-i2c.c b/drivers/iio/proximity/vl53l0x-i2c.c
index 2cea64bea909..8d4f3f849fe2 100644
--- a/drivers/iio/proximity/vl53l0x-i2c.c
+++ b/drivers/iio/proximity/vl53l0x-i2c.c
@@ -278,7 +278,7 @@ static int vl53l0x_probe(struct i2c_client *client)
}
static const struct i2c_device_id vl53l0x_id[] = {
- { "vl53l0x", 0 },
+ { "vl53l0x" },
{ }
};
MODULE_DEVICE_TABLE(i2c, vl53l0x_id);
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
index 24d19f3c7292..21f2cfc55bf8 100644
--- a/drivers/iio/temperature/ltc2983.c
+++ b/drivers/iio/temperature/ltc2983.c
@@ -8,6 +8,8 @@
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/iio/iio.h>
#include <linux/interrupt.h>
@@ -432,10 +434,9 @@ __ltc2983_custom_sensor_new(struct ltc2983_data *st, const struct fwnode_handle
else
n_entries = fwnode_property_count_u64(fn, propname);
/* n_entries must be an even number */
- if (!n_entries || (n_entries % 2) != 0) {
- dev_err(dev, "Number of entries either 0 or not even\n");
- return ERR_PTR(-EINVAL);
- }
+ if (!n_entries || (n_entries % 2) != 0)
+ return dev_err_ptr_probe(dev, -EINVAL,
+ "Number of entries either 0 or not even\n");
new_custom = devm_kzalloc(dev, sizeof(*new_custom), GFP_KERNEL);
if (!new_custom)
@@ -443,19 +444,17 @@ __ltc2983_custom_sensor_new(struct ltc2983_data *st, const struct fwnode_handle
new_custom->size = n_entries * n_size;
/* check Steinhart size */
- if (is_steinhart && new_custom->size != LTC2983_CUSTOM_STEINHART_SIZE) {
- dev_err(dev, "Steinhart sensors size(%zu) must be %u\n", new_custom->size,
- LTC2983_CUSTOM_STEINHART_SIZE);
- return ERR_PTR(-EINVAL);
- }
+ if (is_steinhart && new_custom->size != LTC2983_CUSTOM_STEINHART_SIZE)
+ return dev_err_ptr_probe(dev, -EINVAL,
+ "Steinhart sensors size(%zu) must be %u\n",
+ new_custom->size, LTC2983_CUSTOM_STEINHART_SIZE);
+
/* Check space on the table. */
if (st->custom_table_size + new_custom->size >
- (LTC2983_CUST_SENS_TBL_END_REG -
- LTC2983_CUST_SENS_TBL_START_REG) + 1) {
- dev_err(dev, "No space left(%d) for new custom sensor(%zu)",
- st->custom_table_size, new_custom->size);
- return ERR_PTR(-EINVAL);
- }
+ (LTC2983_CUST_SENS_TBL_END_REG - LTC2983_CUST_SENS_TBL_START_REG) + 1)
+ return dev_err_ptr_probe(dev, -EINVAL,
+ "No space left(%d) for new custom sensor(%zu)\n",
+ st->custom_table_size, new_custom->size);
/* allocate the table */
if (is_steinhart)
@@ -688,21 +687,19 @@ ltc2983_thermocouple_new(const struct fwnode_handle *child, struct ltc2983_data
LTC2983_THERMOCOUPLE_OC_CURR(3);
break;
default:
- dev_err(&st->spi->dev,
- "Invalid open circuit current:%u", oc_current);
- return ERR_PTR(-EINVAL);
+ return dev_err_ptr_probe(&st->spi->dev, -EINVAL,
+ "Invalid open circuit current:%u\n",
+ oc_current);
}
thermo->sensor_config |= LTC2983_THERMOCOUPLE_OC_CHECK(1);
}
/* validate channel index */
if (!(thermo->sensor_config & LTC2983_THERMOCOUPLE_DIFF_MASK) &&
- sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
- dev_err(&st->spi->dev,
- "Invalid chann:%d for differential thermocouple",
- sensor->chan);
- return ERR_PTR(-EINVAL);
- }
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN)
+ return dev_err_ptr_probe(&st->spi->dev, -EINVAL,
+ "Invalid chann:%d for differential thermocouple\n",
+ sensor->chan);
struct fwnode_handle *ref __free(fwnode_handle) =
fwnode_find_reference(child, "adi,cold-junction-handle", 0);
@@ -710,14 +707,13 @@ ltc2983_thermocouple_new(const struct fwnode_handle *child, struct ltc2983_data
ref = NULL;
} else {
ret = fwnode_property_read_u32(ref, "reg", &thermo->cold_junction_chan);
- if (ret) {
+ if (ret)
/*
* This would be caught later but we can just return
* the error right away.
*/
- dev_err(&st->spi->dev, "Property reg must be given\n");
- return ERR_PTR(ret);
- }
+ return dev_err_ptr_probe(&st->spi->dev, ret,
+ "Property reg must be given\n");
}
/* check custom sensor */
@@ -753,16 +749,14 @@ ltc2983_rtd_new(const struct fwnode_handle *child, struct ltc2983_data *st,
struct fwnode_handle *ref __free(fwnode_handle) =
fwnode_find_reference(child, "adi,rsense-handle", 0);
- if (IS_ERR(ref)) {
- dev_err(dev, "Property adi,rsense-handle missing or invalid");
- return ERR_CAST(ref);
- }
+ if (IS_ERR(ref))
+ return dev_err_cast_probe(dev, ref,
+ "Property adi,rsense-handle missing or invalid\n");
ret = fwnode_property_read_u32(ref, "reg", &rtd->r_sense_chan);
- if (ret) {
- dev_err(dev, "Property reg must be given\n");
- return ERR_PTR(ret);
- }
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Property reg must be given\n");
ret = fwnode_property_read_u32(child, "adi,number-of-wires", &n_wires);
if (!ret) {
@@ -781,19 +775,19 @@ ltc2983_rtd_new(const struct fwnode_handle *child, struct ltc2983_data *st,
rtd->sensor_config = LTC2983_RTD_N_WIRES(3);
break;
default:
- dev_err(dev, "Invalid number of wires:%u\n", n_wires);
- return ERR_PTR(-EINVAL);
+ return dev_err_ptr_probe(dev, -EINVAL,
+ "Invalid number of wires:%u\n",
+ n_wires);
}
}
if (fwnode_property_read_bool(child, "adi,rsense-share")) {
/* Current rotation is only available with rsense sharing */
if (fwnode_property_read_bool(child, "adi,current-rotate")) {
- if (n_wires == 2 || n_wires == 3) {
- dev_err(dev,
- "Rotation not allowed for 2/3 Wire RTDs");
- return ERR_PTR(-EINVAL);
- }
+ if (n_wires == 2 || n_wires == 3)
+ return dev_err_ptr_probe(dev, -EINVAL,
+ "Rotation not allowed for 2/3 Wire RTDs\n");
+
rtd->sensor_config |= LTC2983_RTD_C_ROTATE(1);
} else {
rtd->sensor_config |= LTC2983_RTD_R_SHARE(1);
@@ -816,29 +810,22 @@ ltc2983_rtd_new(const struct fwnode_handle *child, struct ltc2983_data *st,
if (((rtd->sensor_config & LTC2983_RTD_KELVIN_R_SENSE_MASK)
== LTC2983_RTD_KELVIN_R_SENSE_MASK) &&
- (rtd->r_sense_chan <= min)) {
+ (rtd->r_sense_chan <= min))
/* kelvin rsense*/
- dev_err(dev,
- "Invalid rsense chann:%d to use in kelvin rsense",
- rtd->r_sense_chan);
-
- return ERR_PTR(-EINVAL);
- }
-
- if (sensor->chan < min || sensor->chan > max) {
- dev_err(dev, "Invalid chann:%d for the rtd config",
- sensor->chan);
-
- return ERR_PTR(-EINVAL);
- }
+ return dev_err_ptr_probe(dev, -EINVAL,
+ "Invalid rsense chann:%d to use in kelvin rsense\n",
+ rtd->r_sense_chan);
+
+ if (sensor->chan < min || sensor->chan > max)
+ return dev_err_ptr_probe(dev, -EINVAL,
+ "Invalid chann:%d for the rtd config\n",
+ sensor->chan);
} else {
/* same as differential case */
- if (sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
- dev_err(&st->spi->dev,
- "Invalid chann:%d for RTD", sensor->chan);
-
- return ERR_PTR(-EINVAL);
- }
+ if (sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN)
+ return dev_err_ptr_probe(&st->spi->dev, -EINVAL,
+ "Invalid chann:%d for RTD\n",
+ sensor->chan);
}
/* check custom sensor */
@@ -886,10 +873,9 @@ ltc2983_rtd_new(const struct fwnode_handle *child, struct ltc2983_data *st,
rtd->excitation_current = 0x08;
break;
default:
- dev_err(&st->spi->dev,
- "Invalid value for excitation current(%u)",
- excitation_current);
- return ERR_PTR(-EINVAL);
+ return dev_err_ptr_probe(&st->spi->dev, -EINVAL,
+ "Invalid value for excitation current(%u)\n",
+ excitation_current);
}
}
@@ -913,16 +899,14 @@ ltc2983_thermistor_new(const struct fwnode_handle *child, struct ltc2983_data *s
struct fwnode_handle *ref __free(fwnode_handle) =
fwnode_find_reference(child, "adi,rsense-handle", 0);
- if (IS_ERR(ref)) {
- dev_err(dev, "Property adi,rsense-handle missing or invalid");
- return ERR_CAST(ref);
- }
+ if (IS_ERR(ref))
+ return dev_err_cast_probe(dev, ref,
+ "Property adi,rsense-handle missing or invalid\n");
ret = fwnode_property_read_u32(ref, "reg", &thermistor->r_sense_chan);
- if (ret) {
- dev_err(dev, "rsense channel must be configured...\n");
- return ERR_PTR(ret);
- }
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "rsense channel must be configured...\n");
if (fwnode_property_read_bool(child, "adi,single-ended")) {
thermistor->sensor_config = LTC2983_THERMISTOR_SGL(1);
@@ -937,12 +921,10 @@ ltc2983_thermistor_new(const struct fwnode_handle *child, struct ltc2983_data *s
}
/* validate channel index */
if (!(thermistor->sensor_config & LTC2983_THERMISTOR_DIFF_MASK) &&
- sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
- dev_err(&st->spi->dev,
- "Invalid chann:%d for differential thermistor",
- sensor->chan);
- return ERR_PTR(-EINVAL);
- }
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN)
+ return dev_err_ptr_probe(&st->spi->dev, -EINVAL,
+ "Invalid chann:%d for differential thermistor\n",
+ sensor->chan);
/* check custom sensor */
if (sensor->type >= LTC2983_SENSOR_THERMISTOR_STEINHART) {
@@ -981,12 +963,10 @@ ltc2983_thermistor_new(const struct fwnode_handle *child, struct ltc2983_data *s
switch (excitation_current) {
case 0:
/* auto range */
- if (sensor->type >=
- LTC2983_SENSOR_THERMISTOR_STEINHART) {
- dev_err(&st->spi->dev,
- "Auto Range not allowed for custom sensors\n");
- return ERR_PTR(-EINVAL);
- }
+ if (sensor->type >= LTC2983_SENSOR_THERMISTOR_STEINHART)
+ return dev_err_ptr_probe(&st->spi->dev, -EINVAL,
+ "Auto Range not allowed for custom sensors\n");
+
thermistor->excitation_current = 0x0c;
break;
case 250:
@@ -1023,10 +1003,9 @@ ltc2983_thermistor_new(const struct fwnode_handle *child, struct ltc2983_data *s
thermistor->excitation_current = 0x0b;
break;
default:
- dev_err(&st->spi->dev,
- "Invalid value for excitation current(%u)",
- excitation_current);
- return ERR_PTR(-EINVAL);
+ return dev_err_ptr_probe(&st->spi->dev, -EINVAL,
+ "Invalid value for excitation current(%u)\n",
+ excitation_current);
}
}
@@ -1056,12 +1035,11 @@ ltc2983_diode_new(const struct fwnode_handle *child, const struct ltc2983_data *
/* validate channel index */
if (!(diode->sensor_config & LTC2983_DIODE_DIFF_MASK) &&
- sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
- dev_err(&st->spi->dev,
- "Invalid chann:%d for differential thermistor",
- sensor->chan);
- return ERR_PTR(-EINVAL);
- }
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN)
+ return dev_err_ptr_probe(&st->spi->dev, -EINVAL,
+ "Invalid chann:%d for differential thermistor\n",
+ sensor->chan);
+
/* set common parameters */
diode->sensor.fault_handler = ltc2983_common_fault_handler;
diode->sensor.assign_chan = ltc2983_diode_assign_chan;
@@ -1083,10 +1061,9 @@ ltc2983_diode_new(const struct fwnode_handle *child, const struct ltc2983_data *
diode->excitation_current = 0x03;
break;
default:
- dev_err(&st->spi->dev,
- "Invalid value for excitation current(%u)",
- excitation_current);
- return ERR_PTR(-EINVAL);
+ return dev_err_ptr_probe(&st->spi->dev, -EINVAL,
+ "Invalid value for excitation current(%u)\n",
+ excitation_current);
}
}
@@ -1111,17 +1088,15 @@ static struct ltc2983_sensor *ltc2983_r_sense_new(struct fwnode_handle *child,
return ERR_PTR(-ENOMEM);
/* validate channel index */
- if (sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
- dev_err(&st->spi->dev, "Invalid chann:%d for r_sense",
- sensor->chan);
- return ERR_PTR(-EINVAL);
- }
+ if (sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN)
+ return dev_err_ptr_probe(&st->spi->dev, -EINVAL,
+ "Invalid chann:%d for r_sense\n",
+ sensor->chan);
ret = fwnode_property_read_u32(child, "adi,rsense-val-milli-ohms", &temp);
- if (ret) {
- dev_err(&st->spi->dev, "Property adi,rsense-val-milli-ohms missing\n");
- return ERR_PTR(-EINVAL);
- }
+ if (ret)
+ return dev_err_ptr_probe(&st->spi->dev, -EINVAL,
+ "Property adi,rsense-val-milli-ohms missing\n");
/*
* Times 1000 because we have milli-ohms and __convert_to_raw
* expects scales of 1000000 which are used for all other
@@ -1149,12 +1124,11 @@ static struct ltc2983_sensor *ltc2983_adc_new(struct fwnode_handle *child,
if (fwnode_property_read_bool(child, "adi,single-ended"))
adc->single_ended = true;
- if (!adc->single_ended &&
- sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
- dev_err(&st->spi->dev, "Invalid chan:%d for differential adc\n",
- sensor->chan);
- return ERR_PTR(-EINVAL);
- }
+ if (!adc->single_ended && sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN)
+ return dev_err_ptr_probe(&st->spi->dev, -EINVAL,
+ "Invalid chan:%d for differential adc\n",
+ sensor->chan);
+
/* set common parameters */
adc->sensor.assign_chan = ltc2983_adc_assign_chan;
adc->sensor.fault_handler = ltc2983_common_fault_handler;
@@ -1175,12 +1149,10 @@ static struct ltc2983_sensor *ltc2983_temp_new(struct fwnode_handle *child,
if (fwnode_property_read_bool(child, "adi,single-ended"))
temp->single_ended = true;
- if (!temp->single_ended &&
- sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
- dev_err(&st->spi->dev, "Invalid chan:%d for differential temp\n",
- sensor->chan);
- return ERR_PTR(-EINVAL);
- }
+ if (!temp->single_ended && sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN)
+ return dev_err_ptr_probe(&st->spi->dev, -EINVAL,
+ "Invalid chan:%d for differential temp\n",
+ sensor->chan);
temp->custom = __ltc2983_custom_sensor_new(st, child, "adi,custom-temp",
false, 4096, true);
@@ -1296,8 +1268,8 @@ static int ltc2983_reg_access(struct iio_dev *indio_dev,
if (readval)
return regmap_read(st->regmap, reg, readval);
- else
- return regmap_write(st->regmap, reg, writeval);
+
+ return regmap_write(st->regmap, reg, writeval);
}
static irqreturn_t ltc2983_irq_handler(int irq, void *data)
@@ -1330,10 +1302,9 @@ static int ltc2983_parse_fw(struct ltc2983_data *st)
device_property_read_u32(dev, "adi,filter-notch-freq", &st->filter_notch_freq);
st->num_channels = device_get_child_node_count(dev);
- if (!st->num_channels) {
- dev_err(&st->spi->dev, "At least one channel must be given!");
- return -EINVAL;
- }
+ if (!st->num_channels)
+ return dev_err_probe(&st->spi->dev, -EINVAL,
+ "At least one channel must be given!\n");
st->sensors = devm_kcalloc(dev, st->num_channels, sizeof(*st->sensors),
GFP_KERNEL);
@@ -1438,19 +1409,17 @@ static int ltc2983_eeprom_cmd(struct ltc2983_data *st, unsigned int cmd,
time = wait_for_completion_timeout(&st->completion,
msecs_to_jiffies(wait_time));
- if (!time) {
- dev_err(&st->spi->dev, "EEPROM command timed out\n");
- return -ETIMEDOUT;
- }
+ if (!time)
+ return dev_err_probe(&st->spi->dev, -ETIMEDOUT,
+ "EEPROM command timed out\n");
ret = regmap_read(st->regmap, status_reg, &val);
if (ret)
return ret;
- if (val & status_fail_mask) {
- dev_err(&st->spi->dev, "EEPROM command failed: 0x%02X\n", val);
- return -EINVAL;
- }
+ if (val & status_fail_mask)
+ return dev_err_probe(&st->spi->dev, -EINVAL,
+ "EEPROM command failed: 0x%02X\n", val);
return 0;
}
@@ -1464,10 +1433,9 @@ static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio)
ret = regmap_read_poll_timeout(st->regmap, LTC2983_STATUS_REG, status,
LTC2983_STATUS_UP(status) == 1, 25000,
25000 * 10);
- if (ret) {
- dev_err(&st->spi->dev, "Device startup timed out\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&st->spi->dev, ret,
+ "Device startup timed out\n");
ret = regmap_update_bits(st->regmap, LTC2983_GLOBAL_CONFIG_REG,
LTC2983_NOTCH_FREQ_MASK,
@@ -1583,10 +1551,9 @@ static int ltc2983_probe(struct spi_device *spi)
return -ENODEV;
st->regmap = devm_regmap_init_spi(spi, &ltc2983_regmap_config);
- if (IS_ERR(st->regmap)) {
- dev_err(&spi->dev, "Failed to initialize regmap\n");
- return PTR_ERR(st->regmap);
- }
+ if (IS_ERR(st->regmap))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->regmap),
+ "Failed to initialize regmap\n");
mutex_init(&st->lock);
init_completion(&st->completion);
@@ -1624,10 +1591,9 @@ static int ltc2983_probe(struct spi_device *spi)
ret = devm_request_irq(&spi->dev, spi->irq, ltc2983_irq_handler,
IRQF_TRIGGER_RISING, st->info->name, st);
- if (ret) {
- dev_err(&spi->dev, "failed to request an irq, %d", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "failed to request an irq\n");
if (st->info->has_eeprom) {
ret = ltc2983_eeprom_cmd(st, LTC2983_EEPROM_WRITE_CMD,
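For readers following the ltc2983.c rework: dev_err_probe() logs the message (or, for -EPROBE_DEFER, records it as the deferral reason), then returns the error code it was given, and dev_err_ptr_probe()/dev_err_cast_probe() are the ERR_PTR()/ERR_CAST() flavours of the same helper. A minimal sketch of the pattern being applied, with hypothetical function names:

	#include <linux/dev_printk.h>
	#include <linux/err.h>

	static void *example_before(struct device *dev)
	{
		dev_err(dev, "something failed\n");
		return ERR_PTR(-EINVAL);
	}

	static void *example_after(struct device *dev)
	{
		/* equivalent to ERR_PTR(dev_err_probe(dev, -EINVAL, "...")) */
		return dev_err_ptr_probe(dev, -EINVAL, "something failed\n");
	}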
diff --git a/drivers/iio/temperature/max30208.c b/drivers/iio/temperature/max30208.c
index 48be03852cd8..720469f9dc36 100644
--- a/drivers/iio/temperature/max30208.c
+++ b/drivers/iio/temperature/max30208.c
@@ -34,7 +34,6 @@
struct max30208_data {
struct i2c_client *client;
- struct iio_dev *indio_dev;
struct mutex lock; /* Lock to prevent concurrent reads of temperature readings */
};
diff --git a/drivers/iio/temperature/mcp9600.c b/drivers/iio/temperature/mcp9600.c
index 7a3eef5d5e75..f1bb0976273d 100644
--- a/drivers/iio/temperature/mcp9600.c
+++ b/drivers/iio/temperature/mcp9600.c
@@ -6,39 +6,123 @@
* Author: <andrew.hepp@ahepp.dev>
*/
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
/* MCP9600 registers */
#define MCP9600_HOT_JUNCTION 0x0
#define MCP9600_COLD_JUNCTION 0x2
+#define MCP9600_STATUS 0x4
+#define MCP9600_STATUS_ALERT(x) BIT(x)
+#define MCP9600_ALERT_CFG1 0x8
+#define MCP9600_ALERT_CFG(x) (MCP9600_ALERT_CFG1 + (x - 1))
+#define MCP9600_ALERT_CFG_ENABLE BIT(0)
+#define MCP9600_ALERT_CFG_ACTIVE_HIGH BIT(2)
+#define MCP9600_ALERT_CFG_FALLING BIT(3)
+#define MCP9600_ALERT_CFG_COLD_JUNCTION BIT(4)
+#define MCP9600_ALERT_HYSTERESIS1 0xc
+#define MCP9600_ALERT_HYSTERESIS(x) (MCP9600_ALERT_HYSTERESIS1 + (x - 1))
+#define MCP9600_ALERT_LIMIT1 0x10
+#define MCP9600_ALERT_LIMIT(x) (MCP9600_ALERT_LIMIT1 + (x - 1))
+#define MCP9600_ALERT_LIMIT_MASK GENMASK(15, 2)
#define MCP9600_DEVICE_ID 0x20
/* MCP9600 device id value */
#define MCP9600_DEVICE_ID_MCP9600 0x40
-static const struct iio_chan_spec mcp9600_channels[] = {
+#define MCP9600_ALERT_COUNT 4
+
+#define MCP9600_MIN_TEMP_HOT_JUNCTION_MICRO -200000000
+#define MCP9600_MAX_TEMP_HOT_JUNCTION_MICRO 1800000000
+
+#define MCP9600_MIN_TEMP_COLD_JUNCTION_MICRO -40000000
+#define MCP9600_MAX_TEMP_COLD_JUNCTION_MICRO 125000000
+
+enum mcp9600_alert {
+ MCP9600_ALERT1,
+ MCP9600_ALERT2,
+ MCP9600_ALERT3,
+ MCP9600_ALERT4
+};
+
+static const char * const mcp9600_alert_name[MCP9600_ALERT_COUNT] = {
+ [MCP9600_ALERT1] = "alert1",
+ [MCP9600_ALERT2] = "alert2",
+ [MCP9600_ALERT3] = "alert3",
+ [MCP9600_ALERT4] = "alert4",
+};
+
+static const struct iio_event_spec mcp9600_events[] = {
{
- .type = IIO_TEMP,
- .address = MCP9600_HOT_JUNCTION,
- .info_mask_separate =
- BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_HYSTERESIS),
},
{
- .type = IIO_TEMP,
- .address = MCP9600_COLD_JUNCTION,
- .channel2 = IIO_MOD_TEMP_AMBIENT,
- .modified = 1,
- .info_mask_separate =
- BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_HYSTERESIS),
},
};
+#define MCP9600_CHANNELS(hj_num_ev, hj_ev_spec_off, cj_num_ev, cj_ev_spec_off) \
+ { \
+ { \
+ .type = IIO_TEMP, \
+ .address = MCP9600_HOT_JUNCTION, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .event_spec = &mcp9600_events[hj_ev_spec_off], \
+ .num_event_specs = hj_num_ev, \
+ }, \
+ { \
+ .type = IIO_TEMP, \
+ .address = MCP9600_COLD_JUNCTION, \
+ .channel2 = IIO_MOD_TEMP_AMBIENT, \
+ .modified = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .event_spec = &mcp9600_events[cj_ev_spec_off], \
+ .num_event_specs = cj_num_ev, \
+ }, \
+ }
+
+static const struct iio_chan_spec mcp9600_channels[][2] = {
+ MCP9600_CHANNELS(0, 0, 0, 0), /* Alerts: - - - - */
+ MCP9600_CHANNELS(1, 0, 0, 0), /* Alerts: 1 - - - */
+ MCP9600_CHANNELS(1, 1, 0, 0), /* Alerts: - 2 - - */
+ MCP9600_CHANNELS(2, 0, 0, 0), /* Alerts: 1 2 - - */
+ MCP9600_CHANNELS(0, 0, 1, 0), /* Alerts: - - 3 - */
+ MCP9600_CHANNELS(1, 0, 1, 0), /* Alerts: 1 - 3 - */
+ MCP9600_CHANNELS(1, 1, 1, 0), /* Alerts: - 2 3 - */
+ MCP9600_CHANNELS(2, 0, 1, 0), /* Alerts: 1 2 3 - */
+ MCP9600_CHANNELS(0, 0, 1, 1), /* Alerts: - - - 4 */
+ MCP9600_CHANNELS(1, 0, 1, 1), /* Alerts: 1 - - 4 */
+ MCP9600_CHANNELS(1, 1, 1, 1), /* Alerts: - 2 - 4 */
+ MCP9600_CHANNELS(2, 0, 1, 1), /* Alerts: 1 2 - 4 */
+ MCP9600_CHANNELS(0, 0, 2, 0), /* Alerts: - - 3 4 */
+ MCP9600_CHANNELS(1, 0, 2, 0), /* Alerts: 1 - 3 4 */
+ MCP9600_CHANNELS(1, 1, 2, 0), /* Alerts: - 2 3 4 */
+ MCP9600_CHANNELS(2, 0, 2, 0), /* Alerts: 1 2 3 4 */
+};
+
struct mcp9600_data {
struct i2c_client *client;
};
@@ -80,15 +164,261 @@ static int mcp9600_read_raw(struct iio_dev *indio_dev,
}
}
+static int mcp9600_get_alert_index(int channel2, enum iio_event_direction dir)
+{
+ if (channel2 == IIO_MOD_TEMP_AMBIENT) {
+ if (dir == IIO_EV_DIR_RISING)
+ return MCP9600_ALERT3;
+ else
+ return MCP9600_ALERT4;
+ } else {
+ if (dir == IIO_EV_DIR_RISING)
+ return MCP9600_ALERT1;
+ else
+ return MCP9600_ALERT2;
+ }
+}
+
+static int mcp9600_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct mcp9600_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ int i, ret;
+
+ i = mcp9600_get_alert_index(chan->channel2, dir);
+ ret = i2c_smbus_read_byte_data(client, MCP9600_ALERT_CFG(i + 1));
+ if (ret < 0)
+ return ret;
+
+ return FIELD_GET(MCP9600_ALERT_CFG_ENABLE, ret);
+}
+
+static int mcp9600_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct mcp9600_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ int i, ret;
+
+ i = mcp9600_get_alert_index(chan->channel2, dir);
+ ret = i2c_smbus_read_byte_data(client, MCP9600_ALERT_CFG(i + 1));
+ if (ret < 0)
+ return ret;
+
+ if (state)
+ ret |= MCP9600_ALERT_CFG_ENABLE;
+ else
+ ret &= ~MCP9600_ALERT_CFG_ENABLE;
+
+ return i2c_smbus_write_byte_data(client, MCP9600_ALERT_CFG(i + 1), ret);
+}
+
+static int mcp9600_read_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct mcp9600_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ s32 ret;
+ int i;
+
+ i = mcp9600_get_alert_index(chan->channel2, dir);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ ret = i2c_smbus_read_word_swapped(client, MCP9600_ALERT_LIMIT(i + 1));
+ if (ret < 0)
+ return ret;
+ /*
+ * Temperature is stored in two’s complement format in
+ * bits (15:2); the LSB is 0.25 degrees Celsius.
+ */
+ *val = sign_extend32(FIELD_GET(MCP9600_ALERT_LIMIT_MASK, ret), 13);
+ *val2 = 4;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_EV_INFO_HYSTERESIS:
+ ret = i2c_smbus_read_byte_data(client, MCP9600_ALERT_HYSTERESIS(i + 1));
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mcp9600_write_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct mcp9600_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ int s_val, i;
+ s16 thresh;
+ u8 hyst;
+
+ i = mcp9600_get_alert_index(chan->channel2, dir);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ /* Scale value to include decimal part into calculations */
+ s_val = (val < 0) ? ((val * 1000000) - val2) :
+ ((val * 1000000) + val2);
+ if (chan->channel2 == IIO_MOD_TEMP_AMBIENT) {
+ s_val = max(s_val, MCP9600_MIN_TEMP_COLD_JUNCTION_MICRO);
+ s_val = min(s_val, MCP9600_MAX_TEMP_COLD_JUNCTION_MICRO);
+ } else {
+ s_val = max(s_val, MCP9600_MIN_TEMP_HOT_JUNCTION_MICRO);
+ s_val = min(s_val, MCP9600_MAX_TEMP_HOT_JUNCTION_MICRO);
+ }
+
+ /*
+ * The effective shift is 4 bits: 2 for the (15:2) field position and
+ * 2 because the LSB is 0.25 degrees Celsius; the temperature is
+ * stored in two’s complement format.
+ */
+ thresh = (s16)(s_val / (1000000 >> 4));
+ return i2c_smbus_write_word_swapped(client,
+ MCP9600_ALERT_LIMIT(i + 1),
+ thresh);
+ case IIO_EV_INFO_HYSTERESIS:
+ hyst = min(abs(val), 255);
+ return i2c_smbus_write_byte_data(client,
+ MCP9600_ALERT_HYSTERESIS(i + 1),
+ hyst);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct iio_info mcp9600_info = {
.read_raw = mcp9600_read_raw,
+ .read_event_config = mcp9600_read_event_config,
+ .write_event_config = mcp9600_write_event_config,
+ .read_event_value = mcp9600_read_thresh,
+ .write_event_value = mcp9600_write_thresh,
};
+static irqreturn_t mcp9600_alert_handler(void *private,
+ enum mcp9600_alert alert,
+ enum iio_modifier mod,
+ enum iio_event_direction dir)
+{
+ struct iio_dev *indio_dev = private;
+ struct mcp9600_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, MCP9600_STATUS);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ if (!(ret & MCP9600_STATUS_ALERT(alert)))
+ return IRQ_NONE;
+
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_TEMP, 0, mod, IIO_EV_TYPE_THRESH,
+ dir),
+ iio_get_time_ns(indio_dev));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mcp9600_alert1_handler(int irq, void *private)
+{
+ return mcp9600_alert_handler(private, MCP9600_ALERT1, IIO_NO_MOD,
+ IIO_EV_DIR_RISING);
+}
+
+static irqreturn_t mcp9600_alert2_handler(int irq, void *private)
+{
+ return mcp9600_alert_handler(private, MCP9600_ALERT2, IIO_NO_MOD,
+ IIO_EV_DIR_FALLING);
+}
+
+static irqreturn_t mcp9600_alert3_handler(int irq, void *private)
+{
+ return mcp9600_alert_handler(private, MCP9600_ALERT3,
+ IIO_MOD_TEMP_AMBIENT, IIO_EV_DIR_RISING);
+}
+
+static irqreturn_t mcp9600_alert4_handler(int irq, void *private)
+{
+ return mcp9600_alert_handler(private, MCP9600_ALERT4,
+ IIO_MOD_TEMP_AMBIENT, IIO_EV_DIR_FALLING);
+}
+
+static irqreturn_t (*mcp9600_alert_handler_func[MCP9600_ALERT_COUNT]) (int, void *) = {
+ mcp9600_alert1_handler,
+ mcp9600_alert2_handler,
+ mcp9600_alert3_handler,
+ mcp9600_alert4_handler,
+};
+
+static int mcp9600_probe_alerts(struct iio_dev *indio_dev)
+{
+ struct mcp9600_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ struct device *dev = &client->dev;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ unsigned int irq_type;
+ int ret, irq, i;
+ u8 val, ch_sel;
+
+ /*
+ * alert1: hot junction, rising temperature
+ * alert2: hot junction, falling temperature
+ * alert3: cold junction, rising temperature
+ * alert4: cold junction, falling temperature
+ */
+ ch_sel = 0;
+ for (i = 0; i < MCP9600_ALERT_COUNT; i++) {
+ irq = fwnode_irq_get_byname(fwnode, mcp9600_alert_name[i]);
+ if (irq <= 0)
+ continue;
+
+ val = 0;
+ irq_type = irq_get_trigger_type(irq);
+ if (irq_type == IRQ_TYPE_EDGE_RISING)
+ val |= MCP9600_ALERT_CFG_ACTIVE_HIGH;
+
+ if (i == MCP9600_ALERT2 || i == MCP9600_ALERT4)
+ val |= MCP9600_ALERT_CFG_FALLING;
+
+ if (i == MCP9600_ALERT3 || i == MCP9600_ALERT4)
+ val |= MCP9600_ALERT_CFG_COLD_JUNCTION;
+
+ ret = i2c_smbus_write_byte_data(client,
+ MCP9600_ALERT_CFG(i + 1),
+ val);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ mcp9600_alert_handler_func[i],
+ IRQF_ONESHOT, "mcp9600",
+ indio_dev);
+ if (ret)
+ return ret;
+
+ ch_sel |= BIT(i);
+ }
+
+ return ch_sel;
+}
+
static int mcp9600_probe(struct i2c_client *client)
{
struct iio_dev *indio_dev;
struct mcp9600_data *data;
- int ret;
+ int ret, ch_sel;
ret = i2c_smbus_read_byte_data(client, MCP9600_DEVICE_ID);
if (ret < 0)
@@ -104,11 +434,15 @@ static int mcp9600_probe(struct i2c_client *client)
data = iio_priv(indio_dev);
data->client = client;
+ ch_sel = mcp9600_probe_alerts(indio_dev);
+ if (ch_sel < 0)
+ return ch_sel;
+
indio_dev->info = &mcp9600_info;
indio_dev->name = "mcp9600";
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = mcp9600_channels;
- indio_dev->num_channels = ARRAY_SIZE(mcp9600_channels);
+ indio_dev->channels = mcp9600_channels[ch_sel];
+ indio_dev->num_channels = ARRAY_SIZE(mcp9600_channels[ch_sel]);
return devm_iio_device_register(&client->dev, indio_dev);
}
@@ -135,6 +469,7 @@ static struct i2c_driver mcp9600_driver = {
};
module_i2c_driver(mcp9600_driver);
+MODULE_AUTHOR("Dimitri Fedrau <dima.fedrau@gmail.com>");
MODULE_AUTHOR("Andrew Hepp <andrew.hepp@ahepp.dev>");
MODULE_DESCRIPTION("Microchip MCP9600 thermocouple EMF converter driver");
MODULE_LICENSE("GPL");
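Two worked examples, with hypothetical values, for the mcp9600 alert support added above. First, mcp9600_probe_alerts() returns a bitmask of the alert interrupts that are actually wired up, and that mask indexes mcp9600_channels[]: a board providing only alert1 and alert3 gives ch_sel = BIT(0) | BIT(2) = 5, i.e. the "Alerts: 1 - 3 -" row (one rising-threshold event per junction). Second, the limit-register arithmetic used by mcp9600_read_thresh()/mcp9600_write_thresh():

	#include <linux/bitfield.h>
	#include <linux/bitops.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	/*
	 * A 30 degree C rising limit: s_val = 30 * 1000000 micro-degrees, so
	 * thresh = s_val / (1000000 >> 4) = 30 * 16 = 480 = 0x01e0. Bits (15:2)
	 * then hold 120, and the 0.25 degree LSB gives 120 * 0.25 = 30 back.
	 */
	static int example_limit_to_quarter_degrees(u16 limit)	/* 0x01e0 -> 120 */
	{
		return sign_extend32(FIELD_GET(GENMASK(15, 2), limit), 13);
	}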
diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
index 8a57be108620..ae4ea587e7f9 100644
--- a/drivers/iio/temperature/mlx90632.c
+++ b/drivers/iio/temperature/mlx90632.c
@@ -334,8 +334,8 @@ static int mlx90632_perform_measurement(struct mlx90632_data *data)
unsigned int reg_status;
int ret;
- ret = regmap_update_bits(data->regmap, MLX90632_REG_STATUS,
- MLX90632_STAT_DATA_RDY, 0);
+ ret = regmap_clear_bits(data->regmap, MLX90632_REG_STATUS,
+ MLX90632_STAT_DATA_RDY);
if (ret < 0)
return ret;
@@ -1279,7 +1279,7 @@ static int mlx90632_probe(struct i2c_client *client)
}
static const struct i2c_device_id mlx90632_id[] = {
- { "mlx90632", 0 },
+ { "mlx90632" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mlx90632_id);
diff --git a/drivers/iio/temperature/mlx90635.c b/drivers/iio/temperature/mlx90635.c
index 1f5c962c1818..f7f88498ba0e 100644
--- a/drivers/iio/temperature/mlx90635.c
+++ b/drivers/iio/temperature/mlx90635.c
@@ -947,9 +947,9 @@ static int mlx90635_probe(struct i2c_client *client)
"failed to allocate regmap\n");
regmap_ee = devm_regmap_init_i2c(client, &mlx90635_regmap_ee);
- if (IS_ERR(regmap))
- return dev_err_probe(&client->dev, PTR_ERR(regmap),
- "failed to allocate regmap\n");
+ if (IS_ERR(regmap_ee))
+ return dev_err_probe(&client->dev, PTR_ERR(regmap_ee),
+ "failed to allocate EEPROM regmap\n");
mlx90635 = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index 3a3904fe138c..6d8d661f0c82 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -280,7 +280,7 @@ static const struct of_device_id tmp006_of_match[] = {
MODULE_DEVICE_TABLE(of, tmp006_of_match);
static const struct i2c_device_id tmp006_id[] = {
- { "tmp006", 0 },
+ { "tmp006" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp006_id);
diff --git a/drivers/iio/temperature/tmp007.c b/drivers/iio/temperature/tmp007.c
index decef6896362..9bdfa9423492 100644
--- a/drivers/iio/temperature/tmp007.c
+++ b/drivers/iio/temperature/tmp007.c
@@ -563,7 +563,7 @@ static const struct of_device_id tmp007_of_match[] = {
MODULE_DEVICE_TABLE(of, tmp007_of_match);
static const struct i2c_device_id tmp007_id[] = {
- { "tmp007", 0 },
+ { "tmp007" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp007_id);
diff --git a/drivers/iio/temperature/tsys01.c b/drivers/iio/temperature/tsys01.c
index 53ef56fbfe1d..9213761c5d18 100644
--- a/drivers/iio/temperature/tsys01.c
+++ b/drivers/iio/temperature/tsys01.c
@@ -206,7 +206,7 @@ static int tsys01_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id tsys01_id[] = {
- {"tsys01", 0},
+ { "tsys01" },
{}
};
MODULE_DEVICE_TABLE(i2c, tsys01_id);
diff --git a/drivers/iio/temperature/tsys02d.c b/drivers/iio/temperature/tsys02d.c
index 6191db92ef9a..2b4959d6e467 100644
--- a/drivers/iio/temperature/tsys02d.c
+++ b/drivers/iio/temperature/tsys02d.c
@@ -168,7 +168,7 @@ static int tsys02d_probe(struct i2c_client *client)
}
static const struct i2c_device_id tsys02d_id[] = {
- {"tsys02d", 0},
+ { "tsys02d" },
{}
};
MODULE_DEVICE_TABLE(i2c, tsys02d_id);
diff --git a/drivers/iio/test/iio-test-gts.c b/drivers/iio/test/iio-test-gts.c
index cf7ab773ea0b..5f16a7b5e6d4 100644
--- a/drivers/iio/test/iio-test-gts.c
+++ b/drivers/iio/test/iio-test-gts.c
@@ -70,6 +70,7 @@
*/
static struct iio_gts gts;
+/* Keep the gain and time tables unsorted to test the sorting */
static const struct iio_gain_sel_pair gts_test_gains[] = {
GAIN_SCALE_GAIN(1, TEST_GSEL_1),
GAIN_SCALE_GAIN(4, TEST_GSEL_4),
@@ -79,16 +80,17 @@ static const struct iio_gain_sel_pair gts_test_gains[] = {
GAIN_SCALE_GAIN(256, TEST_GSEL_256),
GAIN_SCALE_GAIN(512, TEST_GSEL_512),
GAIN_SCALE_GAIN(1024, TEST_GSEL_1024),
- GAIN_SCALE_GAIN(2048, TEST_GSEL_2048),
GAIN_SCALE_GAIN(4096, TEST_GSEL_4096),
+ GAIN_SCALE_GAIN(2048, TEST_GSEL_2048),
#define HWGAIN_MAX 4096
};
static const struct iio_itime_sel_mul gts_test_itimes[] = {
- GAIN_SCALE_ITIME_US(400 * 1000, TEST_TSEL_400, 8),
- GAIN_SCALE_ITIME_US(200 * 1000, TEST_TSEL_200, 4),
GAIN_SCALE_ITIME_US(100 * 1000, TEST_TSEL_100, 2),
+ GAIN_SCALE_ITIME_US(400 * 1000, TEST_TSEL_400, 8),
+ GAIN_SCALE_ITIME_US(400 * 1000, TEST_TSEL_400, 8),
GAIN_SCALE_ITIME_US(50 * 1000, TEST_TSEL_50, 1),
+ GAIN_SCALE_ITIME_US(200 * 1000, TEST_TSEL_200, 4),
#define TIMEGAIN_MAX 8
};
#define TOTAL_GAIN_MAX (HWGAIN_MAX * TIMEGAIN_MAX)
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index d76444030a28..0684329956d9 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -158,7 +158,7 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
regmap_write(priv->regmap, TIM_PSC, prescaler);
regmap_write(priv->regmap, TIM_ARR, prd - 1);
- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE);
+ regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE);
/* Force master mode to update mode */
if (stm32_timer_is_trgo2_name(trig->name))
@@ -169,10 +169,10 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
0x2 << TIM_CR2_MMS_SHIFT);
/* Make sure that registers are updated */
- regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
+ regmap_set_bits(priv->regmap, TIM_EGR, TIM_EGR_UG);
/* Enable controller */
- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, TIM_CR1_CEN);
+ regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
mutex_unlock(&priv->lock);
return 0;
@@ -189,19 +189,19 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv,
mutex_lock(&priv->lock);
/* Stop timer */
- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
+ regmap_clear_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE);
+ regmap_clear_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
regmap_write(priv->regmap, TIM_PSC, 0);
regmap_write(priv->regmap, TIM_ARR, 0);
/* Force disable master mode */
if (stm32_timer_is_trgo2_name(trig->name))
- regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, 0);
+ regmap_clear_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2);
else
- regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS, 0);
+ regmap_clear_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS);
/* Make sure that registers are updated */
- regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
+ regmap_set_bits(priv->regmap, TIM_EGR, TIM_EGR_UG);
if (priv->enabled) {
priv->enabled = false;
@@ -498,11 +498,9 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
priv->enabled = true;
clk_enable(priv->clk);
}
- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
- TIM_CR1_CEN);
+ regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
} else {
- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
- 0);
+ regmap_clear_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
if (priv->enabled) {
priv->enabled = false;
clk_disable(priv->clk);
@@ -555,7 +553,7 @@ static int stm32_set_trigger_mode(struct iio_dev *indio_dev,
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
- regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, TIM_SMCR_SMS);
+ regmap_set_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS);
return 0;
}
@@ -683,7 +681,7 @@ static ssize_t stm32_count_set_preset(struct iio_dev *indio_dev,
return ret;
/* TIMx_ARR register shouldn't be buffered (ARPE=0) */
- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
+ regmap_clear_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE);
regmap_write(priv->regmap, TIM_ARR, preset);
return len;
@@ -757,9 +755,9 @@ static void stm32_timer_detect_trgo2(struct stm32_timer_trigger *priv)
* Master mode selection 2 bits can only be written and read back when
* timer supports it.
*/
- regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, TIM_CR2_MMS2);
+ regmap_set_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2);
regmap_read(priv->regmap, TIM_CR2, &val);
- regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, 0);
+ regmap_clear_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2);
priv->has_trgo2 = !!val;
}
@@ -820,7 +818,7 @@ static void stm32_timer_trigger_remove(struct platform_device *pdev)
/* Check if nobody else uses the timer, then disable it */
regmap_read(priv->regmap, TIM_CCER, &val);
if (!(val & TIM_CCER_CCXE))
- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
+ regmap_clear_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
if (priv->enabled)
clk_disable(priv->clk);
@@ -841,7 +839,7 @@ static int stm32_timer_trigger_suspend(struct device *dev)
regmap_read(priv->regmap, TIM_SMCR, &priv->bak.smcr);
/* Disable the timer */
- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
+ regmap_clear_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
clk_disable(priv->clk);
}
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 9dca451ed522..6974922e5609 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -107,8 +107,6 @@ struct bnxt_re_gsi_context {
struct bnxt_re_sqp_entries *sqp_tbl;
};
-#define BNXT_RE_MIN_MSIX 2
-#define BNXT_RE_MAX_MSIX 9
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
#define BNXT_RE_GEN_P5_MAX_VF 64
@@ -168,7 +166,7 @@ struct bnxt_re_dev {
struct bnxt_qplib_rcfw rcfw;
/* NQ */
- struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
+ struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX];
/* Device Resources */
struct bnxt_qplib_dev_attr dev_attr;
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 4f13423ecdbd..887b09dd86e7 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -112,6 +112,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
"start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
start, iova, length, access_flags);
+ access_flags &= ~IB_ACCESS_OPTIONAL;
if (access_flags & ~VALID_MR_FLAGS)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 2366c46eebc8..43660c831b22 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3759,10 +3759,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
return 0;
-err:
- mlx5r_macsec_dealloc_gids(dev);
err_mp:
mlx5_ib_cleanup_multiport_master(dev);
+err:
+ mlx5r_macsec_dealloc_gids(dev);
return err;
}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ecc111ed5d86..d3c1f63791a2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -246,6 +246,7 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
MLX5_SET(mkc, mkc, access_mode_4_2,
(ent->rb_key.access_mode >> 2) & 0x7);
+ MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats);
MLX5_SET(mkc, mkc, translations_octword_size,
get_mkc_octo_size(ent->rb_key.access_mode,
@@ -641,10 +642,8 @@ static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
new = &((*new)->rb_left);
if (cmp < 0)
new = &((*new)->rb_right);
- if (cmp == 0) {
- mutex_unlock(&cache->rb_lock);
+ if (cmp == 0)
return -EEXIST;
- }
}
/* Add new node and rebalance tree. */
@@ -719,6 +718,8 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
}
mr->mmkey.cache_ent = ent;
mr->mmkey.type = MLX5_MKEY_MR;
+ mr->mmkey.rb_key = ent->rb_key;
+ mr->mmkey.cacheable = true;
init_waitqueue_head(&mr->mmkey.wait);
return mr;
}
@@ -1169,7 +1170,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
mr->ibmr.pd = pd;
mr->umem = umem;
mr->page_shift = order_base_2(page_size);
- mr->mmkey.cacheable = true;
set_mr_fields(dev, mr, umem->length, access_flags, iova);
return mr;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index a056ea835da5..84be0c3d5699 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
int err;
struct mlx5_srq_attr in = {};
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+ __u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
+ sizeof(struct mlx5_wqe_data_seg);
if (init_attr->srq_type != IB_SRQT_BASIC &&
init_attr->srq_type != IB_SRQT_XRC &&
init_attr->srq_type != IB_SRQT_TM)
return -EOPNOTSUPP;
- /* Sanity check SRQ size before proceeding */
- if (init_attr->attr.max_wr >= max_srq_wqes) {
- mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
- init_attr->attr.max_wr,
- max_srq_wqes);
+ /* Sanity check SRQ and sge size before proceeding */
+ if (init_attr->attr.max_wr >= max_srq_wqes ||
+ init_attr->attr.max_sge > max_sge_sz) {
+ mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n",
+ init_attr->attr.max_wr, max_srq_wqes,
+ init_attr->attr.max_sge, max_sge_sz);
return -EINVAL;
}
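The new SGE bound comes straight from the receive-WQE size capability; a worked example with a hypothetical capability value:

	/*
	 * struct mlx5_wqe_data_seg is 16 bytes (byte_count, lkey, addr), so if
	 * MLX5_CAP_GEN(mdev, max_wqe_sz_rq) were to report 512 bytes:
	 *	max_sge_sz = 512 / 16 = 32
	 * and an SRQ requesting attr.max_sge > 32 now fails with -EINVAL.
	 */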
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index c6a7fa3054fa..6596a85723c9 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -344,6 +344,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
* receive buffer later. For rdma operations additional
* length checks are performed in check_rkey.
*/
+ if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
+ unsigned int payload = payload_size(pkt);
+ unsigned int recv_buffer_len = 0;
+ int i;
+
+ for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
+ recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
+ if (payload + 40 > recv_buffer_len) {
+ rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
+ return RESPST_ERR_LENGTH;
+ }
+ }
+
if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
(qp_type(qp) == IB_QPT_UC))) {
unsigned int mtu = qp->mtu;
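A note on the constant in the new UD/GSI length check: a UD receive buffer must reserve its first 40 bytes for the GRH (sizeof(struct ib_grh)), hence payload + 40. A worked example with hypothetical sizes:

	/*
	 * A 256-byte UD payload needs sum(sge[i].length) >= 256 + 40 = 296;
	 * a smaller posted receive is now rejected with RESPST_ERR_LENGTH.
	 */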
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index c7d4d8ab5a09..de6238ee4379 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -812,7 +812,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
int i;
for (i = 0; i < ibwr->num_sge; i++, sge++) {
- memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length);
+ memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
p += sge->length;
}
}
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index 4fba28b1a1e7..3ef66e458544 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -673,7 +673,6 @@ static struct parport_driver db9_parport_driver = {
.name = "db9",
.match_port = db9_attach,
.detach = db9_detach,
- .devmodel = true,
};
static int __init db9_init(void)
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 41d5dac05448..7ed4892749fa 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -1016,7 +1016,6 @@ static struct parport_driver gc_parport_driver = {
.name = "gamecon",
.match_port = gc_attach,
.detach = gc_detach,
- .devmodel = true,
};
static int __init gc_init(void)
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index dfb9c684651f..baa14acaa3a8 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -274,7 +274,6 @@ static struct parport_driver tgfx_parport_driver = {
.name = "turbografx",
.match_port = tgfx_attach,
.detach = tgfx_detach,
- .devmodel = true,
};
static int __init tgfx_init(void)
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index 27d95d6cf56e..59eea813f258 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -293,7 +293,6 @@ static struct parport_driver walkera0701_parport_driver = {
.name = "walkera0701",
.match_port = walkera0701_attach,
.detach = walkera0701_detach,
- .devmodel = true,
};
module_parport_driver(walkera0701_parport_driver);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 70f0654c58b6..2b8370ecf42a 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -209,6 +209,7 @@ static const struct xpad_device {
{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
{ 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
{ 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE },
+ { 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE },
{ 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
{ 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 4e38229404b4..b4723ea395eb 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1477,15 +1477,46 @@ static void elantech_disconnect(struct psmouse *psmouse)
}
/*
+ * Some hw_version 4 models fail to properly activate absolute mode on
+ * resume without going through a disable/enable cycle.
+ */
+static const struct dmi_system_id elantech_needs_reenable[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ /* Lenovo N24 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "81AF"),
+ },
+ },
+#endif
+ { }
+};
+
+/*
* Put the touchpad back into absolute mode when reconnecting
*/
static int elantech_reconnect(struct psmouse *psmouse)
{
+ int err;
+
psmouse_reset(psmouse);
if (elantech_detect(psmouse, 0))
return -1;
+ if (dmi_check_system(elantech_needs_reenable)) {
+ err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE);
+ if (err)
+ psmouse_warn(psmouse, "failed to deactivate mouse on %s: %d\n",
+ psmouse->ps2dev.serio->phys, err);
+
+ err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE);
+ if (err)
+ psmouse_warn(psmouse, "failed to reactivate mouse on %s: %d\n",
+ psmouse->ps2dev.serio->phys, err);
+ }
+
if (elantech_set_absolute_mode(psmouse)) {
psmouse_err(psmouse,
"failed to put touchpad back into absolute mode.\n");
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index dfc6c581873b..5b50475ec414 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -76,7 +76,7 @@ static inline void i8042_write_command(int val)
#define SERIO_QUIRK_PROBE_DEFER BIT(5)
#define SERIO_QUIRK_RESET_ALWAYS BIT(6)
#define SERIO_QUIRK_RESET_NEVER BIT(7)
-#define SERIO_QUIRK_DIECT BIT(8)
+#define SERIO_QUIRK_DIRECT BIT(8)
#define SERIO_QUIRK_DUMBKBD BIT(9)
#define SERIO_QUIRK_NOLOOP BIT(10)
#define SERIO_QUIRK_NOTIMEOUT BIT(11)
@@ -1332,6 +1332,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
+ {
+ /*
+ * The Ayaneo Kun is a handheld device where some of the buttons
+ * are handled by an AT keyboard. The keyboard is usually
+ * detected as raw, but sometimes, typically after a cold boot,
+ * it is detected as translated. Make sure that the keyboard
+ * is always in raw mode.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "KUN"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_DIRECT)
+ },
{ }
};
@@ -1655,7 +1669,7 @@ static void __init i8042_check_quirks(void)
if (quirks & SERIO_QUIRK_RESET_NEVER)
i8042_reset = I8042_RESET_NEVER;
}
- if (quirks & SERIO_QUIRK_DIECT)
+ if (quirks & SERIO_QUIRK_DIRECT)
i8042_direct = true;
if (quirks & SERIO_QUIRK_DUMBKBD)
i8042_dumbkbd = true;
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 0d54895428f5..ac1f9ea3f969 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -218,6 +218,5 @@ static struct parport_driver parkbd_parport_driver = {
.name = "parkbd",
.match_port = parkbd_attach,
.detach = parkbd_detach,
- .devmodel = true,
};
module_parport_driver(parkbd_parport_driver);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index d2bbb436a77d..4d13db13b9e5 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1111,6 +1111,16 @@ static const struct of_device_id ads7846_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, ads7846_dt_ids);
+static const struct spi_device_id ads7846_spi_ids[] = {
+ { "tsc2046", 7846 },
+ { "ads7843", 7843 },
+ { "ads7845", 7845 },
+ { "ads7846", 7846 },
+ { "ads7873", 7873 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ads7846_spi_ids);
+
static const struct ads7846_platform_data *ads7846_get_props(struct device *dev)
{
struct ads7846_platform_data *pdata;
@@ -1386,10 +1396,10 @@ static struct spi_driver ads7846_driver = {
},
.probe = ads7846_probe,
.remove = ads7846_remove,
+ .id_table = ads7846_spi_ids,
};
module_spi_driver(ads7846_driver);
MODULE_DESCRIPTION("ADS7846 TouchScreen Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("spi:ads7846");
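On the ads7846 change: with an id_table, the SPI core's "spi:<name>" modalias events can autoload the module for every listed part, which is roughly what the removed MODULE_ALIAS line did for the single "ads7846" name, and the matched entry also becomes available to the driver. A minimal sketch of that last point, using a hypothetical helper that ads7846.c itself does not contain:

	#include <linux/spi/spi.h>

	static unsigned long example_model(struct spi_device *spi)
	{
		const struct spi_device_id *id = spi_get_device_id(spi);

		return id ? id->driver_data : 0;	/* e.g. 7845 for "ads7845" */
	}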
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 31ffdc2a93f3..79bdb2b10949 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -261,8 +261,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
if (!error && data[0] == 2) {
error = i2c_master_recv(client, data + ILI251X_DATA_SIZE1,
ILI251X_DATA_SIZE2);
- if (error >= 0 && error != ILI251X_DATA_SIZE2)
- error = -EIO;
+ if (error >= 0)
+ error = error == ILI251X_DATA_SIZE2 ? 0 : -EIO;
}
return error;
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index bbd366dcb69a..6a42b27c4599 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -71,7 +71,6 @@ struct silead_ts_data {
struct regulator_bulk_data regulators[2];
char fw_name[64];
struct touchscreen_properties prop;
- u32 max_fingers;
u32 chip_id;
struct input_mt_pos pos[SILEAD_MAX_FINGERS];
int slots[SILEAD_MAX_FINGERS];
@@ -136,7 +135,7 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data)
touchscreen_parse_properties(data->input, true, &data->prop);
silead_apply_efi_fw_min_max(data);
- input_mt_init_slots(data->input, data->max_fingers,
+ input_mt_init_slots(data->input, SILEAD_MAX_FINGERS,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED |
INPUT_MT_TRACK);
@@ -256,10 +255,10 @@ static void silead_ts_read_data(struct i2c_client *client)
return;
}
- if (buf[0] > data->max_fingers) {
+ if (buf[0] > SILEAD_MAX_FINGERS) {
dev_warn(dev, "More touches reported then supported %d > %d\n",
- buf[0], data->max_fingers);
- buf[0] = data->max_fingers;
+ buf[0], SILEAD_MAX_FINGERS);
+ buf[0] = SILEAD_MAX_FINGERS;
}
if (silead_ts_handle_pen_data(data, buf))
@@ -315,7 +314,6 @@ sync:
static int silead_ts_init(struct i2c_client *client)
{
- struct silead_ts_data *data = i2c_get_clientdata(client);
int error;
error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET,
@@ -325,7 +323,7 @@ static int silead_ts_init(struct i2c_client *client)
usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
error = i2c_smbus_write_byte_data(client, SILEAD_REG_TOUCH_NR,
- data->max_fingers);
+ SILEAD_MAX_FINGERS);
if (error)
goto i2c_write_err;
usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
@@ -591,13 +589,6 @@ static void silead_ts_read_props(struct i2c_client *client)
const char *str;
int error;
- error = device_property_read_u32(dev, "silead,max-fingers",
- &data->max_fingers);
- if (error) {
- dev_dbg(dev, "Max fingers read error %d\n", error);
- data->max_fingers = 5; /* Most devices handle up-to 5 fingers */
- }
-
error = device_property_read_string(dev, "firmware-name", &str);
if (!error)
snprintf(data->fw_name, sizeof(data->fw_name),
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 2fde1302a584..2d5945c982bd 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -129,7 +129,8 @@ static inline int check_feature_gpt_level(void)
static inline bool amd_iommu_gt_ppr_supported(void)
{
return (check_feature(FEATURE_GT) &&
- check_feature(FEATURE_PPR));
+ check_feature(FEATURE_PPR) &&
+ check_feature(FEATURE_EPHSUP));
}
static inline u64 iommu_virt_to_phys(void *vaddr)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index a18e74878f68..c89d85b54a1a 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1626,8 +1626,17 @@ static void __init free_pci_segments(void)
}
}
+static void __init free_sysfs(struct amd_iommu *iommu)
+{
+ if (iommu->iommu.dev) {
+ iommu_device_unregister(&iommu->iommu);
+ iommu_device_sysfs_remove(&iommu->iommu);
+ }
+}
+
static void __init free_iommu_one(struct amd_iommu *iommu)
{
+ free_sysfs(iommu);
free_cwwb_sem(iommu);
free_command_buffer(iommu);
free_event_buffer(iommu);
@@ -2734,6 +2743,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
iommu_set_exclusion_range(iommu);
+ iommu_enable_gt(iommu);
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
@@ -3353,7 +3363,7 @@ int amd_iommu_reenable(int mode)
return 0;
}
-int __init amd_iommu_enable_faulting(unsigned int cpu)
+int amd_iommu_enable_faulting(unsigned int cpu)
{
/* We enable MSI later when PCI is initialized */
return 0;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 52d83730a22a..b19e8c0f48fa 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2032,7 +2032,6 @@ static int do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
- struct pci_dev *pdev;
int ret = 0;
/* Update data structures */
@@ -2047,30 +2046,13 @@ static int do_attach(struct iommu_dev_data *dev_data,
domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1;
- pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
+ /* Setup GCR3 table */
if (pdom_is_sva_capable(domain)) {
ret = init_gcr3_table(dev_data, domain);
if (ret)
return ret;
-
- if (pdev) {
- pdev_enable_caps(pdev);
-
- /*
- * Device can continue to function even if IOPF
- * enablement failed. Hence in error path just
- * disable device PRI support.
- */
- if (amd_iommu_iopf_add_device(iommu, dev_data))
- pdev_disable_cap_pri(pdev);
- }
- } else if (pdev) {
- pdev_enable_cap_ats(pdev);
}
- /* Update device table */
- amd_iommu_dev_update_dte(dev_data, true);
-
return ret;
}
@@ -2079,6 +2061,12 @@ static void do_detach(struct iommu_dev_data *dev_data)
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+ /* Clear DTE and flush the entry */
+ amd_iommu_dev_update_dte(dev_data, false);
+
+ /* Flush IOTLB and wait for the flushes to finish */
+ amd_iommu_domain_flush_all(domain);
+
/* Clear GCR3 table */
if (pdom_is_sva_capable(domain))
destroy_gcr3_table(dev_data, domain);
@@ -2087,12 +2075,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
dev_data->domain = NULL;
list_del(&dev_data->list);
- /* Clear DTE and flush the entry */
- amd_iommu_dev_update_dte(dev_data, false);
-
- /* Flush IOTLB and wait for the flushes to finish */
- amd_iommu_domain_flush_all(domain);
-
/* decrease reference counters - needs to happen after the flushes */
domain->dev_iommu[iommu->index] -= 1;
domain->dev_cnt -= 1;
@@ -2163,6 +2145,11 @@ static void detach_device(struct device *dev)
do_detach(dev_data);
+out:
+ spin_unlock(&dev_data->lock);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+
/* Remove IOPF handler */
if (ppr)
amd_iommu_iopf_remove_device(iommu, dev_data);
@@ -2170,10 +2157,6 @@ static void detach_device(struct device *dev)
if (dev_is_pci(dev))
pdev_disable_caps(to_pci_dev(dev));
-out:
- spin_unlock(&dev_data->lock);
-
- spin_unlock_irqrestore(&domain->lock, flags);
}
static struct iommu_device *amd_iommu_probe_device(struct device *dev)
@@ -2485,6 +2468,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
struct protection_domain *domain = to_pdomain(dom);
struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
+ struct pci_dev *pdev;
int ret;
/*
@@ -2517,7 +2501,23 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
}
#endif
- iommu_completion_wait(iommu);
+ pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
+ if (pdev && pdom_is_sva_capable(domain)) {
+ pdev_enable_caps(pdev);
+
+ /*
+ * Device can continue to function even if IOPF
+ * enablement failed. Hence in error path just
+ * disable device PRI support.
+ */
+ if (amd_iommu_iopf_add_device(iommu, dev_data))
+ pdev_disable_cap_pri(pdev);
+ } else if (pdev) {
+ pdev_enable_cap_ats(pdev);
+ }
+
+ /* Update device table */
+ amd_iommu_dev_update_dte(dev_data, true);
return ret;
}
diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c
index 091423bb8aac..7c67d69f0b8c 100644
--- a/drivers/iommu/amd/ppr.c
+++ b/drivers/iommu/amd/ppr.c
@@ -222,8 +222,7 @@ int amd_iommu_iopf_init(struct amd_iommu *iommu)
if (iommu->iopf_queue)
return ret;
- snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
- "amdiommu-%#x-iopfq",
+ snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), "amdvi-%#x",
PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, iommu->devid));
iommu->iopf_queue = iopf_queue_alloc(iommu->iopfq_name);
@@ -249,40 +248,26 @@ void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
struct iommu_dev_data *dev_data)
{
- unsigned long flags;
int ret = 0;
if (!dev_data->pri_enabled)
return ret;
- raw_spin_lock_irqsave(&iommu->lock, flags);
-
- if (!iommu->iopf_queue) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!iommu->iopf_queue)
+ return -EINVAL;
ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev);
if (ret)
- goto out_unlock;
+ return ret;
dev_data->ppr = true;
-
-out_unlock:
- raw_spin_unlock_irqrestore(&iommu->lock, flags);
- return ret;
+ return 0;
}
/* It's assumed that the caller has verified that the device was added to the iopf queue */
void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
struct iommu_dev_data *dev_data)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&iommu->lock, flags);
-
iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev);
dev_data->ppr = false;
-
- raw_spin_unlock_irqrestore(&iommu->lock, flags);
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f731e4b2a417..43520e7275cc 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -686,15 +686,15 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
/* Check the domain allows at least some access to the device... */
if (map) {
- dma_addr_t base = dma_range_map_min(map);
- if (base > domain->geometry.aperture_end ||
+ if (dma_range_map_min(map) > domain->geometry.aperture_end ||
dma_range_map_max(map) < domain->geometry.aperture_start) {
pr_warn("specified DMA range outside IOMMU capability\n");
return -EFAULT;
}
- /* ...then finally give it a kicking to make sure it fits */
- base_pfn = max(base, domain->geometry.aperture_start) >> order;
}
+ /* ...then finally give it a kicking to make sure it fits */
+ base_pfn = max_t(unsigned long, base_pfn,
+ domain->geometry.aperture_start >> order);
/* start_pfn is always nonzero for an already-initialised domain */
mutex_lock(&cookie->mutex);
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 2e9811bf2a4e..fd11a080380c 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2114,12 +2114,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
if (ret)
return ret;
- ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
- if (ret) {
- domain_detach_iommu(domain, iommu);
- return ret;
- }
-
info->domain = domain;
spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
@@ -2137,15 +2131,21 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
else
ret = intel_pasid_setup_second_level(iommu, domain, dev, IOMMU_NO_PASID);
- if (ret) {
- device_block_translation(dev);
- return ret;
- }
+ if (ret)
+ goto out_block_translation;
if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
iommu_enable_pci_caps(info);
+ ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
+ if (ret)
+ goto out_block_translation;
+
return 0;
+
+out_block_translation:
+ device_block_translation(dev);
+ return ret;
}
/**
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 40ebf1726393..3c755d5dad6e 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1846,28 +1846,22 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
- int ret = 0;
if (!info->map)
return -EINVAL;
- raw_spin_lock(&its_dev->event_map.vlpi_lock);
-
if (!its_dev->event_map.vm) {
struct its_vlpi_map *maps;
maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
GFP_ATOMIC);
- if (!maps) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!maps)
+ return -ENOMEM;
its_dev->event_map.vm = info->map->vm;
its_dev->event_map.vlpi_maps = maps;
} else if (its_dev->event_map.vm != info->map->vm) {
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* Get our private copy of the mapping information */
@@ -1899,46 +1893,32 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
its_dev->event_map.nr_vlpis++;
}
-out:
- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
- return ret;
+ return 0;
}
static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_vlpi_map *map;
- int ret = 0;
-
- raw_spin_lock(&its_dev->event_map.vlpi_lock);
map = get_vlpi_map(d);
- if (!its_dev->event_map.vm || !map) {
- ret = -EINVAL;
- goto out;
- }
+ if (!its_dev->event_map.vm || !map)
+ return -EINVAL;
/* Copy our mapping information to the incoming request */
*info->map = *map;
-out:
- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
- return ret;
+ return 0;
}
static int its_vlpi_unmap(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
- int ret = 0;
-
- raw_spin_lock(&its_dev->event_map.vlpi_lock);
- if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+ return -EINVAL;
/* Drop the virtual mapping */
its_send_discard(its_dev, event);
@@ -1962,9 +1942,7 @@ static int its_vlpi_unmap(struct irq_data *d)
kfree(its_dev->event_map.vlpi_maps);
}
-out:
- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
- return ret;
+ return 0;
}
static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
@@ -1992,6 +1970,8 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
if (!is_v4(its_dev->its))
return -EINVAL;
+ guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock);
+
/* Unmap request? */
if (!info)
return its_vlpi_unmap(d);
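
[Editor's note] The irq-gic-v3-its.c hunks above drop the per-helper raw_spin_lock()/raw_spin_unlock() pairs and instead take the lock once in its_irq_set_vcpu_affinity() with guard(raw_spinlock_irq)(). A minimal, hypothetical sketch of that <linux/cleanup.h> idiom follows; the demo_* names are illustrative and not from the driver.

```c
// SPDX-License-Identifier: GPL-2.0
/*
 * Hypothetical sketch (not part of the patch above): a single
 * guard(raw_spinlock_irq)() in the dispatcher lets the per-operation
 * helpers use plain early returns instead of "goto out; unlock".
 */
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

struct demo_map_state {
	raw_spinlock_t lock;
	bool mapped;
};

/* Called with demo_map_state::lock already held by the caller's guard. */
static int demo_do_map(struct demo_map_state *st)
{
	if (st->mapped)
		return -EINVAL;	/* early return: the guard drops the lock */
	st->mapped = true;
	return 0;
}

static int demo_set_state(struct demo_map_state *st, bool map)
{
	/* Lock is released automatically on every return path below. */
	guard(raw_spinlock_irq)(&st->lock);

	if (!map) {
		st->mapped = false;
		return 0;
	}
	return demo_do_map(st);
}
```
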
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index c7ddebf312ad..b1f2080be2be 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -15,6 +15,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
+#include <asm/numa.h>
#define EIOINTC_REG_NODEMAP 0x14a0
#define EIOINTC_REG_IPMAP 0x14c0
@@ -339,7 +340,7 @@ static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
int node;
if (cpu_has_flatmode)
- node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
+ node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
else
node = eiointc_priv[nr_pics - 1]->node;
@@ -431,7 +432,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
goto out_free_handle;
if (cpu_has_flatmode)
- node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
+ node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
else
node = acpi_eiointc->node;
acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index e4b33aed1c97..7c4fe7ab4b83 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -28,7 +28,7 @@
#define LIOINTC_INTC_CHIP_START 0x20
-#define LIOINTC_REG_INTC_STATUS (LIOINTC_INTC_CHIP_START + 0x20)
+#define LIOINTC_REG_INTC_STATUS(core) (LIOINTC_INTC_CHIP_START + 0x20 + (core) * 8)
#define LIOINTC_REG_INTC_EN_STATUS (LIOINTC_INTC_CHIP_START + 0x04)
#define LIOINTC_REG_INTC_ENABLE (LIOINTC_INTC_CHIP_START + 0x08)
#define LIOINTC_REG_INTC_DISABLE (LIOINTC_INTC_CHIP_START + 0x0c)
@@ -217,7 +217,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
goto out_free_priv;
for (i = 0; i < LIOINTC_NUM_CORES; i++)
- priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
+ priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS(i);
for (i = 0; i < LIOINTC_NUM_PARENT; i++)
priv->handler[i].parent_int_map = parent_int_map[i];
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index 9e71c4428814..4f3a12383a1e 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -253,8 +253,9 @@ IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
const unsigned long end)
{
- struct fwnode_handle *fn;
struct acpi_madt_rintc *rintc;
+ struct fwnode_handle *fn;
+ int rc;
rintc = (struct acpi_madt_rintc *)header;
@@ -273,7 +274,11 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
return -ENOMEM;
}
- return riscv_intc_init_common(fn, &riscv_intc_chip);
+ rc = riscv_intc_init_common(fn, &riscv_intc_chip);
+ if (rc)
+ irq_domain_free_fwnode(fn);
+
+ return rc;
}
IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 8fb183ced1e7..9e22f7e378f5 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -85,7 +85,7 @@ struct plic_handler {
struct plic_priv *priv;
};
static int plic_parent_irq __ro_after_init;
-static bool plic_cpuhp_setup_done __ro_after_init;
+static bool plic_global_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
static int plic_irq_set_type(struct irq_data *d, unsigned int type);
@@ -487,10 +487,8 @@ static int plic_probe(struct platform_device *pdev)
unsigned long plic_quirks = 0;
struct plic_handler *handler;
u32 nr_irqs, parent_hwirq;
- struct irq_domain *domain;
struct plic_priv *priv;
irq_hw_number_t hwirq;
- bool cpuhp_setup;
if (is_of_node(dev->fwnode)) {
const struct of_device_id *id;
@@ -549,14 +547,6 @@ static int plic_probe(struct platform_device *pdev)
continue;
}
- /* Find parent domain and register chained handler */
- domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
- if (!plic_parent_irq && domain) {
- plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
- if (plic_parent_irq)
- irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
- }
-
/*
* When running in M-mode we need to ignore the S-mode handler.
* Here we assume it always comes later, but that might be a
@@ -597,25 +587,35 @@ done:
goto fail_cleanup_contexts;
/*
- * We can have multiple PLIC instances so setup cpuhp state
+ * We can have multiple PLIC instances so setup global state
* and register syscore operations only once after context
* handlers of all online CPUs are initialized.
*/
- if (!plic_cpuhp_setup_done) {
- cpuhp_setup = true;
+ if (!plic_global_setup_done) {
+ struct irq_domain *domain;
+ bool global_setup = true;
+
for_each_online_cpu(cpu) {
handler = per_cpu_ptr(&plic_handlers, cpu);
if (!handler->present) {
- cpuhp_setup = false;
+ global_setup = false;
break;
}
}
- if (cpuhp_setup) {
+
+ if (global_setup) {
+ /* Find parent domain and register chained handler */
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
+ if (domain)
+ plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
+ if (plic_parent_irq)
+ irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
+
cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
"irqchip/sifive/plic:starting",
plic_starting_cpu, plic_dying_cpu);
register_syscore_ops(&plic_irq_syscore_ops);
- plic_cpuhp_setup_done = true;
+ plic_global_setup_done = true;
}
}
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 24fcff682b24..ba1be15cfd8e 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -552,12 +552,6 @@ int led_classdev_register_ext(struct device *parent,
led_init_core(led_cdev);
#ifdef CONFIG_LEDS_TRIGGERS
- /*
- * If no default trigger was given and hw_control_trigger is set,
- * make it the default trigger.
- */
- if (!led_cdev->default_trigger && led_cdev->hw_control_trigger)
- led_cdev->default_trigger = led_cdev->hw_control_trigger;
led_trigger_set_default(led_cdev);
#endif
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 7c90bac3de21..4acf5612487c 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -850,7 +850,6 @@ static int xlnx_mbox_init_sgi(struct platform_device *pdev,
return ret;
}
- irq_to_desc(pdata->virq_sgi);
irq_set_status_flags(pdata->virq_sgi, IRQ_PER_CPU);
/* Setup function for the CPU hot-plug cases */
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 1ae37e693de0..a5f8ab9a0910 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -8,11 +8,6 @@
#include "mcb-internal.h"
-struct mcb_parse_priv {
- phys_addr_t mapbase;
- void __iomem *base;
-};
-
#define for_each_chameleon_cell(dtype, p) \
for ((dtype) = get_next_dtype((p)); \
(dtype) != CHAMELEON_DTYPE_END; \
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index 53d9202ff9a7..3b634ea318c7 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -45,6 +45,14 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
pci_set_master(pdev);
+ flags = pci_resource_flags(pdev, 0);
+ if (flags & IORESOURCE_IO) {
+ ret = -ENOTSUPP;
+ dev_err(&pdev->dev,
+ "IO mapped PCI devices are not supported\n");
+ goto out_disable;
+ }
+
priv->mapbase = pci_resource_start(pdev, 0);
if (!priv->mapbase) {
dev_err(&pdev->dev, "No PCI resource\n");
@@ -68,14 +76,6 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_disable;
}
- flags = pci_resource_flags(pdev, 0);
- if (flags & IORESOURCE_IO) {
- ret = -ENOTSUPP;
- dev_err(&pdev->dev,
- "IO mapped PCI devices are not supported\n");
- goto out_disable;
- }
-
pci_set_drvdata(pdev, priv);
priv->bus = mcb_alloc_bus(&pdev->dev);
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index ce13c272c387..48ce750bf70a 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -129,12 +129,9 @@ static inline bool can_inc_bucket_gen(struct bucket *b)
bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
- BUG_ON(!ca->set->gc_mark_valid);
-
- return (!GC_MARK(b) ||
- GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
- !atomic_read(&b->pin) &&
- can_inc_bucket_gen(b);
+ return (ca->set->gc_mark_valid || b->reclaimable_in_gc) &&
+ ((!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
+ !atomic_read(&b->pin) && can_inc_bucket_gen(b));
}
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
@@ -148,6 +145,7 @@ void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
bch_inc_gen(ca, b);
b->prio = INITIAL_PRIO;
atomic_inc(&b->pin);
+ b->reclaimable_in_gc = 0;
}
static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
@@ -352,8 +350,7 @@ static int bch_allocator_thread(void *arg)
*/
retry_invalidate:
- allocator_wait(ca, ca->set->gc_mark_valid &&
- !ca->invalidate_needs_gc);
+ allocator_wait(ca, !ca->invalidate_needs_gc);
invalidate_buckets(ca);
/*
@@ -501,8 +498,8 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
ca = c->cache;
b = bch_bucket_alloc(ca, reserve, wait);
- if (b == -1)
- goto err;
+ if (b < 0)
+ return -1;
k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
bucket_to_sector(c, b),
@@ -511,10 +508,6 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
SET_KEY_PTRS(k, 1);
return 0;
-err:
- bch_bucket_free(c, k);
- bkey_put(c, k);
- return -1;
}
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 4e6afa89921f..1d33e40d26ea 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -200,6 +200,7 @@ struct bucket {
uint8_t gen;
uint8_t last_gc; /* Most out of date gen in the btree */
uint16_t gc_mark; /* Bitfield used by GC. See below for field */
+ uint16_t reclaimable_in_gc:1;
};
/*
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index d011a7154d33..4e6ccf2c8a0b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1741,18 +1741,20 @@ static void btree_gc_start(struct cache_set *c)
mutex_lock(&c->bucket_lock);
- c->gc_mark_valid = 0;
c->gc_done = ZERO_KEY;
ca = c->cache;
for_each_bucket(b, ca) {
b->last_gc = b->gen;
+ if (bch_can_invalidate_bucket(ca, b))
+ b->reclaimable_in_gc = 1;
if (!atomic_read(&b->pin)) {
SET_GC_MARK(b, 0);
SET_GC_SECTORS_USED(b, 0);
}
}
+ c->gc_mark_valid = 0;
mutex_unlock(&c->bucket_lock);
}
@@ -1809,6 +1811,9 @@ static void bch_btree_gc_finish(struct cache_set *c)
for_each_bucket(b, ca) {
c->need_gc = max(c->need_gc, bucket_gc_gen(b));
+ if (b->reclaimable_in_gc)
+ b->reclaimable_in_gc = 0;
+
if (atomic_read(&b->pin))
continue;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 83d112bd2b1c..af345dc6fde1 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -369,10 +369,24 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
struct io *i;
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
- c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
(bio_op(bio) == REQ_OP_DISCARD))
goto skip;
+ if (c->gc_stats.in_use > CUTOFF_CACHE_ADD) {
+ /*
+ * If cached buckets are all clean now, 'true' will be
+ * returned and all requests will bypass the cache device.
+ * Then c->sectors_to_gc never has a chance to go negative,
+ * the gc thread never wakes up, and caching stops working
+ * for good. Call force_wake_up_gc() here to avoid that.
+ */
+ if (BDEV_STATE(&dc->sb) == BDEV_STATE_CLEAN &&
+ c->gc_mark_valid)
+ force_wake_up_gc(c);
+
+ goto skip;
+ }
+
if (mode == CACHE_MODE_NONE ||
(mode == CACHE_MODE_WRITEAROUND &&
op_is_write(bio_op(bio))))
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index cc66a27c363a..b2d5246cff21 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1981,10 +1981,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (!dm_table_supports_secure_erase(t))
limits->max_secure_erase_sectors = 0;
- r = queue_limits_set(q, limits);
- if (r)
- return r;
-
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
wc = true;
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
@@ -2036,15 +2032,16 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
* For a zoned target, setup the zones related queue attributes
* and resources necessary for zone append emulation if necessary.
*/
- if (blk_queue_is_zoned(q)) {
- r = dm_set_zones_restrictions(t, q);
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) {
+ r = dm_set_zones_restrictions(t, q, limits);
if (r)
return r;
- if (blk_queue_is_zoned(q) &&
- !static_key_enabled(&zoned_enabled.key))
- static_branch_enable(&zoned_enabled);
}
+ r = queue_limits_set(q, limits);
+ if (r)
+ return r;
+
dm_update_crypto_profile(q, t);
/*
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 8e6bcb0d786a..5d66d916730e 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -160,37 +160,6 @@ static int dm_check_zoned_cb(struct blk_zone *zone, unsigned int idx,
return 0;
}
-static int dm_check_zoned(struct mapped_device *md, struct dm_table *t)
-{
- struct gendisk *disk = md->disk;
- unsigned int nr_conv_zones = 0;
- int ret;
-
- /* Count conventional zones */
- md->zone_revalidate_map = t;
- ret = dm_blk_report_zones(disk, 0, UINT_MAX,
- dm_check_zoned_cb, &nr_conv_zones);
- md->zone_revalidate_map = NULL;
- if (ret < 0) {
- DMERR("Check zoned failed %d", ret);
- return ret;
- }
-
- /*
- * If we only have conventional zones, expose the mapped device as
- * a regular device.
- */
- if (nr_conv_zones >= ret) {
- disk->queue->limits.max_open_zones = 0;
- disk->queue->limits.max_active_zones = 0;
- disk->queue->limits.zoned = false;
- clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- disk->nr_zones = 0;
- }
-
- return 0;
-}
-
/*
* Revalidate the zones of a mapped device to initialize resource necessary
* for zone append emulation. Note that we cannot simply use the block layer
@@ -251,9 +220,12 @@ static bool dm_table_supports_zone_append(struct dm_table *t)
return true;
}
-int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
+int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *lim)
{
struct mapped_device *md = t->md;
+ struct gendisk *disk = md->disk;
+ unsigned int nr_conv_zones = 0;
int ret;
/*
@@ -265,21 +237,37 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
} else {
set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- blk_queue_max_zone_append_sectors(q, 0);
+ lim->max_zone_append_sectors = 0;
}
if (!get_capacity(md->disk))
return 0;
/*
- * Check that the mapped device will indeed be zoned, that is, that it
- * has sequential write required zones.
+ * Count conventional zones to check that the mapped device will indeed
+ * have sequential write required zones.
*/
- ret = dm_check_zoned(md, t);
- if (ret)
+ md->zone_revalidate_map = t;
+ ret = dm_blk_report_zones(disk, 0, UINT_MAX,
+ dm_check_zoned_cb, &nr_conv_zones);
+ md->zone_revalidate_map = NULL;
+ if (ret < 0) {
+ DMERR("Check zoned failed %d", ret);
return ret;
- if (!blk_queue_is_zoned(q))
+ }
+
+ /*
+ * If we only have conventional zones, expose the mapped device as
+ * a regular device.
+ */
+ if (nr_conv_zones >= ret) {
+ lim->max_open_zones = 0;
+ lim->max_active_zones = 0;
+ lim->zoned = false;
+ clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ disk->nr_zones = 0;
return 0;
+ }
if (!md->disk->nr_zones) {
DMINFO("%s using %s zone append",
@@ -287,7 +275,13 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
queue_emulates_zone_append(q) ? "emulated" : "native");
}
- return dm_revalidate_zones(md, t);
+ ret = dm_revalidate_zones(md, t);
+ if (ret < 0)
+ return ret;
+
+ if (!static_key_enabled(&zoned_enabled.key))
+ static_branch_enable(&zoned_enabled);
+ return 0;
}
/*
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index e0c57f19839b..53ef8207fe2c 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -101,7 +101,8 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
/*
* Zoned targets related functions.
*/
-int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q);
+int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *lim);
void dm_zone_endio(struct dm_io *io, struct bio *clone);
#ifdef CONFIG_BLK_DEV_ZONED
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
index 40a8ebfcfce2..4bd4e324abc9 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
@@ -301,10 +301,10 @@ static int ipu6_isys_stream_start(struct ipu6_isys_video *av,
out_requeue:
if (bl && bl->nbufs)
ipu6_isys_buffer_list_queue(bl,
- (IPU6_ISYS_BUFFER_LIST_FL_INCOMING |
- error) ?
+ IPU6_ISYS_BUFFER_LIST_FL_INCOMING |
+ (error ?
IPU6_ISYS_BUFFER_LIST_FL_SET_STATE :
- 0, error ? VB2_BUF_STATE_ERROR :
+ 0), error ? VB2_BUF_STATE_ERROR :
VB2_BUF_STATE_QUEUED);
flush_firmware_streamon_fail(stream);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c
index 5992138c7290..8b9b77719bb1 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c
@@ -678,6 +678,12 @@ static int isys_notifier_bound(struct v4l2_async_notifier *notifier,
container_of(asc, struct sensor_async_sd, asc);
int ret;
+ if (s_asd->csi2.port >= isys->pdata->ipdata->csi2.nports) {
+ dev_err(&isys->adev->auxdev.dev, "invalid csi2 port %u\n",
+ s_asd->csi2.port);
+ return -EINVAL;
+ }
+
ret = ipu_bridge_instantiate_vcm(sd->dev);
if (ret) {
dev_err(&isys->adev->auxdev.dev, "instantiate vcm failed\n");
@@ -925,39 +931,18 @@ static const struct dev_pm_ops isys_pm_ops = {
.resume = isys_resume,
};
-static void isys_remove(struct auxiliary_device *auxdev)
+static void free_fw_msg_bufs(struct ipu6_isys *isys)
{
- struct ipu6_bus_device *adev = auxdev_to_adev(auxdev);
- struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev);
- struct ipu6_device *isp = adev->isp;
+ struct device *dev = &isys->adev->auxdev.dev;
struct isys_fw_msgs *fwmsg, *safe;
- unsigned int i;
list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head)
- dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs),
- fwmsg, fwmsg->dma_addr, 0);
+ dma_free_attrs(dev, sizeof(struct isys_fw_msgs), fwmsg,
+ fwmsg->dma_addr, 0);
list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head)
- dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs),
- fwmsg, fwmsg->dma_addr, 0);
-
- isys_unregister_devices(isys);
- isys_notifier_cleanup(isys);
-
- cpu_latency_qos_remove_request(&isys->pm_qos);
-
- if (!isp->secure_mode) {
- ipu6_cpd_free_pkg_dir(adev);
- ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt);
- release_firmware(adev->fw);
- }
-
- for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++)
- mutex_destroy(&isys->streams[i].mutex);
-
- isys_iwake_watermark_cleanup(isys);
- mutex_destroy(&isys->stream_mutex);
- mutex_destroy(&isys->mutex);
+ dma_free_attrs(dev, sizeof(struct isys_fw_msgs), fwmsg,
+ fwmsg->dma_addr, 0);
}
static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount)
@@ -1140,12 +1125,14 @@ static int isys_probe(struct auxiliary_device *auxdev,
ret = isys_register_devices(isys);
if (ret)
- goto out_remove_pkg_dir_shared_buffer;
+ goto free_fw_msg_bufs;
ipu6_mmu_hw_cleanup(adev->mmu);
return 0;
+free_fw_msg_bufs:
+ free_fw_msg_bufs(isys);
out_remove_pkg_dir_shared_buffer:
if (!isp->secure_mode)
ipu6_cpd_free_pkg_dir(adev);
@@ -1167,6 +1154,34 @@ release_firmware:
return ret;
}
+static void isys_remove(struct auxiliary_device *auxdev)
+{
+ struct ipu6_bus_device *adev = auxdev_to_adev(auxdev);
+ struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev);
+ struct ipu6_device *isp = adev->isp;
+ unsigned int i;
+
+ free_fw_msg_bufs(isys);
+
+ isys_unregister_devices(isys);
+ isys_notifier_cleanup(isys);
+
+ cpu_latency_qos_remove_request(&isys->pm_qos);
+
+ if (!isp->secure_mode) {
+ ipu6_cpd_free_pkg_dir(adev);
+ ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt);
+ release_firmware(adev->fw);
+ }
+
+ for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++)
+ mutex_destroy(&isys->streams[i].mutex);
+
+ isys_iwake_watermark_cleanup(isys);
+ mutex_destroy(&isys->stream_mutex);
+ mutex_destroy(&isys->mutex);
+}
+
struct fwmsg {
int type;
char *msg;
diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c
index d2bebd208461..bbd646378ab3 100644
--- a/drivers/media/pci/intel/ipu6/ipu6.c
+++ b/drivers/media/pci/intel/ipu6/ipu6.c
@@ -285,7 +285,7 @@ EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, INTEL_IPU6);
#define IPU6_ISYS_CSI2_NPORTS 4
#define IPU6SE_ISYS_CSI2_NPORTS 4
#define IPU6_TGL_ISYS_CSI2_NPORTS 8
-#define IPU6EP_MTL_ISYS_CSI2_NPORTS 4
+#define IPU6EP_MTL_ISYS_CSI2_NPORTS 6
static void ipu6_internal_pdata_init(struct ipu6_device *isp)
{
@@ -727,9 +727,6 @@ static void ipu6_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-
release_firmware(isp->cpd_fw);
ipu6_mmu_cleanup(psys_mmu);
diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c
index 89b582a221ab..f04a89584334 100644
--- a/drivers/media/pci/intel/ivsc/mei_csi.c
+++ b/drivers/media/pci/intel/ivsc/mei_csi.c
@@ -677,10 +677,13 @@ static int mei_csi_probe(struct mei_cl_device *cldev,
return -ENODEV;
ret = ipu_bridge_init(&ipu->dev, ipu_bridge_parse_ssdb);
+ put_device(&ipu->dev);
if (ret < 0)
return ret;
- if (WARN_ON(!dev_fwnode(dev)))
+ if (!dev_fwnode(dev)) {
+ dev_err(dev, "mei-csi probed without device fwnode!\n");
return -ENXIO;
+ }
csi = devm_kzalloc(dev, sizeof(struct mei_csi), GFP_KERNEL);
if (!csi)
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
index 60498a5abebf..ab4f07e2e560 100644
--- a/drivers/media/pci/mgb4/mgb4_core.c
+++ b/drivers/media/pci/mgb4/mgb4_core.c
@@ -642,9 +642,6 @@ static void mgb4_remove(struct pci_dev *pdev)
struct mgb4_dev *mgbdev = pci_get_drvdata(pdev);
int i;
-#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(mgbdev->debugfs);
-#endif
#if IS_REACHABLE(CONFIG_HWMON)
hwmon_device_unregister(mgbdev->hwmon_dev);
#endif
@@ -659,6 +656,10 @@ static void mgb4_remove(struct pci_dev *pdev)
if (mgbdev->vin[i])
mgb4_vin_free(mgbdev->vin[i]);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(mgbdev->debugfs);
+#endif
+
device_remove_groups(&mgbdev->pdev->dev, mgb4_pci_groups);
free_spi(mgbdev);
free_i2c(mgbdev);
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index 1280696f65f2..e80fb4ebfda6 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -5152,7 +5152,7 @@ struct saa7134_board saa7134_boards[] = {
},
},
[SAA7134_BOARD_AVERMEDIA_STUDIO_507UA] = {
- /* Andy Shevchenko <andy@smile.org.ua> */
+ /* Andy Shevchenko <andy@kernel.org> */
.name = "Avermedia AVerTV Studio 507UA",
.audio_clock = 0x00187de7,
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* Should be MK5 */
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index f2c0f144c0fc..dacd3c96c9f5 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -210,6 +210,7 @@ static const struct regmap_access_table axp313a_volatile_table = {
static const struct regmap_range axp717_writeable_ranges[] = {
regmap_reg_range(AXP717_IRQ0_EN, AXP717_IRQ4_EN),
+ regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE),
regmap_reg_range(AXP717_DCDC_OUTPUT_CONTROL, AXP717_CPUSLDO_CONTROL),
};
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index faf983680040..ca0c6a728a00 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -602,4 +602,5 @@ source "drivers/misc/cardreader/Kconfig"
source "drivers/misc/uacce/Kconfig"
source "drivers/misc/pvpanic/Kconfig"
source "drivers/misc/mchp_pci1xxxx/Kconfig"
+source "drivers/misc/keba/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 153a3f4837e8..af125aa25a50 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -69,3 +69,4 @@ obj-$(CONFIG_TMR_INJECT) += xilinx_tmr_inject.o
obj-$(CONFIG_TPS6594_ESM) += tps6594-esm.o
obj-$(CONFIG_TPS6594_PFSM) += tps6594-pfsm.o
obj-$(CONFIG_NSM) += nsm.o
+obj-y += keba/
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index 693f0e539f37..6db4db975b9a 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -285,7 +285,7 @@ static UNIVERSAL_DEV_PM_OPS(apds9802als_pm_ops, apds9802als_suspend,
#endif /* CONFIG_PM */
static const struct i2c_device_id apds9802als_id[] = {
- { DRIVER_NAME, 0 },
+ { DRIVER_NAME },
{ }
};
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index 92b92be91d60..6d4edd69db12 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -625,15 +625,15 @@ static ssize_t apds990x_lux_show(struct device *dev,
struct apds990x_chip *chip = dev_get_drvdata(dev);
ssize_t ret;
u32 result;
- long timeout;
+ long time_left;
if (pm_runtime_suspended(dev))
return -EIO;
- timeout = wait_event_interruptible_timeout(chip->wait,
- !chip->lux_wait_fresh_res,
- msecs_to_jiffies(APDS_TIMEOUT));
- if (!timeout)
+ time_left = wait_event_interruptible_timeout(chip->wait,
+ !chip->lux_wait_fresh_res,
+ msecs_to_jiffies(APDS_TIMEOUT));
+ if (!time_left)
return -EIO;
mutex_lock(&chip->mutex);
@@ -1253,7 +1253,7 @@ static int apds990x_runtime_resume(struct device *dev)
#endif
static const struct i2c_device_id apds990x_id[] = {
- {"apds990x", 0 },
+ { "apds990x" },
{}
};
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 1629b62fd052..0c052b05ab6a 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -680,15 +680,15 @@ static ssize_t bh1770_lux_result_show(struct device *dev,
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
ssize_t ret;
- long timeout;
+ long time_left;
if (pm_runtime_suspended(dev))
return -EIO; /* Chip is not enabled at all */
- timeout = wait_event_interruptible_timeout(chip->wait,
- !chip->lux_wait_result,
- msecs_to_jiffies(BH1770_TIMEOUT));
- if (!timeout)
+ time_left = wait_event_interruptible_timeout(chip->wait,
+ !chip->lux_wait_result,
+ msecs_to_jiffies(BH1770_TIMEOUT));
+ if (!time_left)
return -EIO;
mutex_lock(&chip->mutex);
@@ -1361,8 +1361,8 @@ static int bh1770_runtime_resume(struct device *dev)
#endif
static const struct i2c_device_id bh1770_id[] = {
- {"bh1770glc", 0 },
- {"sfh7770", 0 },
+ { "bh1770glc" },
+ { "sfh7770" },
{}
};
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 5f8dcd0e3848..4175df7ef011 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -271,7 +271,7 @@ static void ds1682_remove(struct i2c_client *client)
}
static const struct i2c_device_id ds1682_id[] = {
- { "ds1682", 0 },
+ { "ds1682" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds1682_id);
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 4e61ac18cc96..9df12399bda3 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -109,6 +109,8 @@ config EEPROM_IDT_89HPESX
config EEPROM_EE1004
tristate "SPD EEPROMs on DDR4 memory modules"
depends on I2C && SYSFS
+ select NVMEM
+ select NVMEM_SYSFS
help
Enable this driver to get read support to SPD EEPROMs following
the JEDEC EE1004 standard. These are typically found on DDR4
diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c
index f1f766b70965..88888485e6f8 100644
--- a/drivers/misc/eeprom/digsy_mtc_eeprom.c
+++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c
@@ -14,13 +14,12 @@
* and delete this driver.
*/
-#include <linux/gpio.h>
#include <linux/gpio/machine.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_gpio.h>
-#include <linux/eeprom_93xx46.h>
#define GPIO_EEPROM_CLK 216
#define GPIO_EEPROM_CS 210
@@ -29,22 +28,13 @@
#define GPIO_EEPROM_OE 255
#define EE_SPI_BUS_NUM 1
-static void digsy_mtc_op_prepare(void *p)
-{
- /* enable */
- gpio_set_value(GPIO_EEPROM_OE, 0);
-}
-
-static void digsy_mtc_op_finish(void *p)
-{
- /* disable */
- gpio_set_value(GPIO_EEPROM_OE, 1);
-}
+static const struct property_entry digsy_mtc_spi_properties[] = {
+ PROPERTY_ENTRY_U32("data-size", 8),
+ { }
+};
-struct eeprom_93xx46_platform_data digsy_mtc_eeprom_data = {
- .flags = EE_ADDR8,
- .prepare = digsy_mtc_op_prepare,
- .finish = digsy_mtc_op_finish,
+static const struct software_node digsy_mtc_spi_node = {
+ .properties = digsy_mtc_spi_properties,
};
static struct spi_gpio_platform_data eeprom_spi_gpio_data = {
@@ -70,18 +60,19 @@ static struct gpiod_lookup_table eeprom_spi_gpiod_table = {
"miso", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CS,
"cs", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_OE,
+ "select", GPIO_ACTIVE_LOW),
{ },
},
};
static struct spi_board_info digsy_mtc_eeprom_info[] __initdata = {
{
- .modalias = "93xx46",
+ .modalias = "eeprom-93xx46",
.max_speed_hz = 1000000,
.bus_num = EE_SPI_BUS_NUM,
.chip_select = 0,
.mode = SPI_MODE_0,
- .platform_data = &digsy_mtc_eeprom_data,
},
};
@@ -89,15 +80,18 @@ static int __init digsy_mtc_eeprom_devices_init(void)
{
int ret;
- ret = gpio_request_one(GPIO_EEPROM_OE, GPIOF_OUT_INIT_HIGH,
- "93xx46 EEPROMs OE");
- if (ret) {
- pr_err("can't request gpio %d\n", GPIO_EEPROM_OE);
- return ret;
- }
gpiod_add_lookup_table(&eeprom_spi_gpiod_table);
spi_register_board_info(digsy_mtc_eeprom_info,
ARRAY_SIZE(digsy_mtc_eeprom_info));
- return platform_device_register(&digsy_mtc_eeprom);
+
+ ret = device_add_software_node(&digsy_mtc_eeprom.dev, &digsy_mtc_spi_node);
+ if (ret)
+ return ret;
+
+ ret = platform_device_register(&digsy_mtc_eeprom);
+ if (ret)
+ device_remove_software_node(&digsy_mtc_eeprom.dev);
+
+ return ret;
}
device_initcall(digsy_mtc_eeprom_devices_init);
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index 21feebc3044c..d4aeeb2b2169 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -9,12 +9,14 @@
* Copyright (C) 2008 Wolfram Sang, Pengutronix
*/
+#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/nvmem-provider.h>
/*
* DDR4 memory modules use special EEPROMs following the Jedec EE1004
@@ -52,7 +54,7 @@ static struct ee1004_bus_data {
} ee1004_bus_data[EE1004_MAX_BUSSES];
static const struct i2c_device_id ee1004_ids[] = {
- { "ee1004", 0 },
+ { "ee1004" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ee1004_ids);
@@ -144,13 +146,17 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
return i2c_smbus_read_i2c_block_data_or_emulated(client, offset, count, buf);
}
-static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static int ee1004_read(void *priv, unsigned int off, void *val, size_t count)
{
- struct i2c_client *client = kobj_to_i2c_client(kobj);
- size_t requested = count;
- int ret = 0;
+ struct i2c_client *client = priv;
+ char *buf = val;
+ int ret;
+
+ if (unlikely(!count))
+ return count;
+
+ if (off + count > EE1004_EEPROM_SIZE)
+ return -EINVAL;
/*
* Read data from chip, protecting against concurrent access to
@@ -160,42 +166,52 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
while (count) {
ret = ee1004_eeprom_read(client, buf, off, count);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ mutex_unlock(&ee1004_bus_lock);
+ return ret;
+ }
buf += ret;
off += ret;
count -= ret;
}
-out:
+
mutex_unlock(&ee1004_bus_lock);
- return ret < 0 ? ret : requested;
+ return 0;
}
-static BIN_ATTR_RO(eeprom, EE1004_EEPROM_SIZE);
-
-static struct bin_attribute *ee1004_attrs[] = {
- &bin_attr_eeprom,
- NULL
-};
-
-BIN_ATTRIBUTE_GROUPS(ee1004);
-
static void ee1004_probe_temp_sensor(struct i2c_client *client)
{
struct i2c_board_info info = { .type = "jc42" };
- u8 byte14;
+ unsigned short addr = 0x18 | (client->addr & 7);
+ unsigned short addr_list[] = { addr, I2C_CLIENT_END };
+ u8 data[2];
int ret;
/* byte 14, bit 7 is set if temp sensor is present */
- ret = ee1004_eeprom_read(client, &byte14, 14, 1);
- if (ret != 1 || !(byte14 & BIT(7)))
+ ret = ee1004_eeprom_read(client, data, 14, 1);
+ if (ret != 1)
return;
- info.addr = 0x18 | (client->addr & 7);
-
- i2c_new_client_device(client->adapter, &info);
+ if (!(data[0] & BIT(7))) {
+ /*
+ * If the SPD data suggests that there is no temperature
+ * sensor, it may still be there for SPD revision 1.0.
+ * See SPD Annex L, Revision 1 and 2, for details.
+ * Check DIMM type and SPD revision; if it is a DDR4
+ * with SPD revision 1.0, check the thermal sensor address
+ * and instantiate the jc42 driver if a chip is found at
+ * that address.
+ * It is not necessary to check if there is a chip at the
+ * temperature sensor address since i2c_new_scanned_device()
+ * will do that and return silently if no chip is found.
+ */
+ ret = ee1004_eeprom_read(client, data, 1, 2);
+ if (ret != 2 || data[0] != 0x10 || data[1] != 0x0c)
+ return;
+ }
+ i2c_new_scanned_device(client->adapter, &info, addr_list, NULL);
}
static void ee1004_cleanup(int idx, struct ee1004_bus_data *bd)
@@ -207,9 +223,36 @@ static void ee1004_cleanup(int idx, struct ee1004_bus_data *bd)
}
}
+static void ee1004_cleanup_bus_data(void *data)
+{
+ struct ee1004_bus_data *bd = data;
+
+ /* Remove page select clients if this is the last device */
+ mutex_lock(&ee1004_bus_lock);
+ ee1004_cleanup(EE1004_NUM_PAGES, bd);
+ mutex_unlock(&ee1004_bus_lock);
+}
+
static int ee1004_probe(struct i2c_client *client)
{
+ struct nvmem_config config = {
+ .dev = &client->dev,
+ .name = dev_name(&client->dev),
+ .id = NVMEM_DEVID_NONE,
+ .owner = THIS_MODULE,
+ .type = NVMEM_TYPE_EEPROM,
+ .read_only = true,
+ .root_only = false,
+ .reg_read = ee1004_read,
+ .size = EE1004_EEPROM_SIZE,
+ .word_size = 1,
+ .stride = 1,
+ .priv = client,
+ .compat = true,
+ .base_dev = &client->dev,
+ };
struct ee1004_bus_data *bd;
+ struct nvmem_device *ndev;
int err, cnr = 0;
/* Make sure we can operate on this adapter */
@@ -228,6 +271,10 @@ static int ee1004_probe(struct i2c_client *client)
"Only %d busses supported", EE1004_MAX_BUSSES);
}
+ err = devm_add_action_or_reset(&client->dev, ee1004_cleanup_bus_data, bd);
+ if (err < 0)
+ return err;
+
i2c_set_clientdata(client, bd);
if (++bd->dev_count == 1) {
@@ -237,16 +284,18 @@ static int ee1004_probe(struct i2c_client *client)
cl = i2c_new_dummy_device(client->adapter, EE1004_ADDR_SET_PAGE + cnr);
if (IS_ERR(cl)) {
- err = PTR_ERR(cl);
- goto err_clients;
+ mutex_unlock(&ee1004_bus_lock);
+ return PTR_ERR(cl);
}
bd->set_page[cnr] = cl;
}
/* Remember current page to avoid unneeded page select */
err = ee1004_get_current_page(bd);
- if (err < 0)
- goto err_clients;
+ if (err < 0) {
+ mutex_unlock(&ee1004_bus_lock);
+ return err;
+ }
dev_dbg(&client->dev, "Currently selected page: %d\n", err);
bd->current_page = err;
}
@@ -255,27 +304,15 @@ static int ee1004_probe(struct i2c_client *client)
mutex_unlock(&ee1004_bus_lock);
+ ndev = devm_nvmem_register(&client->dev, &config);
+ if (IS_ERR(ndev))
+ return PTR_ERR(ndev);
+
dev_info(&client->dev,
"%u byte EE1004-compliant SPD EEPROM, read-only\n",
EE1004_EEPROM_SIZE);
return 0;
-
- err_clients:
- ee1004_cleanup(cnr, bd);
- mutex_unlock(&ee1004_bus_lock);
-
- return err;
-}
-
-static void ee1004_remove(struct i2c_client *client)
-{
- struct ee1004_bus_data *bd = i2c_get_clientdata(client);
-
- /* Remove page select clients if this is the last device */
- mutex_lock(&ee1004_bus_lock);
- ee1004_cleanup(EE1004_NUM_PAGES, bd);
- mutex_unlock(&ee1004_bus_lock);
}
/*-------------------------------------------------------------------------*/
@@ -283,10 +320,8 @@ static void ee1004_remove(struct i2c_client *client)
static struct i2c_driver ee1004_driver = {
.driver = {
.name = "ee1004",
- .dev_groups = ee1004_groups,
},
.probe = ee1004_probe,
- .remove = ee1004_remove,
.id_table = ee1004_ids,
};
module_i2c_driver(ee1004_driver);
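
[Editor's note] The ee1004 rework above replaces the explicit .remove() callback with managed cleanup: the bus-data teardown is registered via devm_add_action_or_reset() before anything that can fail, and the sysfs bin attribute becomes a devm-registered nvmem provider. Below is a small hypothetical sketch of the ordering that makes the later error paths goto-free; the demo_* names are illustrative, not from the driver.

```c
// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch of the devm cleanup ordering used in the ee1004 change. */
#include <linux/device.h>
#include <linux/i2c.h>

struct demo_bus_data {
	int dev_count;
};

static void demo_drop_bus_ref(void *data)
{
	struct demo_bus_data *bd = data;

	/* Runs automatically on probe failure or device unbind. */
	bd->dev_count--;
}

static int demo_probe(struct i2c_client *client, struct demo_bus_data *bd)
{
	int err;

	bd->dev_count++;

	/*
	 * Register the undo action first: any failure after this point
	 * (dummy clients, nvmem registration, ...) is cleaned up for free.
	 */
	err = devm_add_action_or_reset(&client->dev, demo_drop_bus_ref, bd);
	if (err)
		return err;

	/* ... further setup can simply "return err;" on failure ... */
	return 0;
}
```
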
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 45c8ae0db8f9..e2221be88445 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -5,19 +5,42 @@
* (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de>
*/
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
-#include <linux/kernel.h>
+#include <linux/kstrtox.h>
#include <linux/log2.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
+#include <linux/string_choices.h>
+
#include <linux/nvmem-provider.h>
-#include <linux/eeprom_93xx46.h>
+
+struct eeprom_93xx46_platform_data {
+ unsigned char flags;
+#define EE_ADDR8 0x01 /* 8 bit addr. cfg */
+#define EE_ADDR16 0x02 /* 16 bit addr. cfg */
+#define EE_READONLY 0x08 /* forbid writing */
+#define EE_SIZE1K 0x10 /* 1 kb of data, that is a 93xx46 */
+#define EE_SIZE2K 0x20 /* 2 kb of data, that is a 93xx56 */
+#define EE_SIZE4K 0x40 /* 4 kb of data, that is a 93xx66 */
+
+ unsigned int quirks;
+/* Single word read transfers only; no sequential read. */
+#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
+/* Instructions such as EWEN are (addrlen + 2) in length. */
+#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
+/* Add extra cycle after address during a read */
+#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2)
+
+ struct gpio_desc *select;
+};
#define OP_START 0x4
#define OP_WRITE (OP_START | 0x1)
@@ -96,15 +119,14 @@ static int eeprom_93xx46_read(void *priv, unsigned int off,
mutex_lock(&edev->lock);
- if (edev->pdata->prepare)
- edev->pdata->prepare(edev);
+ gpiod_set_value_cansleep(edev->pdata->select, 1);
/* The opcode in front of the address is three bits. */
bits = edev->addrlen + 3;
while (count) {
struct spi_message m;
- struct spi_transfer t[2] = { { 0 } };
+ struct spi_transfer t[2] = {};
u16 cmd_addr = OP_READ << edev->addrlen;
size_t nbytes = count;
@@ -126,25 +148,23 @@ static int eeprom_93xx46_read(void *priv, unsigned int off,
bits += 1;
}
- spi_message_init(&m);
-
t[0].tx_buf = (char *)&cmd_addr;
t[0].len = 2;
t[0].bits_per_word = bits;
- spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
t[1].len = count;
t[1].bits_per_word = 8;
- spi_message_add_tail(&t[1], &m);
+
+ spi_message_init_with_transfers(&m, t, ARRAY_SIZE(t));
err = spi_sync(edev->spi, &m);
/* have to wait at least Tcsl ns */
ndelay(250);
if (err) {
- dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
- nbytes, (int)off, err);
+ dev_err(&edev->spi->dev, "read %zu bytes at %u: err. %d\n",
+ nbytes, off, err);
break;
}
@@ -153,8 +173,7 @@ static int eeprom_93xx46_read(void *priv, unsigned int off,
count -= nbytes;
}
- if (edev->pdata->finish)
- edev->pdata->finish(edev);
+ gpiod_set_value_cansleep(edev->pdata->select, 0);
mutex_unlock(&edev->lock);
@@ -164,7 +183,7 @@ static int eeprom_93xx46_read(void *priv, unsigned int off,
static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
{
struct spi_message m;
- struct spi_transfer t;
+ struct spi_transfer t = {};
int bits, ret;
u16 cmd_addr;
@@ -182,31 +201,27 @@ static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
bits += 2;
}
- dev_dbg(&edev->spi->dev, "ew%s cmd 0x%04x, %d bits\n",
- is_on ? "en" : "ds", cmd_addr, bits);
-
- spi_message_init(&m);
- memset(&t, 0, sizeof(t));
+ dev_dbg(&edev->spi->dev, "ew %s cmd 0x%04x, %d bits\n",
+ str_enable_disable(is_on), cmd_addr, bits);
t.tx_buf = &cmd_addr;
t.len = 2;
t.bits_per_word = bits;
- spi_message_add_tail(&t, &m);
+
+ spi_message_init_with_transfers(&m, &t, 1);
mutex_lock(&edev->lock);
- if (edev->pdata->prepare)
- edev->pdata->prepare(edev);
+ gpiod_set_value_cansleep(edev->pdata->select, 1);
ret = spi_sync(edev->spi, &m);
/* have to wait at least Tcsl ns */
ndelay(250);
if (ret)
- dev_err(&edev->spi->dev, "erase/write %sable error %d\n",
- is_on ? "en" : "dis", ret);
+ dev_err(&edev->spi->dev, "erase/write %s error %d\n",
+ str_enable_disable(is_on), ret);
- if (edev->pdata->finish)
- edev->pdata->finish(edev);
+ gpiod_set_value_cansleep(edev->pdata->select, 0);
mutex_unlock(&edev->lock);
return ret;
@@ -217,7 +232,7 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
const char *buf, unsigned off)
{
struct spi_message m;
- struct spi_transfer t[2];
+ struct spi_transfer t[2] = {};
int bits, data_len, ret;
u16 cmd_addr;
@@ -239,18 +254,15 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
dev_dbg(&edev->spi->dev, "write cmd 0x%x\n", cmd_addr);
- spi_message_init(&m);
- memset(t, 0, sizeof(t));
-
t[0].tx_buf = (char *)&cmd_addr;
t[0].len = 2;
t[0].bits_per_word = bits;
- spi_message_add_tail(&t[0], &m);
t[1].tx_buf = buf;
t[1].len = data_len;
t[1].bits_per_word = 8;
- spi_message_add_tail(&t[1], &m);
+
+ spi_message_init_with_transfers(&m, t, ARRAY_SIZE(t));
ret = spi_sync(edev->spi, &m);
/* have to wait program cycle time Twc ms */
@@ -263,7 +275,8 @@ static int eeprom_93xx46_write(void *priv, unsigned int off,
{
struct eeprom_93xx46_dev *edev = priv;
char *buf = val;
- int i, ret, step = 1;
+ int ret, step = 1;
+ unsigned int i;
if (unlikely(off >= edev->size))
return -EFBIG;
@@ -285,20 +298,17 @@ static int eeprom_93xx46_write(void *priv, unsigned int off,
mutex_lock(&edev->lock);
- if (edev->pdata->prepare)
- edev->pdata->prepare(edev);
+ gpiod_set_value_cansleep(edev->pdata->select, 1);
for (i = 0; i < count; i += step) {
ret = eeprom_93xx46_write_word(edev, &buf[i], off + i);
if (ret) {
- dev_err(&edev->spi->dev, "write failed at %d: %d\n",
- (int)off + i, ret);
+ dev_err(&edev->spi->dev, "write failed at %u: %d\n", off + i, ret);
break;
}
}
- if (edev->pdata->finish)
- edev->pdata->finish(edev);
+ gpiod_set_value_cansleep(edev->pdata->select, 0);
mutex_unlock(&edev->lock);
@@ -309,9 +319,8 @@ static int eeprom_93xx46_write(void *priv, unsigned int off,
static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
{
- struct eeprom_93xx46_platform_data *pd = edev->pdata;
struct spi_message m;
- struct spi_transfer t;
+ struct spi_transfer t = {};
int bits, ret;
u16 cmd_addr;
@@ -331,18 +340,15 @@ static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
dev_dbg(&edev->spi->dev, "eral cmd 0x%04x, %d bits\n", cmd_addr, bits);
- spi_message_init(&m);
- memset(&t, 0, sizeof(t));
-
t.tx_buf = &cmd_addr;
t.len = 2;
t.bits_per_word = bits;
- spi_message_add_tail(&t, &m);
+
+ spi_message_init_with_transfers(&m, &t, 1);
mutex_lock(&edev->lock);
- if (edev->pdata->prepare)
- edev->pdata->prepare(edev);
+ gpiod_set_value_cansleep(edev->pdata->select, 1);
ret = spi_sync(edev->spi, &m);
if (ret)
@@ -350,21 +356,23 @@ static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
/* have to wait erase cycle time Tec ms */
mdelay(6);
- if (pd->finish)
- pd->finish(edev);
+ gpiod_set_value_cansleep(edev->pdata->select, 0);
mutex_unlock(&edev->lock);
return ret;
}
-static ssize_t eeprom_93xx46_store_erase(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t erase_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct eeprom_93xx46_dev *edev = dev_get_drvdata(dev);
- int erase = 0, ret;
+ bool erase;
+ int ret;
+
+ ret = kstrtobool(buf, &erase);
+ if (ret)
+ return ret;
- sscanf(buf, "%d", &erase);
if (erase) {
ret = eeprom_93xx46_ew(edev, 1);
if (ret)
@@ -378,21 +386,7 @@ static ssize_t eeprom_93xx46_store_erase(struct device *dev,
}
return count;
}
-static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase);
-
-static void select_assert(void *context)
-{
- struct eeprom_93xx46_dev *edev = context;
-
- gpiod_set_value_cansleep(edev->pdata->select, 1);
-}
-
-static void select_deassert(void *context)
-{
- struct eeprom_93xx46_dev *edev = context;
-
- gpiod_set_value_cansleep(edev->pdata->select, 0);
-}
+static DEVICE_ATTR_WO(erase);
static const struct of_device_id eeprom_93xx46_of_table[] = {
{ .compatible = "eeprom-93xx46", .data = &at93c46_data, },
@@ -422,22 +416,20 @@ static const struct spi_device_id eeprom_93xx46_spi_ids[] = {
};
MODULE_DEVICE_TABLE(spi, eeprom_93xx46_spi_ids);
-static int eeprom_93xx46_probe_dt(struct spi_device *spi)
+static int eeprom_93xx46_probe_fw(struct device *dev)
{
- const struct of_device_id *of_id =
- of_match_device(eeprom_93xx46_of_table, &spi->dev);
- struct device_node *np = spi->dev.of_node;
+ const struct eeprom_93xx46_devtype_data *data;
struct eeprom_93xx46_platform_data *pd;
u32 tmp;
int ret;
- pd = devm_kzalloc(&spi->dev, sizeof(*pd), GFP_KERNEL);
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
- ret = of_property_read_u32(np, "data-size", &tmp);
+ ret = device_property_read_u32(dev, "data-size", &tmp);
if (ret < 0) {
- dev_err(&spi->dev, "data-size property not found\n");
+ dev_err(dev, "data-size property not found\n");
return ret;
}
@@ -446,30 +438,25 @@ static int eeprom_93xx46_probe_dt(struct spi_device *spi)
} else if (tmp == 16) {
pd->flags |= EE_ADDR16;
} else {
- dev_err(&spi->dev, "invalid data-size (%d)\n", tmp);
+ dev_err(dev, "invalid data-size (%d)\n", tmp);
return -EINVAL;
}
- if (of_property_read_bool(np, "read-only"))
+ if (device_property_read_bool(dev, "read-only"))
pd->flags |= EE_READONLY;
- pd->select = devm_gpiod_get_optional(&spi->dev, "select",
- GPIOD_OUT_LOW);
+ pd->select = devm_gpiod_get_optional(dev, "select", GPIOD_OUT_LOW);
if (IS_ERR(pd->select))
return PTR_ERR(pd->select);
+ gpiod_set_consumer_name(pd->select, "93xx46 EEPROMs OE");
- pd->prepare = select_assert;
- pd->finish = select_deassert;
- gpiod_direction_output(pd->select, 0);
-
- if (of_id->data) {
- const struct eeprom_93xx46_devtype_data *data = of_id->data;
-
+ data = spi_get_device_match_data(to_spi_device(dev));
+ if (data) {
pd->quirks = data->quirks;
pd->flags |= data->flags;
}
- spi->dev.platform_data = pd;
+ dev->platform_data = pd;
return 0;
}
@@ -478,13 +465,12 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
{
struct eeprom_93xx46_platform_data *pd;
struct eeprom_93xx46_dev *edev;
+ struct device *dev = &spi->dev;
int err;
- if (spi->dev.of_node) {
- err = eeprom_93xx46_probe_dt(spi);
- if (err < 0)
- return err;
- }
+ err = eeprom_93xx46_probe_fw(dev);
+ if (err < 0)
+ return err;
pd = spi->dev.platform_data;
if (!pd) {
@@ -565,7 +551,7 @@ static void eeprom_93xx46_remove(struct spi_device *spi)
static struct spi_driver eeprom_93xx46_driver = {
.driver = {
.name = "93xx46",
- .of_match_table = of_match_ptr(eeprom_93xx46_of_table),
+ .of_match_table = eeprom_93xx46_of_table,
},
.probe = eeprom_93xx46_probe,
.remove = eeprom_93xx46_remove,
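
[Editor's note] Several eeprom_93xx46 hunks above swap the spi_message_init() + spi_message_add_tail() sequence (and the memset of the transfers) for zero-initialised transfer arrays plus a single spi_message_init_with_transfers() call. A hypothetical sketch of that pattern follows; the demo_* names are illustrative.

```c
// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch of spi_message_init_with_transfers(), as used above. */
#include <linux/array_size.h>
#include <linux/spi/spi.h>

static int demo_read_block(struct spi_device *spi, u16 cmd_addr,
			   void *buf, size_t len)
{
	struct spi_transfer t[2] = {};	/* = {} replaces the old memset() */
	struct spi_message m;

	t[0].tx_buf = &cmd_addr;
	t[0].len = sizeof(cmd_addr);
	t[1].rx_buf = buf;
	t[1].len = len;

	/* Initialises the message and queues both transfers in one call. */
	spi_message_init_with_transfers(&m, t, ARRAY_SIZE(t));

	return spi_sync(spi, &m);
}
```
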
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 327afb866b21..43421fe37d33 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -1426,58 +1426,58 @@ MODULE_DEVICE_TABLE(i2c, ee_ids);
* idt_ids - supported IDT 89HPESx devices
*/
static const struct i2c_device_id idt_ids[] = {
- { "89hpes8nt2", 0 },
- { "89hpes12nt3", 0 },
-
- { "89hpes24nt6ag2", 0 },
- { "89hpes32nt8ag2", 0 },
- { "89hpes32nt8bg2", 0 },
- { "89hpes12nt12g2", 0 },
- { "89hpes16nt16g2", 0 },
- { "89hpes24nt24g2", 0 },
- { "89hpes32nt24ag2", 0 },
- { "89hpes32nt24bg2", 0 },
-
- { "89hpes12n3", 0 },
- { "89hpes12n3a", 0 },
- { "89hpes24n3", 0 },
- { "89hpes24n3a", 0 },
-
- { "89hpes32h8", 0 },
- { "89hpes32h8g2", 0 },
- { "89hpes48h12", 0 },
- { "89hpes48h12g2", 0 },
- { "89hpes48h12ag2", 0 },
- { "89hpes16h16", 0 },
- { "89hpes22h16", 0 },
- { "89hpes22h16g2", 0 },
- { "89hpes34h16", 0 },
- { "89hpes34h16g2", 0 },
- { "89hpes64h16", 0 },
- { "89hpes64h16g2", 0 },
- { "89hpes64h16ag2", 0 },
-
- /* { "89hpes3t3", 0 }, // No SMBus-slave iface */
- { "89hpes12t3g2", 0 },
- { "89hpes24t3g2", 0 },
- /* { "89hpes4t4", 0 }, // No SMBus-slave iface */
- { "89hpes16t4", 0 },
- { "89hpes4t4g2", 0 },
- { "89hpes10t4g2", 0 },
- { "89hpes16t4g2", 0 },
- { "89hpes16t4ag2", 0 },
- { "89hpes5t5", 0 },
- { "89hpes6t5", 0 },
- { "89hpes8t5", 0 },
- { "89hpes8t5a", 0 },
- { "89hpes24t6", 0 },
- { "89hpes6t6g2", 0 },
- { "89hpes24t6g2", 0 },
- { "89hpes16t7", 0 },
- { "89hpes32t8", 0 },
- { "89hpes32t8g2", 0 },
- { "89hpes48t12", 0 },
- { "89hpes48t12g2", 0 },
+ { "89hpes8nt2" },
+ { "89hpes12nt3" },
+
+ { "89hpes24nt6ag2" },
+ { "89hpes32nt8ag2" },
+ { "89hpes32nt8bg2" },
+ { "89hpes12nt12g2" },
+ { "89hpes16nt16g2" },
+ { "89hpes24nt24g2" },
+ { "89hpes32nt24ag2" },
+ { "89hpes32nt24bg2" },
+
+ { "89hpes12n3" },
+ { "89hpes12n3a" },
+ { "89hpes24n3" },
+ { "89hpes24n3a" },
+
+ { "89hpes32h8" },
+ { "89hpes32h8g2" },
+ { "89hpes48h12" },
+ { "89hpes48h12g2" },
+ { "89hpes48h12ag2" },
+ { "89hpes16h16" },
+ { "89hpes22h16" },
+ { "89hpes22h16g2" },
+ { "89hpes34h16" },
+ { "89hpes34h16g2" },
+ { "89hpes64h16" },
+ { "89hpes64h16g2" },
+ { "89hpes64h16ag2" },
+
+ /* { "89hpes3t3" }, // No SMBus-slave iface */
+ { "89hpes12t3g2" },
+ { "89hpes24t3g2" },
+ /* { "89hpes4t4" }, // No SMBus-slave iface */
+ { "89hpes16t4" },
+ { "89hpes4t4g2" },
+ { "89hpes10t4g2" },
+ { "89hpes16t4g2" },
+ { "89hpes16t4ag2" },
+ { "89hpes5t5" },
+ { "89hpes6t5" },
+ { "89hpes8t5" },
+ { "89hpes8t5a" },
+ { "89hpes24t6" },
+ { "89hpes6t6g2" },
+ { "89hpes24t6g2" },
+ { "89hpes16t7" },
+ { "89hpes32t8" },
+ { "89hpes32t8g2" },
+ { "89hpes48t12" },
+ { "89hpes48t12g2" },
{ /* END OF LIST */ }
};
MODULE_DEVICE_TABLE(i2c, idt_ids);
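Dropping the trailing ", 0" relies on a plain C rule: members left out of a brace-enclosed initializer are zero-initialized, so { "89hpes8nt2" } and { "89hpes8nt2", 0 } describe identical i2c_device_id entries and the empty { } sentinel stays all-zero. A stand-alone illustration with a toy structure in place of struct i2c_device_id:

    #include <stdio.h>

    /* toy stand-in for struct i2c_device_id: a name plus a driver_data word */
    struct toy_id {
        char name[20];
        unsigned long driver_data;
    };

    static const struct toy_id ids[] = {
        { "89hpes8nt2" },      /* driver_data implicitly zero-initialized */
        { "89hpes8nt2", 0 },   /* old spelling, identical result */
        { }                    /* all-zero sentinel terminating the table */
    };

    int main(void)
    {
        printf("%lu %lu\n", ids[0].driver_data, ids[1].driver_data); /* "0 0" */
        return 0;
    }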
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index cb6b1efeafe0..6fab2ffa736b 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -183,7 +183,7 @@ static void max6875_remove(struct i2c_client *client)
}
static const struct i2c_device_id max6875_id[] = {
- { "max6875", 0 },
+ { "max6875" },
{ }
};
MODULE_DEVICE_TABLE(i2c, max6875_id);
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 4c67e2c5a82e..95931abc3770 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -325,7 +325,7 @@ static void fastrpc_free_map(struct kref *ref)
err = qcom_scm_assign_mem(map->phys, map->size,
&src_perms, &perm, 1);
if (err) {
- dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
+ dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
map->phys, map->size, err);
return;
}
@@ -816,7 +816,7 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
map->attr = attr;
err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2);
if (err) {
- dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
+ dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n",
map->phys, map->size, err);
goto map_err;
}
@@ -953,7 +953,10 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
ctx->msg_sz = pkt_size;
- err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
+ if (ctx->fl->sctx->sid)
+ err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
+ else
+ err = fastrpc_remote_heap_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
if (err)
return err;
@@ -1222,7 +1225,7 @@ static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_reques
* that does not support unsigned PD offload
*/
if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
- dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD");
+ dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD\n");
return true;
}
}
@@ -1259,17 +1262,12 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
goto err;
}
- name = kzalloc(init.namelen, GFP_KERNEL);
- if (!name) {
- err = -ENOMEM;
+ name = memdup_user(u64_to_user_ptr(init.name), init.namelen);
+ if (IS_ERR(name)) {
+ err = PTR_ERR(name);
goto err;
}
- if (copy_from_user(name, (void __user *)(uintptr_t)init.name, init.namelen)) {
- err = -EFAULT;
- goto err_name;
- }
-
if (!fl->cctx->remote_heap) {
err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen,
&fl->cctx->remote_heap);
@@ -1285,7 +1283,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
&src_perms,
fl->cctx->vmperms, fl->cctx->vmcount);
if (err) {
- dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
+ dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n",
fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
goto err_map;
}
@@ -1337,7 +1335,7 @@ err_invoke:
(u64)fl->cctx->remote_heap->size,
&src_perms, &dst_perms, 1);
if (err)
- dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
+ dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
}
err_map:
@@ -2255,6 +2253,8 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
int i, err, domain_id = -1, vmcount;
const char *domain;
bool secure_dsp;
+ struct device_node *rmem_node;
+ struct reserved_mem *rmem;
unsigned int vmids[FASTRPC_MAX_VMIDS];
err = of_property_read_string(rdev->of_node, "label", &domain);
@@ -2297,6 +2297,23 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
}
}
+ rmem_node = of_parse_phandle(rdev->of_node, "memory-region", 0);
+ if (domain_id == SDSP_DOMAIN_ID && rmem_node) {
+ u64 src_perms;
+
+ rmem = of_reserved_mem_lookup(rmem_node);
+ if (!rmem) {
+ err = -EINVAL;
+ goto fdev_error;
+ }
+
+ src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
+ qcom_scm_assign_mem(rmem->base, rmem->size, &src_perms,
+ data->vmperms, data->vmcount);
+
+ }
+
secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain"));
data->secure = secure_dsp;
@@ -2478,5 +2495,6 @@ static void fastrpc_exit(void)
}
module_exit(fastrpc_exit);
+MODULE_DESCRIPTION("Qualcomm FastRPC");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(DMA_BUF);
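The init.name handling above replaces an open-coded kzalloc() plus copy_from_user() pair with memdup_user(), which allocates and copies in one step and reports failure as an ERR_PTR() rather than NULL, so the extra unwind label for the name buffer disappears. A minimal sketch of the before/after shape (buffer and variable names are illustrative, not fastrpc's):

    /* before: two calls, two failure modes to unwind */
    buf = kzalloc(len, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;
    if (copy_from_user(buf, user_ptr, len)) {
        kfree(buf);
        return -EFAULT;
    }

    /* after: one call, the error travels inside the pointer */
    buf = memdup_user(user_ptr, len);
    if (IS_ERR(buf))
        return PTR_ERR(buf);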
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index 759eaeb64307..ff92c6edff6b 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -121,7 +121,7 @@ static void hmc6352_remove(struct i2c_client *client)
}
static const struct i2c_device_id hmc6352_id[] = {
- { "hmc6352", 0 },
+ { "hmc6352" },
{ }
};
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index ee6296b98078..4cdb1087838f 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -95,7 +95,7 @@ static int ics932s401_detect(struct i2c_client *client,
static void ics932s401_remove(struct i2c_client *client);
static const struct i2c_device_id ics932s401_id[] = {
- { "ics932s401", 0 },
+ { "ics932s401" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ics932s401_id);
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index ebf0635aee64..9f26db467a81 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -449,7 +449,7 @@ static SIMPLE_DEV_PM_OPS(isl29003_pm_ops, isl29003_suspend, isl29003_resume);
#endif /* CONFIG_PM_SLEEP */
static const struct i2c_device_id isl29003_id[] = {
- { "isl29003", 0 },
+ { "isl29003" },
{}
};
MODULE_DEVICE_TABLE(i2c, isl29003_id);
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index c5976fa8c825..1643ba2ff964 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -177,7 +177,7 @@ static void isl29020_remove(struct i2c_client *client)
}
static const struct i2c_device_id isl29020_id[] = {
- { "isl29020", 0 },
+ { "isl29020" },
{ }
};
diff --git a/drivers/misc/keba/Kconfig b/drivers/misc/keba/Kconfig
new file mode 100644
index 000000000000..0ebca1d07ef4
--- /dev/null
+++ b/drivers/misc/keba/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+config KEBA_CP500
+ tristate "KEBA CP500 system FPGA support"
+ depends on PCI
+ help
+ This driver supports the KEBA CP500 system FPGA, which is used in
+ KEBA CP500 devices. It registers all sub devices present on the CP500
+ system FPGA as separate devices. A driver is needed for each sub
+ device.
+
+ This driver can also be built as a module. If so, the module will be
+ called cp500.
diff --git a/drivers/misc/keba/Makefile b/drivers/misc/keba/Makefile
new file mode 100644
index 000000000000..0a8b846cda7d
--- /dev/null
+++ b/drivers/misc/keba/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_KEBA_CP500) += cp500.o
diff --git a/drivers/misc/keba/cp500.c b/drivers/misc/keba/cp500.c
new file mode 100644
index 000000000000..9ba46f0f9392
--- /dev/null
+++ b/drivers/misc/keba/cp500.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) KEBA Industrial Automation Gmbh 2024
+ *
+ * Driver for KEBA system FPGA
+ *
+ * The KEBA system FPGA implements various devices. This driver registers
+ * auxiliary devices for every device within the FPGA.
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/misc/keba.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#define CP500 "cp500"
+
+#define PCI_VENDOR_ID_KEBA 0xCEBA
+#define PCI_DEVICE_ID_KEBA_CP035 0x2706
+#define PCI_DEVICE_ID_KEBA_CP505 0x2703
+#define PCI_DEVICE_ID_KEBA_CP520 0x2696
+
+#define CP500_SYS_BAR 0
+#define CP500_ECM_BAR 1
+
+/* BAR 0 registers */
+#define CP500_VERSION_REG 0x00
+#define CP500_RECONFIG_REG 0x11 /* upper 8-bits of STARTUP register */
+#define CP500_AXI_REG 0x40
+
+/* Bits in BUILD_REG */
+#define CP500_BUILD_TEST 0x8000 /* FPGA test version */
+
+/* Bits in RECONFIG_REG */
+#define CP500_RECFG_REQ 0x01 /* reconfigure FPGA on next reset */
+
+/* MSIX */
+#define CP500_AXI_MSIX 3
+#define CP500_NUM_MSIX 8
+#define CP500_NUM_MSIX_NO_MMI 2
+#define CP500_NUM_MSIX_NO_AXI 3
+
+/* EEPROM */
+#define CP500_HW_CPU_EEPROM_NAME "cp500_cpu_eeprom"
+
+#define CP500_IS_CP035(dev) ((dev)->pci_dev->device == PCI_DEVICE_ID_KEBA_CP035)
+#define CP500_IS_CP505(dev) ((dev)->pci_dev->device == PCI_DEVICE_ID_KEBA_CP505)
+#define CP500_IS_CP520(dev) ((dev)->pci_dev->device == PCI_DEVICE_ID_KEBA_CP520)
+
+struct cp500_dev_info {
+ off_t offset;
+ size_t size;
+};
+
+struct cp500_devs {
+ struct cp500_dev_info startup;
+ struct cp500_dev_info i2c;
+};
+
+/* list of devices within FPGA of CP035 family (CP035, CP056, CP057) */
+static struct cp500_devs cp035_devices = {
+ .startup = { 0x0000, SZ_4K },
+ .i2c = { 0x4000, SZ_4K },
+};
+
+/* list of devices within FPGA of CP505 family (CP503, CP505, CP507) */
+static struct cp500_devs cp505_devices = {
+ .startup = { 0x0000, SZ_4K },
+ .i2c = { 0x5000, SZ_4K },
+};
+
+/* list of devices within FPGA of CP520 family (CP520, CP530) */
+static struct cp500_devs cp520_devices = {
+ .startup = { 0x0000, SZ_4K },
+ .i2c = { 0x5000, SZ_4K },
+};
+
+struct cp500 {
+ struct pci_dev *pci_dev;
+ struct cp500_devs *devs;
+ int msix_num;
+ struct {
+ int major;
+ int minor;
+ int build;
+ } version;
+
+ /* system FPGA BAR */
+ resource_size_t sys_hwbase;
+ struct keba_i2c_auxdev *i2c;
+
+ /* ECM EtherCAT BAR */
+ resource_size_t ecm_hwbase;
+
+ void __iomem *system_startup_addr;
+};
+
+/* I2C devices */
+static struct i2c_board_info cp500_i2c_info[] = {
+ { /* temperature sensor */
+ I2C_BOARD_INFO("emc1403", 0x4c),
+ },
+ { /*
+ * CPU EEPROM
+ * CP035 family: CPU board
+ * CP505 family: bridge board
+ * CP520 family: carrier board
+ */
+ I2C_BOARD_INFO("24c32", 0x50),
+ .dev_name = CP500_HW_CPU_EEPROM_NAME,
+ },
+ { /* interface board EEPROM */
+ I2C_BOARD_INFO("24c32", 0x51),
+ },
+ { /*
+ * EEPROM (optional)
+ * CP505 family: CPU board
+ * CP520 family: MMI board
+ */
+ I2C_BOARD_INFO("24c32", 0x52),
+ },
+ { /* extension module 0 EEPROM (optional) */
+ I2C_BOARD_INFO("24c32", 0x53),
+ },
+ { /* extension module 1 EEPROM (optional) */
+ I2C_BOARD_INFO("24c32", 0x54),
+ },
+ { /* extension module 2 EEPROM (optional) */
+ I2C_BOARD_INFO("24c32", 0x55),
+ },
+ { /* extension module 3 EEPROM (optional) */
+ I2C_BOARD_INFO("24c32", 0x56),
+ }
+};
+
+static ssize_t cp500_get_fpga_version(struct cp500 *cp500, char *buf,
+ size_t max_len)
+{
+ int n;
+
+ if (CP500_IS_CP035(cp500))
+ n = scnprintf(buf, max_len, "CP035");
+ else if (CP500_IS_CP505(cp500))
+ n = scnprintf(buf, max_len, "CP505");
+ else
+ n = scnprintf(buf, max_len, "CP500");
+
+ n += scnprintf(buf + n, max_len - n, "_FPGA_%d.%02d",
+ cp500->version.major, cp500->version.minor);
+
+ /* test versions have test bit set */
+ if (cp500->version.build & CP500_BUILD_TEST)
+ n += scnprintf(buf + n, max_len - n, "Test%d",
+ cp500->version.build & ~CP500_BUILD_TEST);
+
+ n += scnprintf(buf + n, max_len - n, "\n");
+
+ return n;
+}
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cp500 *cp500 = dev_get_drvdata(dev);
+
+ return cp500_get_fpga_version(cp500, buf, PAGE_SIZE);
+}
+static DEVICE_ATTR_RO(version);
+
+static ssize_t keep_cfg_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cp500 *cp500 = dev_get_drvdata(dev);
+ unsigned long keep_cfg = 1;
+
+ /*
+ * FPGA configuration stream is kept during reset when RECONFIG bit is
+ * zero
+ */
+ if (ioread8(cp500->system_startup_addr + CP500_RECONFIG_REG) &
+ CP500_RECFG_REQ)
+ keep_cfg = 0;
+
+ return sysfs_emit(buf, "%lu\n", keep_cfg);
+}
+
+static ssize_t keep_cfg_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cp500 *cp500 = dev_get_drvdata(dev);
+ unsigned long keep_cfg;
+
+ if (kstrtoul(buf, 10, &keep_cfg) < 0)
+ return -EINVAL;
+
+ /*
+ * In normal operation "keep_cfg" is "1". This means that the FPGA keeps
+ * its configuration stream during a reset.
+ * In case of a firmware update of the FPGA, the configuration stream
+ * needs to be reloaded. This can be done without a power cycle by
+ * writing a "0" into the "keep_cfg" attribute. After a reset/reboot the
+ * new configuration stream will be loaded.
+ */
+ if (keep_cfg)
+ iowrite8(0, cp500->system_startup_addr + CP500_RECONFIG_REG);
+ else
+ iowrite8(CP500_RECFG_REQ,
+ cp500->system_startup_addr + CP500_RECONFIG_REG);
+
+ return count;
+}
+static DEVICE_ATTR_RW(keep_cfg);
+
+static struct attribute *attrs[] = {
+ &dev_attr_version.attr,
+ &dev_attr_keep_cfg.attr,
+ NULL
+};
+static const struct attribute_group attrs_group = { .attrs = attrs };
+
+static void cp500_i2c_release(struct device *dev)
+{
+ struct keba_i2c_auxdev *i2c =
+ container_of(dev, struct keba_i2c_auxdev, auxdev.dev);
+
+ kfree(i2c);
+}
+
+static int cp500_register_i2c(struct cp500 *cp500)
+{
+ int retval;
+
+ cp500->i2c = kzalloc(sizeof(*cp500->i2c), GFP_KERNEL);
+ if (!cp500->i2c)
+ return -ENOMEM;
+
+ cp500->i2c->auxdev.name = "i2c";
+ cp500->i2c->auxdev.id = 0;
+ cp500->i2c->auxdev.dev.release = cp500_i2c_release;
+ cp500->i2c->auxdev.dev.parent = &cp500->pci_dev->dev;
+ cp500->i2c->io = (struct resource) {
+ /* I2C register area */
+ .start = (resource_size_t) cp500->sys_hwbase +
+ cp500->devs->i2c.offset,
+ .end = (resource_size_t) cp500->sys_hwbase +
+ cp500->devs->i2c.offset +
+ cp500->devs->i2c.size - 1,
+ .flags = IORESOURCE_MEM,
+ };
+ cp500->i2c->info_size = ARRAY_SIZE(cp500_i2c_info);
+ cp500->i2c->info = cp500_i2c_info;
+
+ retval = auxiliary_device_init(&cp500->i2c->auxdev);
+ if (retval) {
+ kfree(cp500->i2c);
+ cp500->i2c = NULL;
+
+ return retval;
+ }
+ retval = __auxiliary_device_add(&cp500->i2c->auxdev, "keba");
+ if (retval) {
+ auxiliary_device_uninit(&cp500->i2c->auxdev);
+ cp500->i2c = NULL;
+
+ return retval;
+ }
+
+ return 0;
+}
+
+static void cp500_register_auxiliary_devs(struct cp500 *cp500)
+{
+ struct device *dev = &cp500->pci_dev->dev;
+
+ if (cp500_register_i2c(cp500))
+ dev_warn(dev, "Failed to register i2c!\n");
+}
+
+static void cp500_unregister_dev(struct auxiliary_device *auxdev)
+{
+ auxiliary_device_delete(auxdev);
+ auxiliary_device_uninit(auxdev);
+}
+
+static void cp500_unregister_auxiliary_devs(struct cp500 *cp500)
+{
+
+ if (cp500->i2c) {
+ cp500_unregister_dev(&cp500->i2c->auxdev);
+ cp500->i2c = NULL;
+ }
+}
+
+static irqreturn_t cp500_axi_handler(int irq, void *dev)
+{
+ struct cp500 *cp500 = dev;
+ u32 axi_address = ioread32(cp500->system_startup_addr + CP500_AXI_REG);
+
+ /*
+ * FPGA signals AXI response error, print AXI address to indicate which
+ * IP core was affected
+ */
+ dev_err(&cp500->pci_dev->dev, "AXI response error at 0x%08x\n",
+ axi_address);
+
+ return IRQ_HANDLED;
+}
+
+static int cp500_enable(struct cp500 *cp500)
+{
+ int axi_irq = -1;
+ int ret;
+
+ if (cp500->msix_num > CP500_NUM_MSIX_NO_AXI) {
+ axi_irq = pci_irq_vector(cp500->pci_dev, CP500_AXI_MSIX);
+ ret = request_irq(axi_irq, cp500_axi_handler, 0,
+ CP500, cp500);
+ if (ret != 0) {
+ dev_err(&cp500->pci_dev->dev,
+ "Failed to register AXI response error!\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void cp500_disable(struct cp500 *cp500)
+{
+ int axi_irq;
+
+ if (cp500->msix_num > CP500_NUM_MSIX_NO_AXI) {
+ axi_irq = pci_irq_vector(cp500->pci_dev, CP500_AXI_MSIX);
+ free_irq(axi_irq, cp500);
+ }
+}
+
+static int cp500_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+ struct device *dev = &pci_dev->dev;
+ struct resource startup;
+ struct cp500 *cp500;
+ u32 cp500_vers;
+ char buf[64];
+ int ret;
+
+ cp500 = devm_kzalloc(dev, sizeof(*cp500), GFP_KERNEL);
+ if (!cp500)
+ return -ENOMEM;
+ cp500->pci_dev = pci_dev;
+ cp500->sys_hwbase = pci_resource_start(pci_dev, CP500_SYS_BAR);
+ cp500->ecm_hwbase = pci_resource_start(pci_dev, CP500_ECM_BAR);
+ if (!cp500->sys_hwbase || !cp500->ecm_hwbase)
+ return -ENODEV;
+
+ if (CP500_IS_CP035(cp500))
+ cp500->devs = &cp035_devices;
+ else if (CP500_IS_CP505(cp500))
+ cp500->devs = &cp505_devices;
+ else if (CP500_IS_CP520(cp500))
+ cp500->devs = &cp520_devices;
+ else
+ return -ENODEV;
+
+ ret = pci_enable_device(pci_dev);
+ if (ret)
+ return ret;
+ pci_set_master(pci_dev);
+
+ startup = *pci_resource_n(pci_dev, CP500_SYS_BAR);
+ startup.end = startup.start + cp500->devs->startup.size - 1;
+ cp500->system_startup_addr = devm_ioremap_resource(&pci_dev->dev,
+ &startup);
+ if (IS_ERR(cp500->system_startup_addr)) {
+ ret = PTR_ERR(cp500->system_startup_addr);
+ goto out_disable;
+ }
+
+ cp500->msix_num = pci_alloc_irq_vectors(pci_dev, CP500_NUM_MSIX_NO_MMI,
+ CP500_NUM_MSIX, PCI_IRQ_MSIX);
+ if (cp500->msix_num < CP500_NUM_MSIX_NO_MMI) {
+ dev_err(&pci_dev->dev,
+ "Hardware does not support enough MSI-X interrupts\n");
+ ret = -ENODEV;
+ goto out_disable;
+ }
+
+ cp500_vers = ioread32(cp500->system_startup_addr + CP500_VERSION_REG);
+ cp500->version.major = (cp500_vers & 0xff);
+ cp500->version.minor = (cp500_vers >> 8) & 0xff;
+ cp500->version.build = (cp500_vers >> 16) & 0xffff;
+ cp500_get_fpga_version(cp500, buf, sizeof(buf));
+
+ dev_info(&pci_dev->dev, "FPGA version %s", buf);
+
+ pci_set_drvdata(pci_dev, cp500);
+
+ ret = sysfs_create_group(&pci_dev->dev.kobj, &attrs_group);
+ if (ret != 0)
+ goto out_free_irq;
+
+ ret = cp500_enable(cp500);
+ if (ret != 0)
+ goto out_remove_group;
+
+ cp500_register_auxiliary_devs(cp500);
+
+ return 0;
+
+out_remove_group:
+ sysfs_remove_group(&pci_dev->dev.kobj, &attrs_group);
+out_free_irq:
+ pci_free_irq_vectors(pci_dev);
+out_disable:
+ pci_clear_master(pci_dev);
+ pci_disable_device(pci_dev);
+
+ return ret;
+}
+
+static void cp500_remove(struct pci_dev *pci_dev)
+{
+ struct cp500 *cp500 = pci_get_drvdata(pci_dev);
+
+ cp500_unregister_auxiliary_devs(cp500);
+
+ cp500_disable(cp500);
+
+ sysfs_remove_group(&pci_dev->dev.kobj, &attrs_group);
+
+ pci_set_drvdata(pci_dev, 0);
+
+ pci_free_irq_vectors(pci_dev);
+
+ pci_clear_master(pci_dev);
+ pci_disable_device(pci_dev);
+}
+
+static struct pci_device_id cp500_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_KEBA, PCI_DEVICE_ID_KEBA_CP035) },
+ { PCI_DEVICE(PCI_VENDOR_ID_KEBA, PCI_DEVICE_ID_KEBA_CP505) },
+ { PCI_DEVICE(PCI_VENDOR_ID_KEBA, PCI_DEVICE_ID_KEBA_CP520) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, cp500_ids);
+
+static struct pci_driver cp500_driver = {
+ .name = CP500,
+ .id_table = cp500_ids,
+ .probe = cp500_probe,
+ .remove = cp500_remove,
+};
+module_pci_driver(cp500_driver);
+
+MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>");
+MODULE_DESCRIPTION("KEBA CP500 system FPGA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
index 32af2b14ff34..34c9be437432 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
@@ -69,8 +69,10 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]),
GFP_KERNEL);
- if (!aux_bus->aux_device_wrapper[1])
- return -ENOMEM;
+ if (!aux_bus->aux_device_wrapper[1]) {
+ retval = -ENOMEM;
+ goto err_aux_dev_add_0;
+ }
retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
if (retval < 0)
@@ -111,6 +113,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
err_aux_dev_add_1:
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
+ goto err_aux_dev_add_0;
err_aux_dev_init_1:
ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id);
@@ -120,6 +123,7 @@ err_ida_alloc_1:
err_aux_dev_add_0:
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
+ goto err_ret;
err_aux_dev_init_0:
ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id);
@@ -127,6 +131,7 @@ err_aux_dev_init_0:
err_ida_alloc_0:
kfree(aux_bus->aux_device_wrapper[0]);
+err_ret:
return retval;
}
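The fix above reroutes the second allocation failure into the existing unwind ladder instead of returning straight away, so everything acquired earlier in the probe is released. The underlying shape is the usual reverse-order goto unwind; a stand-alone sketch with hypothetical resources:

    #include <stdlib.h>

    struct ctx {
        void *a;
        void *b;
    };

    /* each failure jumps to the label that undoes everything acquired before it */
    static int setup(struct ctx *c)
    {
        c->a = malloc(16);
        if (!c->a)
            return -1;

        c->b = malloc(16);
        if (!c->b)
            goto err_free_a;

        return 0;

    err_free_a:
        free(c->a);
        return -1;
    }

    int main(void)
    {
        struct ctx c;
        int ret = setup(&c);

        if (!ret) {
            free(c.b);
            free(c.a);
        }
        return ret ? 1 : 0;
    }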
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 2733070acf39..9eebeffcd8fd 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -80,6 +80,8 @@ static void whitelist(struct mei_cl_device *cldev)
cldev->do_match = 1;
}
+#define MKHI_SEND_MAX_TIMEOUT_MSEC 4000
+
#define OSTYPE_LINUX 2
struct mei_os_ver {
__le16 build;
@@ -128,7 +130,7 @@ static int mei_osver(struct mei_cl_device *cldev)
os_ver = (struct mei_os_ver *)fwcaps->data;
os_ver->os_type = OSTYPE_LINUX;
- return __mei_cl_send(cldev->cl, buf, size, 0, mode);
+ return __mei_cl_send_timeout(cldev->cl, buf, size, 0, mode, MKHI_SEND_MAX_TIMEOUT_MSEC);
}
#define MKHI_FWVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \
@@ -148,8 +150,8 @@ static int mei_fwver(struct mei_cl_device *cldev)
req.hdr.group_id = MKHI_GEN_GROUP_ID;
req.hdr.command = MKHI_GEN_GET_FW_VERSION_CMD;
- ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0,
- MEI_CL_IO_TX_BLOCKING);
+ ret = __mei_cl_send_timeout(cldev->cl, (u8 *)&req, sizeof(req), 0,
+ MEI_CL_IO_TX_BLOCKING, MKHI_SEND_MAX_TIMEOUT_MSEC);
if (ret < 0) {
dev_info(&cldev->dev, "Could not send ReqFWVersion cmd ret = %d\n", ret);
return ret;
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 79e6f3c1341f..40c3fe26f76d 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -329,7 +329,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
}
if (!mei_cl_is_connected(cl)) {
- cl_err(dev, cl, "is not connected");
+ cl_dbg(dev, cl, "is not connected");
rets = -ENODEV;
goto out;
}
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 7f59dd38c32f..6589635f8ba3 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -385,8 +385,10 @@ static int mei_me_pci_resume(struct device *device)
}
err = mei_restart(dev);
- if (err)
+ if (err) {
+ free_irq(pdev->irq, dev);
return err;
+ }
/* Start timer if stopped in suspend */
schedule_delayed_work(&dev->timer_work, HZ);
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
index b543e6b9f3cf..1ec65d87488a 100644
--- a/drivers/misc/mei/platform-vsc.c
+++ b/drivers/misc/mei/platform-vsc.c
@@ -399,41 +399,32 @@ static void mei_vsc_remove(struct platform_device *pdev)
static int mei_vsc_suspend(struct device *dev)
{
- struct mei_device *mei_dev = dev_get_drvdata(dev);
- struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ struct mei_device *mei_dev;
+ int ret = 0;
- mei_stop(mei_dev);
+ mei_dev = dev_get_drvdata(dev);
+ if (!mei_dev)
+ return -ENODEV;
- mei_disable_interrupts(mei_dev);
+ mutex_lock(&mei_dev->device_lock);
- vsc_tp_free_irq(hw->tp);
+ if (!mei_write_is_idle(mei_dev))
+ ret = -EAGAIN;
- return 0;
+ mutex_unlock(&mei_dev->device_lock);
+
+ return ret;
}
static int mei_vsc_resume(struct device *dev)
{
- struct mei_device *mei_dev = dev_get_drvdata(dev);
- struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
- int ret;
-
- ret = vsc_tp_request_irq(hw->tp);
- if (ret)
- return ret;
-
- ret = mei_restart(mei_dev);
- if (ret)
- goto err_free;
+ struct mei_device *mei_dev;
- /* start timer if stopped in suspend */
- schedule_delayed_work(&mei_dev->timer_work, HZ);
+ mei_dev = dev_get_drvdata(dev);
+ if (!mei_dev)
+ return -ENODEV;
return 0;
-
-err_free:
- vsc_tp_free_irq(hw->tp);
-
- return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c
index ffa4ccd96a10..596a9d695dfc 100644
--- a/drivers/misc/mei/vsc-fw-loader.c
+++ b/drivers/misc/mei/vsc-fw-loader.c
@@ -252,7 +252,7 @@ static int vsc_get_sensor_name(struct vsc_fw_loader *fw_loader,
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER };
union acpi_object obj = {
- .type = ACPI_TYPE_INTEGER,
+ .integer.type = ACPI_TYPE_INTEGER,
.integer.value = 1,
};
struct acpi_object_list arg_list = {
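The small change above matters because acpi_object is a union: in the old initializer ".type" and ".integer.value" designate two different union members, and the later one wins, leaving the type field zero at run time. Initializing both fields through the same member keeps them together. A stand-alone demonstration of the same effect with a toy union (GCC/Clang behaviour):

    #include <stdio.h>

    union obj {
        int type;
        struct { int type; long value; } integer;
    };

    int main(void)
    {
        /* the later .integer.value designator re-initializes the union
         * through .integer, so the earlier .type = 1 is discarded */
        union obj broken = { .type = 1, .integer.value = 1 };
        /* both fields set through the same member: type survives */
        union obj fixed = { .integer.type = 1, .integer.value = 1 };

        printf("%d %d\n", broken.type, fixed.type); /* prints "0 1" */
        return 0;
    }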
diff --git a/drivers/misc/open-dice.c b/drivers/misc/open-dice.c
index 1e3eb2aa44d9..e6a61e6d9427 100644
--- a/drivers/misc/open-dice.c
+++ b/drivers/misc/open-dice.c
@@ -201,5 +201,6 @@ static void __exit open_dice_exit(void)
module_init(open_dice_init);
module_exit(open_dice_exit);
+MODULE_DESCRIPTION("Driver for Open Profile for DICE.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("David Brazdil <dbrazdil@google.com>");
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index c4f963cf96f2..ff172cf4614d 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -198,7 +198,7 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
{
unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0;
static const char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 };
- long timeout;
+ long time_left;
pr_debug("%s", __func__);
@@ -208,11 +208,11 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
return -EIO;
}
- timeout = wait_for_completion_interruptible_timeout(
+ time_left = wait_for_completion_interruptible_timeout(
&kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME));
- if (timeout <= 0) {
+ if (time_left <= 0) {
pr_err(" waiting for ver info- timed out or received signal");
- return timeout ? -ERESTARTSYS : -ETIMEDOUT;
+ return time_left ? -ERESTARTSYS : -ETIMEDOUT;
}
reinit_completion(&kim_gdata->kim_rcvd);
/*
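The rename to time_left matches what the API actually hands back: wait_for_completion_interruptible_timeout() returns the remaining jiffies on success, 0 on timeout and -ERESTARTSYS if a signal interrupted the wait, so the value is never "a timeout". A short sketch of the canonical return-value handling (fragment; 'done' is assumed to be an initialized struct completion):

    long time_left;

    time_left = wait_for_completion_interruptible_timeout(&done,
                                                          msecs_to_jiffies(100));
    if (time_left < 0)
        return time_left;      /* -ERESTARTSYS: interrupted by a signal */
    if (time_left == 0)
        return -ETIMEDOUT;     /* the completion never fired */
    /* time_left > 0: completed with that many jiffies to spare */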
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 7dd86a9858ab..1d54680d6ed2 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -229,7 +229,7 @@ static int __maybe_unused tifm_7xx1_resume(struct device *dev_d)
struct pci_dev *dev = to_pci_dev(dev_d);
struct tifm_adapter *fm = pci_get_drvdata(dev);
int rc;
- unsigned long timeout;
+ unsigned long time_left;
unsigned int good_sockets = 0, bad_sockets = 0;
unsigned long flags;
/* Maximum number of entries is 4 */
@@ -265,8 +265,8 @@ static int __maybe_unused tifm_7xx1_resume(struct device *dev_d)
if (good_sockets) {
fm->finish_me = &finish_resume;
spin_unlock_irqrestore(&fm->lock, flags);
- timeout = wait_for_completion_timeout(&finish_resume, HZ);
- dev_dbg(&dev->dev, "wait returned %lu\n", timeout);
+ time_left = wait_for_completion_timeout(&finish_resume, HZ);
+ dev_dbg(&dev->dev, "wait returned %lu\n", time_left);
writel(TIFM_IRQ_FIFOMASK(good_sockets)
| TIFM_IRQ_CARDMASK(good_sockets),
fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index a3bc2823143e..2ad4387c9837 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -420,7 +420,7 @@ static SIMPLE_DEV_PM_OPS(tsl2550_pm_ops, tsl2550_suspend, tsl2550_resume);
#endif /* CONFIG_PM_SLEEP */
static const struct i2c_device_id tsl2550_id[] = {
- { "tsl2550", 0 },
+ { "tsl2550" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tsl2550_id);
diff --git a/drivers/misc/vcpu_stall_detector.c b/drivers/misc/vcpu_stall_detector.c
index e2015c87f03f..41b8c2119e20 100644
--- a/drivers/misc/vcpu_stall_detector.c
+++ b/drivers/misc/vcpu_stall_detector.c
@@ -32,6 +32,7 @@
struct vcpu_stall_detect_config {
u32 clock_freq_hz;
u32 stall_timeout_sec;
+ int ppi_irq;
void __iomem *membase;
struct platform_device *dev;
@@ -77,6 +78,12 @@ vcpu_stall_detect_timer_fn(struct hrtimer *hrtimer)
return HRTIMER_RESTART;
}
+static irqreturn_t vcpu_stall_detector_irq(int irq, void *dev)
+{
+ panic("vCPU stall detector");
+ return IRQ_HANDLED;
+}
+
static int start_stall_detector_cpu(unsigned int cpu)
{
u32 ticks, ping_timeout_ms;
@@ -132,7 +139,7 @@ static int stop_stall_detector_cpu(unsigned int cpu)
static int vcpu_stall_detect_probe(struct platform_device *pdev)
{
- int ret;
+ int ret, irq;
struct resource *r;
void __iomem *membase;
u32 clock_freq_hz = VCPU_STALL_DEFAULT_CLOCK_HZ;
@@ -169,9 +176,22 @@ static int vcpu_stall_detect_probe(struct platform_device *pdev)
vcpu_stall_config = (struct vcpu_stall_detect_config) {
.membase = membase,
.clock_freq_hz = clock_freq_hz,
- .stall_timeout_sec = stall_timeout_sec
+ .stall_timeout_sec = stall_timeout_sec,
+ .ppi_irq = -1,
};
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq > 0) {
+ ret = request_percpu_irq(irq,
+ vcpu_stall_detector_irq,
+ "vcpu_stall_detector",
+ vcpu_stall_detectors);
+ if (ret)
+ goto err;
+
+ vcpu_stall_config.ppi_irq = irq;
+ }
+
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"virt/vcpu_stall_detector:online",
start_stall_detector_cpu,
@@ -184,6 +204,9 @@ static int vcpu_stall_detect_probe(struct platform_device *pdev)
vcpu_stall_config.hp_online = ret;
return 0;
err:
+ if (vcpu_stall_config.ppi_irq > 0)
+ free_percpu_irq(vcpu_stall_config.ppi_irq,
+ vcpu_stall_detectors);
return ret;
}
@@ -193,6 +216,10 @@ static void vcpu_stall_detect_remove(struct platform_device *pdev)
cpuhp_remove_state(vcpu_stall_config.hp_online);
+ if (vcpu_stall_config.ppi_irq > 0)
+ free_percpu_irq(vcpu_stall_config.ppi_irq,
+ vcpu_stall_detectors);
+
for_each_possible_cpu(cpu)
stop_stall_detector_cpu(cpu);
}
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 9a5f75163aca..8ede4ce93271 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -131,10 +131,12 @@ struct moxart_host {
struct dma_async_tx_descriptor *tx_desc;
struct mmc_host *mmc;
struct mmc_request *mrq;
+ struct scatterlist *cur_sg;
struct completion dma_complete;
struct completion pio_complete;
- struct sg_mapping_iter sg_miter;
+ u32 num_sg;
+ u32 data_remain;
u32 data_len;
u32 fifo_width;
u32 timeout;
@@ -146,6 +148,35 @@ struct moxart_host {
bool is_removed;
};
+static inline void moxart_init_sg(struct moxart_host *host,
+ struct mmc_data *data)
+{
+ host->cur_sg = data->sg;
+ host->num_sg = data->sg_len;
+ host->data_remain = host->cur_sg->length;
+
+ if (host->data_remain > host->data_len)
+ host->data_remain = host->data_len;
+}
+
+static inline int moxart_next_sg(struct moxart_host *host)
+{
+ int remain;
+ struct mmc_data *data = host->mrq->cmd->data;
+
+ host->cur_sg++;
+ host->num_sg--;
+
+ if (host->num_sg > 0) {
+ host->data_remain = host->cur_sg->length;
+ remain = host->data_len - data->bytes_xfered;
+ if (remain > 0 && remain < host->data_remain)
+ host->data_remain = remain;
+ }
+
+ return host->num_sg;
+}
+
static int moxart_wait_for_status(struct moxart_host *host,
u32 mask, u32 *status)
{
@@ -278,29 +309,14 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
static void moxart_transfer_pio(struct moxart_host *host)
{
- struct sg_mapping_iter *sgm = &host->sg_miter;
struct mmc_data *data = host->mrq->cmd->data;
u32 *sgp, len = 0, remain, status;
if (host->data_len == data->bytes_xfered)
return;
- /*
- * By updating sgm->consumes this will get a proper pointer into the
- * buffer at any time.
- */
- if (!sg_miter_next(sgm)) {
- /* This shold not happen */
- dev_err(mmc_dev(host->mmc), "ran out of scatterlist prematurely\n");
- data->error = -EINVAL;
- complete(&host->pio_complete);
- return;
- }
- sgp = sgm->addr;
- remain = sgm->length;
- if (remain > host->data_len)
- remain = host->data_len;
- sgm->consumed = 0;
+ sgp = sg_virt(host->cur_sg);
+ remain = host->data_remain;
if (data->flags & MMC_DATA_WRITE) {
while (remain > 0) {
@@ -315,7 +331,6 @@ static void moxart_transfer_pio(struct moxart_host *host)
sgp++;
len += 4;
}
- sgm->consumed += len;
remain -= len;
}
@@ -332,22 +347,22 @@ static void moxart_transfer_pio(struct moxart_host *host)
sgp++;
len += 4;
}
- sgm->consumed += len;
remain -= len;
}
}
- data->bytes_xfered += sgm->consumed;
- if (host->data_len == data->bytes_xfered) {
+ data->bytes_xfered += host->data_remain - remain;
+ host->data_remain = remain;
+
+ if (host->data_len != data->bytes_xfered)
+ moxart_next_sg(host);
+ else
complete(&host->pio_complete);
- return;
- }
}
static void moxart_prepare_data(struct moxart_host *host)
{
struct mmc_data *data = host->mrq->cmd->data;
- unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */
u32 datactrl;
int blksz_bits;
@@ -358,19 +373,15 @@ static void moxart_prepare_data(struct moxart_host *host)
blksz_bits = ffs(data->blksz) - 1;
BUG_ON(1 << blksz_bits != data->blksz);
+ moxart_init_sg(host, data);
+
datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);
- if (data->flags & MMC_DATA_WRITE) {
- flags |= SG_MITER_FROM_SG;
+ if (data->flags & MMC_DATA_WRITE)
datactrl |= DCR_DATA_WRITE;
- } else {
- flags |= SG_MITER_TO_SG;
- }
if (moxart_use_dma(host))
datactrl |= DCR_DMA_EN;
- else
- sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
@@ -443,9 +454,6 @@ static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
request_done:
- if (!moxart_use_dma(host))
- sg_miter_stop(&host->sg_miter);
-
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
}
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 9053526fa212..150fb477b7cc 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -24,6 +24,7 @@
#define BRCMSTB_MATCH_FLAGS_NO_64BIT BIT(0)
#define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT BIT(1)
#define BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE BIT(2)
+#define BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY BIT(4)
#define BRCMSTB_PRIV_FLAGS_HAS_CQE BIT(0)
#define BRCMSTB_PRIV_FLAGS_GATE_CLOCK BIT(1)
@@ -384,6 +385,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT)
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ if (!(match_priv->flags & BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY))
+ host->mmc_host_ops.card_busy = NULL;
+
/* Change the base clock frequency if the DT property exists */
if (device_property_read_u32(&pdev->dev, "clock-frequency",
&priv->base_freq_hz) != 0)
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index ef89ec382bfe..23e6ba70144c 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1326,7 +1326,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
if (ret)
- return ret;
+ goto fail;
/*
* Turn PMOS on [bit 0], set over current detection to 2.4 V
@@ -1337,7 +1337,10 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
else
scratch &= ~0x47;
- return pci_write_config_byte(chip->pdev, 0xAE, scratch);
+ ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
+
+fail:
+ return pcibios_err_to_errno(ret);
}
static int jmicron_probe(struct sdhci_pci_chip *chip)
@@ -2202,7 +2205,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
if (ret)
- return ret;
+ return pcibios_err_to_errno(ret);
slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
@@ -2211,7 +2214,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
if (ret)
- return ret;
+ return pcibios_err_to_errno(ret);
first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index d4a02184784a..058bef1c7e41 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -823,7 +823,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -834,7 +834,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_CLKREQ, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x20;
pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
@@ -843,7 +843,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
*/
ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x01;
pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
@@ -856,7 +856,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_INF_MOD, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x08;
pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
@@ -864,7 +864,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
@@ -875,7 +875,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -886,7 +886,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG0,
&scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 = ((scratch_32 & 0xFF000000) >> 24);
/* Check Whether subId is 0x11 or 0x12 */
@@ -898,7 +898,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG4,
&scratch_32);
if (ret)
- return ret;
+ goto read_fail;
/* Enable Base Clk setting change */
scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET;
@@ -921,7 +921,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CLK_SETTING, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 &= ~(0xFF00);
scratch_32 |= 0x07E0C800;
@@ -931,14 +931,14 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CLKREQ, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 |= 0x3;
pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32);
ret = pci_read_config_dword(chip->pdev,
O2_SD_PLL_SETTING, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 &= ~(0x1F3F070E);
scratch_32 |= 0x18270106;
@@ -949,7 +949,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CAP_REG2, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 &= ~(0xE0);
pci_write_config_dword(chip->pdev,
O2_SD_CAP_REG2, scratch_32);
@@ -961,7 +961,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
@@ -971,7 +971,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -979,7 +979,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_PLL_SETTING, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
if ((scratch_32 & 0xff000000) == 0x01000000) {
scratch_32 &= 0x0000FFFF;
@@ -998,7 +998,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG4,
&scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 |= (1 << 22);
pci_write_config_dword(chip->pdev,
O2_SD_FUNC_REG4, scratch_32);
@@ -1017,7 +1017,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
@@ -1028,7 +1028,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
/* UnLock WP */
ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -1057,13 +1057,16 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
/* Lock WP */
ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
}
return 0;
+
+read_fail:
+ return pcibios_err_to_errno(ret);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 746f4cf7ab03..112584aa0772 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2515,26 +2515,29 @@ EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
static int sdhci_check_ro(struct sdhci_host *host)
{
- unsigned long flags;
+ bool allow_invert = false;
int is_readonly;
- spin_lock_irqsave(&host->lock, flags);
-
- if (host->flags & SDHCI_DEVICE_DEAD)
+ if (host->flags & SDHCI_DEVICE_DEAD) {
is_readonly = 0;
- else if (host->ops->get_ro)
+ } else if (host->ops->get_ro) {
is_readonly = host->ops->get_ro(host);
- else if (mmc_can_gpio_ro(host->mmc))
+ } else if (mmc_can_gpio_ro(host->mmc)) {
is_readonly = mmc_gpio_get_ro(host->mmc);
- else
+ /* Do not invert twice */
+ allow_invert = !(host->mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
+ } else {
is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
& SDHCI_WRITE_PROTECT);
+ allow_invert = true;
+ }
- spin_unlock_irqrestore(&host->lock, flags);
+ if (is_readonly >= 0 &&
+ allow_invert &&
+ (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT))
+ is_readonly = !is_readonly;
- /* This quirk needs to be replaced by a callback-function later */
- return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
- !is_readonly : is_readonly;
+ return is_readonly;
}
#define SAMPLE_COUNT 5
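The sdhci_check_ro() rework exists so that SDHCI_QUIRK_INVERTED_WRITE_PROTECT is only applied when the source of the read-only state has not already handled the polarity itself (MMC_CAP2_RO_ACTIVE_HIGH on the GPIO path), and never applied to a negative error value. The decision boils down to simple boolean logic, sketched stand-alone:

    #include <stdbool.h>
    #include <stdio.h>

    static int check_ro(int is_readonly, bool allow_invert, bool quirk_inverted)
    {
        if (is_readonly >= 0 && allow_invert && quirk_inverted)
            is_readonly = !is_readonly;
        return is_readonly;
    }

    int main(void)
    {
        /* GPIO path already honoured the RO polarity: do not invert again */
        printf("%d\n", check_ro(1, false, true));  /* 1 */
        /* register path with the inverted-WP quirk: invert exactly once */
        printf("%d\n", check_ro(1, true, true));   /* 0 */
        return 0;
    }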
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3c3fcce4acd4..d19aabf5d4fb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5773,6 +5773,9 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
if (real_dev) {
ret = ethtool_get_ts_info_by_layer(real_dev, info);
} else {
+ info->phc_index = -1;
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
/* Check if all slaves support software tx timestamping */
rcu_read_lock();
bond_for_each_slave_rcu(bond, slave, iter) {
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 1d9057dc44f2..bf1589aef1fc 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -1618,11 +1618,20 @@ static int mcp251xfd_open(struct net_device *ndev)
clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
can_rx_offload_enable(&priv->offload);
+ priv->wq = alloc_ordered_workqueue("%s-mcp251xfd_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ dev_name(&spi->dev));
+ if (!priv->wq) {
+ err = -ENOMEM;
+ goto out_can_rx_offload_disable;
+ }
+ INIT_WORK(&priv->tx_work, mcp251xfd_tx_obj_write_sync);
+
err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
IRQF_SHARED | IRQF_ONESHOT,
dev_name(&spi->dev), priv);
if (err)
- goto out_can_rx_offload_disable;
+ goto out_destroy_workqueue;
err = mcp251xfd_chip_interrupts_enable(priv);
if (err)
@@ -1634,6 +1643,8 @@ static int mcp251xfd_open(struct net_device *ndev)
out_free_irq:
free_irq(spi->irq, priv);
+ out_destroy_workqueue:
+ destroy_workqueue(priv->wq);
out_can_rx_offload_disable:
can_rx_offload_disable(&priv->offload);
set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
@@ -1661,6 +1672,7 @@ static int mcp251xfd_stop(struct net_device *ndev)
hrtimer_cancel(&priv->tx_irq_timer);
mcp251xfd_chip_interrupts_disable(priv);
free_irq(ndev->irq, priv);
+ destroy_workqueue(priv->wq);
can_rx_offload_disable(&priv->offload);
mcp251xfd_timestamp_stop(priv);
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
index 160528d3cc26..b1de8052a45c 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
@@ -131,6 +131,39 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
tx_obj->xfer[0].len = len;
}
+static void mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_ring *tx_ring,
+ int err)
+{
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned int frame_len = 0;
+ u8 tx_head;
+
+ tx_ring->head--;
+ stats->tx_dropped++;
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+ can_free_echo_skb(ndev, tx_head, &frame_len);
+ netdev_completed_queue(ndev, 1, frame_len);
+ netif_wake_queue(ndev);
+
+ if (net_ratelimit())
+ netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+}
+
+void mcp251xfd_tx_obj_write_sync(struct work_struct *work)
+{
+ struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv,
+ tx_work);
+ struct mcp251xfd_tx_obj *tx_obj = priv->tx_work_obj;
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ int err;
+
+ err = spi_sync(priv->spi, &tx_obj->msg);
+ if (err)
+ mcp251xfd_tx_failure_drop(priv, tx_ring, err);
+}
+
static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
struct mcp251xfd_tx_obj *tx_obj)
{
@@ -162,6 +195,11 @@ static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
return false;
}
+static bool mcp251xfd_work_busy(struct work_struct *work)
+{
+ return work_busy(work);
+}
+
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -175,7 +213,8 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
- if (mcp251xfd_tx_busy(priv, tx_ring))
+ if (mcp251xfd_tx_busy(priv, tx_ring) ||
+ mcp251xfd_work_busy(&priv->tx_work))
return NETDEV_TX_BUSY;
tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
@@ -193,13 +232,13 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
netdev_sent_queue(priv->ndev, frame_len);
err = mcp251xfd_tx_obj_write(priv, tx_obj);
- if (err)
- goto out_err;
-
- return NETDEV_TX_OK;
-
- out_err:
- netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+ if (err == -EBUSY) {
+ netif_stop_queue(ndev);
+ priv->tx_work_obj = tx_obj;
+ queue_work(priv->wq, &priv->tx_work);
+ } else if (err) {
+ mcp251xfd_tx_failure_drop(priv, tx_ring, err);
+ }
return NETDEV_TX_OK;
}
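The change above covers the case where the non-blocking SPI submission in mcp251xfd_tx_obj_write() reports -EBUSY from the xmit path, which runs in atomic context and cannot fall back to a blocking transfer itself: the frame is parked in tx_work_obj, the queue is stopped, and the ordered workqueue created in mcp251xfd_open() retries with spi_sync(). Condensed into the general shape (the handler name is illustrative):

    /* open(): ordered, freezable workqueue for the deferred transfer */
    priv->wq = alloc_ordered_workqueue("%s-tx", WQ_FREEZABLE | WQ_MEM_RECLAIM,
                                       dev_name(&spi->dev));
    if (!priv->wq)
        return -ENOMEM;
    INIT_WORK(&priv->tx_work, tx_work_handler);

    /* xmit path (atomic context): park the frame instead of blocking */
    if (err == -EBUSY) {
        netif_stop_queue(ndev);
        priv->tx_work_obj = tx_obj;
        queue_work(priv->wq, &priv->tx_work);
    }

    /* stop(): destroy_workqueue() drains any still-pending retry first */
    destroy_workqueue(priv->wq);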
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 24510b3b8020..b35bfebd23f2 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -633,6 +633,10 @@ struct mcp251xfd_priv {
struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
+ struct workqueue_struct *wq;
+ struct work_struct tx_work;
+ struct mcp251xfd_tx_obj *tx_work_obj;
+
DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
u8 rx_ring_num;
@@ -952,6 +956,7 @@ void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
+void mcp251xfd_tx_obj_write_sync(struct work_struct *work);
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 8faf8a462c05..7292c81fc0cd 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -294,7 +294,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
}
usb_free_urb(urb);
- return 0;
+ return err;
}
int kvaser_usb_can_rx_over_error(struct net_device *netdev)
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index f8ad7833f5d9..425e20daf1e9 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -355,10 +355,8 @@ int ksz9477_reset_switch(struct ksz_device *dev)
SPI_AUTO_EDGE_DETECTION, 0);
/* default configuration */
- ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
- data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
- SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
- ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+ ksz_write8(dev, REG_SW_LUE_CTRL_1,
+ SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER);
/* disable interrupts */
ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
@@ -429,6 +427,57 @@ void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
mutex_unlock(&p->mib.cnt_mutex);
}
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col)
+{
+ u32 pmavbc;
+ u8 status;
+ u16 pqm;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
+ if (ret)
+ return ret;
+ if (!(FIELD_GET(PORT_INTF_SPEED_MASK, status) == PORT_INTF_SPEED_NONE) &&
+ !(status & PORT_INTF_FULL_DUPLEX)) {
+ /* Errata DS80000754 recommends monitoring potential faults in
+ * half-duplex mode. The switch might not be able to communicate anymore
+ * in these states.
+ * If you see this message, please read the errata sheet for more information:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
+ * To work around this issue, half-duplex mode should be avoided.
+ * A software reset could be implemented to recover from this state.
+ */
+ dev_warn_once(dev->dev,
+ "Half-duplex detected on port %d, transmission halt may occur\n",
+ port);
+ if (tx_late_col != 0) {
+ /* Transmission halt with late collisions */
+ dev_crit_once(dev->dev,
+ "TX late collisions detected, transmission may be halted on port %d\n",
+ port);
+ }
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &status);
+ if (ret)
+ return ret;
+ if (status & SW_VLAN_ENABLE) {
+ ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
+ if (ret)
+ return ret;
+ ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
+ if (ret)
+ return ret;
+ if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
+ (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
+ /* Transmission halt with Half-Duplex and VLAN */
+ dev_crit_once(dev->dev,
+ "resources out of limits, transmission may be halted\n");
+ }
+ }
+ }
+ return ret;
+}
+
void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
@@ -1299,6 +1348,10 @@ int ksz9477_setup(struct dsa_switch *ds)
/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
+ /* Use collision based back pressure mode. */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE,
+ SW_BACK_PRESSURE_COLLISION);
+
/* Now we can configure default MTU value */
ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index ce1e656b800b..239a281da10b 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -36,6 +36,8 @@ int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
bool ingress, struct netlink_ext_ack *extack);
void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror);
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col);
void ksz9477_get_caps(struct ksz_device *dev, int port,
struct phylink_config *config);
int ksz9477_fdb_dump(struct ksz_device *dev, int port,
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index f3a205ee483f..d5354c600ea1 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -247,6 +247,7 @@
#define REG_SW_MAC_CTRL_1 0x0331
#define SW_BACK_PRESSURE BIT(5)
+#define SW_BACK_PRESSURE_COLLISION 0
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
#define SW_JUMBO_PACKET BIT(2)
@@ -842,8 +843,8 @@
#define REG_PORT_STATUS_0 0x0030
-#define PORT_INTF_SPEED_M 0x3
-#define PORT_INTF_SPEED_S 3
+#define PORT_INTF_SPEED_MASK GENMASK(4, 3)
+#define PORT_INTF_SPEED_NONE GENMASK(1, 0)
#define PORT_INTF_FULL_DUPLEX BIT(2)
#define PORT_TX_FLOW_CTRL BIT(1)
#define PORT_RX_FLOW_CTRL BIT(0)
@@ -1167,6 +1168,11 @@
#define PORT_RMII_CLK_SEL BIT(7)
#define PORT_MII_SEL_EDGE BIT(5)
+#define REG_PMAVBC 0x03AC
+
+#define PMAVBC_MASK GENMASK(26, 16)
+#define PMAVBC_MIN 0x580
+
/* 4 - MAC */
#define REG_PORT_MAC_CTRL_0 0x0400
@@ -1494,6 +1500,7 @@
#define PORT_QM_TX_CNT_USED_S 0
#define PORT_QM_TX_CNT_M (BIT(11) - 1)
+#define PORT_QM_TX_CNT_MAX 0x200
#define REG_PORT_QM_TX_CNT_1__4 0x0A14
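Replacing the PORT_INTF_SPEED_M/_S pair with a GENMASK() definition lets ksz9477_errata_monitor() decode the speed field with FIELD_GET() instead of open-coded shifting. GENMASK(4, 3) is simply bits 3..4, and FIELD_GET() masks the field and shifts it down to bit 0; a stand-alone illustration with the macros expanded by hand:

    #include <stdio.h>
    #include <stdint.h>

    /* GENMASK(4, 3) == 0x18: bits 3..4 set */
    #define PORT_INTF_SPEED_MASK   0x18u
    /* FIELD_GET(mask, reg): isolate the field and shift it to bit 0 */
    #define GET_SPEED(reg)         (((reg) & PORT_INTF_SPEED_MASK) >> 3)

    int main(void)
    {
        uint8_t status = 0x10;  /* speed field holds 0b10 */

        printf("speed code %u\n", GET_SPEED(status)); /* prints 2 */
        return 0;
    }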
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 1e0085cd9a9a..0580b2fee21c 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1382,6 +1382,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.tc_cbs_supported = true,
.ops = &ksz9477_dev_ops,
.phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1416,6 +1417,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_ipms = 8,
.ops = &ksz9477_dev_ops,
.phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1450,6 +1452,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_ipms = 8,
.ops = &ksz9477_dev_ops,
.phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1540,6 +1543,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.tc_cbs_supported = true,
.ops = &ksz9477_dev_ops,
.phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1820,6 +1824,7 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
struct rtnl_link_stats64 *stats;
struct ksz_stats_raw *raw;
struct ksz_port_mib *mib;
+ int ret;
mib = &dev->ports[port].mib;
stats = &mib->stats64;
@@ -1861,6 +1866,12 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
pstats->rx_pause_frames = raw->rx_pause;
spin_unlock(&mib->stats64_lock);
+
+ if (dev->info->phy_errata_9477) {
+ ret = ksz9477_errata_monitor(dev, port, raw->tx_late_col);
+ if (ret)
+ dev_err(dev->dev, "Failed to monitor transmission halt\n");
+ }
}
void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port)
@@ -2185,7 +2196,7 @@ static void ksz_irq_bus_sync_unlock(struct irq_data *d)
struct ksz_device *dev = kirq->dev;
int ret;
- ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
+ ret = ksz_write8(dev, kirq->reg_mask, kirq->masked);
if (ret)
dev_err(dev->dev, "failed to change IRQ mask\n");
@@ -3142,7 +3153,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
else
interface = PHY_INTERFACE_MODE_MII;
} else if (val == bitval[P_RMII_SEL]) {
- interface = PHY_INTERFACE_MODE_RGMII;
+ interface = PHY_INTERFACE_MODE_RMII;
} else {
interface = PHY_INTERFACE_MODE_RGMII;
if (data8 & P_RGMII_ID_EG_ENABLE)
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index c784fd23a993..ee7db46e469d 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -66,6 +66,7 @@ struct ksz_chip_data {
bool tc_cbs_supported;
const struct ksz_dev_ops *ops;
const struct phylink_mac_ops *phylink_mac_ops;
+ bool phy_errata_9477;
bool ksz87xx_eee_link_erratum;
const struct ksz_mib_names *mib_names;
int mib_cnt;
diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c
index 811ebeeff4ed..43ac68052baf 100644
--- a/drivers/net/dsa/qca/qca8k-leds.c
+++ b/drivers/net/dsa/qca/qca8k-leds.c
@@ -431,8 +431,11 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d",
priv->internal_mdio_bus->id,
port_num);
- if (!init_data.devicename)
+ if (!init_data.devicename) {
+ fwnode_handle_put(led);
+ fwnode_handle_put(leds);
return -ENOMEM;
+ }
ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data);
if (ret)
@@ -441,6 +444,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
kfree(init_data.devicename);
}
+ fwnode_handle_put(leds);
return 0;
}
@@ -471,9 +475,13 @@ qca8k_setup_led_ctrl(struct qca8k_priv *priv)
* the correct port for LED setup.
*/
ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num));
- if (ret)
+ if (ret) {
+ fwnode_handle_put(port);
+ fwnode_handle_put(ports);
return ret;
+ }
}
+ fwnode_handle_put(ports);
return 0;
}
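
The qca8k LED change above drops the held fwnode references on every early-return path, not only on success. A rough standalone sketch of that get/put discipline, with toy handle_get()/handle_put() helpers standing in for the fwnode API:

#include <stdio.h>
#include <stdlib.h>

struct handle { int refs; };	/* stand-in for a refcounted fwnode handle */

static struct handle *handle_get(struct handle *h) { h->refs++; return h; }
static void handle_put(struct handle *h) { h->refs--; }

static int parse_child(struct handle *parent, struct handle *child, int fail)
{
	char *name = fail ? NULL : malloc(16);

	if (!name) {
		/* error path must release both references before returning */
		handle_put(child);
		handle_put(parent);
		return -1;
	}
	free(name);
	handle_put(child);
	handle_put(parent);
	return 0;
}

int main(void)
{
	struct handle parent = { 0 }, child = { 0 };
	int ret = parse_child(handle_get(&parent), handle_get(&child), 1);

	/* both refcounts are back to 0 even though parsing failed */
	printf("ret=%d parent.refs=%d child.refs=%d\n", ret, parent.refs, child.refs);
	return 0;
}
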
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 2d8a66ea82fa..713a595370bf 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -312,7 +312,6 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
struct ena_com_io_sq *io_sq)
{
size_t size;
- int dev_node = 0;
memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
@@ -325,12 +324,9 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size = io_sq->desc_entry_size * io_sq->q_depth;
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
- dev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size,
@@ -354,10 +350,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
- dev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->bounce_buf_ctrl.base_buffer)
io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
@@ -397,7 +390,6 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
struct ena_com_io_cq *io_cq)
{
size_t size;
- int prev_node = 0;
memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
@@ -409,11 +401,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
- prev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_cq->cdesc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, prev_node);
if (!io_cq->cdesc_addr.virt_addr) {
io_cq->cdesc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c437ca1c0fd3..a6d69a45fa01 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -732,9 +732,6 @@ tx_done:
return NETDEV_TX_OK;
tx_dma_error:
- if (BNXT_TX_PTP_IS_SET(lflags))
- atomic_inc(&bp->ptp_cfg->tx_avail);
-
last_frag = i;
/* start back at beginning and unmap skb */
@@ -756,6 +753,8 @@ tx_dma_error:
tx_free:
dev_kfree_skb_any(skb);
tx_kick_pending:
+ if (BNXT_TX_PTP_IS_SET(lflags))
+ atomic_inc(&bp->ptp_cfg->tx_avail);
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
@@ -8996,6 +8995,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
}
+ bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
hwrm_func_qcaps_exit:
hwrm_req_drop(bp, req);
@@ -15363,6 +15363,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->priv_flags |= IFF_UNICAST_FLT;
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+ if (bp->tso_max_segs)
+ netif_set_tso_max_segs(dev, bp->tso_max_segs);
dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_RX_SG;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 656ab81c0272..9cf0acfa04e5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1434,6 +1434,57 @@ struct bnxt_l2_filter {
atomic_t refcnt;
};
+/* Compat version of hwrm_port_phy_qcfg_output capped at 96 bytes. The
+ * first 95 bytes are identical to hwrm_port_phy_qcfg_output in bnxt_hsi.h.
+ * The last valid byte in the compat version is different.
+ */
+struct hwrm_port_phy_qcfg_output_compat {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ u8 active_fec_signal_mode;
+ __le16 link_speed;
+ u8 duplex_cfg;
+ u8 pause;
+ __le16 support_speeds;
+ __le16 force_link_speed;
+ u8 auto_mode;
+ u8 auto_pause;
+ __le16 auto_link_speed;
+ __le16 auto_link_speed_mask;
+ u8 wirespeed;
+ u8 lpbk;
+ u8 force_pause;
+ u8 module_status;
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ u8 media_type;
+ u8 xcvr_pkg_type;
+ u8 eee_config_phy_addr;
+ u8 parallel_detect;
+ __le16 link_partner_adv_speeds;
+ u8 link_partner_adv_auto_mode;
+ u8 link_partner_adv_pause;
+ __le16 adv_eee_link_speed_mask;
+ __le16 link_partner_adv_eee_link_speed_mask;
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ __le16 fec_cfg;
+ u8 duplex_state;
+ u8 option_flags;
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le16 support_pam4_speeds;
+ __le16 force_pam4_link_speed;
+ __le16 auto_pam4_link_speed_mask;
+ u8 link_partner_pam4_adv_speeds;
+ u8 valid;
+};
+
struct bnxt_link_info {
u8 phy_type;
u8 media_type;
@@ -2267,6 +2318,7 @@ struct bnxt {
u8 rss_hash_key_updated:1;
u16 max_mtu;
+ u16 tso_max_segs;
u8 max_tc;
u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 06ea86c80be1..f219709f9563 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2023 Broadcom Inc.
+ * Copyright (c) 2018-2024 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -500,7 +500,11 @@ struct cmd_nums {
#define HWRM_TFC_IF_TBL_GET 0x399UL
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
#define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x39cUL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x39dUL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x39eUL
#define HWRM_SV 0x400UL
+ #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
@@ -609,8 +613,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 39
-#define HWRM_VERSION_STR "1.10.3.39"
+#define HWRM_VERSION_RSVD 44
+#define HWRM_VERSION_STR "1.10.3.44"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -664,6 +668,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -843,7 +848,9 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
#define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
#define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1326,13 +1333,13 @@ struct hwrm_async_event_cmpl_error_report_base {
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
@@ -1814,6 +1821,9 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1828,7 +1838,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
u8 device_serial_number[8];
__le16 ctxs_per_partition;
- u8 unused_2[2];
+ __le16 max_tso_segs;
__le32 roce_vf_max_av;
__le32 roce_vf_max_cq;
__le32 roce_vf_max_mrw;
@@ -2449,6 +2459,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
#define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
#define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -3660,22 +3671,24 @@ struct hwrm_func_backing_store_cfg_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
@@ -3772,18 +3785,20 @@ struct hwrm_func_backing_store_qcfg_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
@@ -3876,22 +3891,24 @@ struct hwrm_func_backing_store_qcaps_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
@@ -3911,22 +3928,24 @@ struct hwrm_func_backing_store_qcaps_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
@@ -4202,7 +4221,8 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
__le16 auto_link_speeds2_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
@@ -4217,6 +4237,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
u8 unused_2[6];
};
@@ -4292,6 +4313,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
u8 duplex_cfg;
@@ -4451,7 +4473,13 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -5049,33 +5077,43 @@ struct hwrm_port_qstats_ext_output {
u8 valid;
};
-/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
+/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
struct hwrm_port_lpbk_qstats_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
+ __le16 lpbk_stat_size;
+ u8 flags;
+ #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 lpbk_stat_host_addr;
};
-/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
+/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
struct hwrm_port_lpbk_qstats_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
+ __le16 lpbk_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* port_lpbk_stats (size:640b/80B) */
+struct port_lpbk_stats {
__le64 lpbk_ucast_frames;
__le64 lpbk_mcast_frames;
__le64 lpbk_bcast_frames;
__le64 lpbk_ucast_bytes;
__le64 lpbk_mcast_bytes;
__le64 lpbk_bcast_bytes;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
- __le64 rx_stat_discard;
- __le64 rx_stat_error;
- u8 unused_0[7];
- u8 valid;
+ __le64 lpbk_tx_discards;
+ __le64 lpbk_tx_errors;
+ __le64 lpbk_rx_discards;
+ __le64 lpbk_rx_errors;
};
/* hwrm_port_ecn_qstats_input (size:256b/32B) */
@@ -5140,13 +5178,15 @@ struct hwrm_port_clr_stats_output {
u8 valid;
};
-/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
+/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
struct hwrm_port_lpbk_clr_stats_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
@@ -5287,10 +5327,11 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
__le16 flags2;
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
u8 internal_port_cnt;
u8 unused_0;
__le16 supported_speeds2_force_mode;
@@ -7443,17 +7484,17 @@ struct hwrm_cfa_l2_filter_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
#define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
#define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
#define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
@@ -8520,17 +8561,17 @@ struct hwrm_tunnel_dst_port_query_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
@@ -8576,17 +8617,17 @@ struct hwrm_tunnel_dst_port_alloc_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
@@ -8635,17 +8676,17 @@ struct hwrm_tunnel_dst_port_free_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
@@ -9109,6 +9150,7 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
#define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
+ #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
@@ -9758,6 +9800,9 @@ struct hwrm_dbg_coredump_initiate_input {
__le16 instance;
__le16 unused_0;
u8 seg_flags;
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
u8 unused_1[7];
};
@@ -10433,13 +10478,13 @@ struct hwrm_selftest_irq_output {
/* dbc_dbc (size:64b/8B) */
struct dbc_dbc {
- u32 index;
+ __le32 index;
#define DBC_DBC_INDEX_MASK 0xffffffUL
#define DBC_DBC_INDEX_SFT 0
#define DBC_DBC_EPOCH 0x1000000UL
#define DBC_DBC_TOGGLE_MASK 0x6000000UL
#define DBC_DBC_TOGGLE_SFT 25
- u32 type_path_xid;
+ __le32 type_path_xid;
#define DBC_DBC_XID_MASK 0xfffffUL
#define DBC_DBC_XID_SFT 0
#define DBC_DBC_PATH_MASK 0x3000000UL
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index 1df3d56cc4b5..d2fd2d04ed47 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -680,7 +680,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
req_type);
else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
- req_type, token->seq_id, rc);
+ req_type, le16_to_cpu(ctx->req->seq_id), rc);
rc = __hwrm_to_stderr(rc);
exit:
if (token)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 175192ebaa77..22898d3d088b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -950,8 +950,11 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_fwd_resp_input *req;
int rc;
- if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
+ if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {
+ netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n",
+ msg_size);
return -EINVAL;
+ }
rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
if (!rc) {
@@ -1085,7 +1088,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
- struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
+ struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =
@@ -1096,6 +1099,11 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
mutex_unlock(&bp->link_lock);
phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+ /* New SPEEDS2 fields are beyond the legacy structure, so
+ * clear the SPEEDS2_SUPPORTED flag.
+ */
+ phy_qcfg_resp.option_flags &=
+ ~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
phy_qcfg_resp.valid = 1;
if (vf->flags & BNXT_VF_LINK_UP) {
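
The forwarded link response above is built in a size-capped compat structure so an older VF driver never reads past the layout it knows, and capability bits describing fields beyond that layout are cleared. A toy illustration of the idea; the structures and flag value below are made up for the example, not the real HWRM layouts:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_SPEEDS2_SUPPORTED_DEMO  0x8u	/* stand-in capability bit */

struct phy_resp_full {		/* toy stand-in for the current firmware reply */
	uint16_t resp_len;
	uint16_t option_flags;
	uint16_t speeds2_lo;
	uint16_t speeds2_hi;	/* newer fields past the legacy end */
	uint8_t  pad;
	uint8_t  valid;
};

struct phy_resp_compat {	/* toy stand-in for the legacy-sized reply */
	uint16_t resp_len;
	uint16_t option_flags;
	uint8_t  pad;
	uint8_t  valid;		/* must remain the final byte of the short reply */
};

static_assert(offsetof(struct phy_resp_full, resp_len) ==
	      offsetof(struct phy_resp_compat, resp_len) &&
	      offsetof(struct phy_resp_full, option_flags) ==
	      offsetof(struct phy_resp_compat, option_flags),
	      "shared prefix must line up");
static_assert(offsetof(struct phy_resp_compat, valid) ==
	      sizeof(struct phy_resp_compat) - 1,
	      "valid must be the last byte a legacy driver sees");

int main(void)
{
	struct phy_resp_compat resp = { .resp_len = sizeof(resp) };

	/* drop capabilities the legacy layout cannot describe */
	resp.option_flags &= ~FLAG_SPEEDS2_SUPPORTED_DEMO;
	resp.valid = 1;
	printf("flags=0x%x valid=%u\n", resp.option_flags, resp.valid);
	return 0;
}
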
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index 96c6ea12279f..989b4ddae342 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct,
pg_info->page_offset;
memcpy(skb->data, va, MIN_SKB_SIZE);
skb_put(skb, MIN_SKB_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset + MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
}
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- pg_info->page,
- pg_info->page_offset + MIN_SKB_SIZE,
- len - MIN_SKB_SIZE,
- LIO_RXBUFFER_SZ);
} else {
struct octeon_skb_page_info *pg_info =
((struct octeon_skb_page_info *)(skb->cb));
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index f604119efc80..5f26fc3ad655 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1117,18 +1117,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
if (port[IFLA_PORT_PROFILE]) {
+ if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_NAME;
memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
PORT_PROFILE_MAX);
}
if (port[IFLA_PORT_INSTANCE_UUID]) {
+ if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_INSTANCE;
memcpy(pp->instance_uuid,
nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
}
if (port[IFLA_PORT_HOST_UUID]) {
+ if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_HOST;
memcpy(pp->host_uuid,
nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
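
Each attribute above is now length-checked before being copied into a fixed-size buffer, and the previously saved settings are restored on failure. A small userspace sketch of that pattern; the attr struct and _DEMO constants stand in for nla_len()/nla_data() and the enic limits:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NAME_MAX_DEMO 40	/* stand-in for PORT_PROFILE_MAX */
#define UUID_MAX_DEMO 16	/* stand-in for PORT_UUID_MAX */

struct attr {
	uint16_t len;		/* payload length as declared by the sender */
	uint8_t  data[64];
};

struct port_profile {
	uint8_t name[NAME_MAX_DEMO];
	uint8_t host_uuid[UUID_MAX_DEMO];
	unsigned int set;
};

static int apply_port_attrs(struct port_profile *pp, const struct attr *name,
			    const struct attr *uuid)
{
	struct port_profile prev = *pp;	/* snapshot before touching pp */

	if (name) {
		if (name->len != NAME_MAX_DEMO)
			goto err;
		memcpy(pp->name, name->data, NAME_MAX_DEMO);
		pp->set |= 1u;
	}
	if (uuid) {
		if (uuid->len != UUID_MAX_DEMO)
			goto err;	/* undo the name update above as well */
		memcpy(pp->host_uuid, uuid->data, UUID_MAX_DEMO);
		pp->set |= 2u;
	}
	return 0;
err:
	*pp = prev;		/* reject the whole request, restore old state */
	return -1;
}

int main(void)
{
	struct port_profile pp = { .set = 0 };
	struct attr name = { .len = NAME_MAX_DEMO };
	struct attr short_uuid = { .len = 4 };	/* malformed: too short */
	int ret = apply_port_attrs(&pp, &name, &short_uuid);

	printf("ret=%d set=%u\n", ret, pp.set);	/* rolled back: set stays 0 */
	return 0;
}
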
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a72d8a2eb0b3..881ece735dcf 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -4130,6 +4130,14 @@ free_queue_mem:
return ret;
}
+static void fec_enet_deinit(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ netif_napi_del(&fep->napi);
+ fec_enet_free_queue(ndev);
+}
+
#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
@@ -4524,6 +4532,7 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
+ fec_enet_deinit(ndev);
failed_init:
fec_ptp_stop(pdev);
failed_reset:
@@ -4587,6 +4596,7 @@ fec_drv_remove(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ fec_enet_deinit(ndev);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index c1c912de59c7..1154c1d8f66f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -647,11 +647,13 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}
-static void gve_rx_free_skb(struct gve_rx_ring *rx)
+static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
{
if (!rx->ctx.skb_head)
return;
+ if (rx->ctx.skb_head == napi->skb)
+ napi->skb = NULL;
dev_kfree_skb_any(rx->ctx.skb_head);
rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL;
@@ -950,7 +952,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
if (err < 0) {
- gve_rx_free_skb(rx);
+ gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
if (err == -ENOMEM)
rx->rx_skb_alloc_fail++;
@@ -993,7 +995,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
/* gve_rx_complete_skb() will consume skb if successful */
if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
- gve_rx_free_skb(rx);
+ gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
rx->rx_desc_err_dropped_pkt++;
u64_stats_update_end(&rx->statss);
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index fe1b26a4d736..0b3cca3fc792 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -555,28 +555,18 @@ static int gve_prep_tso(struct sk_buff *skb)
if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
return -1;
+ if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+ return -EINVAL;
+
/* Needed because we will modify header. */
err = skb_cow_head(skb, 0);
if (err < 0)
return err;
tcp = tcp_hdr(skb);
-
- /* Remove payload length from checksum. */
paylen = skb->len - skb_transport_offset(skb);
-
- switch (skb_shinfo(skb)->gso_type) {
- case SKB_GSO_TCPV4:
- case SKB_GSO_TCPV6:
- csum_replace_by_diff(&tcp->check,
- (__force __wsum)htonl(paylen));
-
- /* Compute length of segmentation header. */
- header_len = skb_tcp_all_headers(skb);
- break;
- default:
- return -EINVAL;
- }
+ csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
+ header_len = skb_tcp_all_headers(skb);
if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
return -EINVAL;
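
gve_prep_tso() above subtracts the payload length from the TCP checksum that the stack seeded with the pseudo-header sum, so the hardware can re-add per-segment lengths during TSO. A minimal userspace sketch of that ones'-complement adjustment; csum16_add()/csum16_sub() are plain reimplementations for illustration, not the kernel csum helpers:

#include <stdint.h>
#include <stdio.h>

static uint16_t csum16_add(uint16_t a, uint16_t b)
{
	uint32_t s = (uint32_t)a + b;

	return (uint16_t)((s & 0xffff) + (s >> 16));	/* fold the end-around carry */
}

static uint16_t csum16_sub(uint16_t a, uint16_t b)
{
	return csum16_add(a, (uint16_t)~b);	/* subtract = add the complement */
}

int main(void)
{
	uint16_t pseudo_no_len = 0x1a2b;	/* made-up pseudo-header sum without length */
	uint16_t paylen = 3000;			/* TCP payload length */

	uint16_t seeded = csum16_add(pseudo_no_len, paylen);	/* what the stack provides */
	uint16_t adjusted = csum16_sub(seeded, paylen);		/* what the driver hands to HW */

	printf("seeded=0x%04x adjusted=0x%04x (expect 0x%04x)\n",
	       seeded, adjusted, pseudo_no_len);
	return 0;
}
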
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index ff71fb1eced9..a5fc0209d628 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -3535,6 +3535,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
ret = hns3_alloc_and_attach_buffer(ring, i);
if (ret)
goto out_buffer_fail;
+
+ if (!(i % HNS3_RESCHED_BD_NUM))
+ cond_resched();
}
return 0;
@@ -5107,6 +5110,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
}
u64_stats_init(&priv->ring[i].syncp);
+ cond_resched();
}
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index acd756b0c7c9..d36c4ed16d8d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -214,6 +214,8 @@ enum hns3_nic_state {
#define HNS3_CQ_MODE_EQE 1U
#define HNS3_CQ_MODE_CQE 0U
+#define HNS3_RESCHED_BD_NUM 1024
+
enum hns3_pkt_l2t_type {
HNS3_L2_TYPE_UNICAST,
HNS3_L2_TYPE_MULTICAST,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 43cc6ee4d87d..82574ce0194f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3086,9 +3086,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev)
static void hclge_update_link_status(struct hclge_dev *hdev)
{
- struct hnae3_handle *rhandle = &hdev->vport[0].roce;
struct hnae3_handle *handle = &hdev->vport[0].nic;
- struct hnae3_client *rclient = hdev->roce_client;
struct hnae3_client *client = hdev->nic_client;
int state;
int ret;
@@ -3112,8 +3110,15 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
client->ops->link_status_change(handle, state);
hclge_config_mac_tnl_int(hdev, state);
- if (rclient && rclient->ops->link_status_change)
- rclient->ops->link_status_change(rhandle, state);
+
+ if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
+ struct hnae3_handle *rhandle = &hdev->vport[0].roce;
+ struct hnae3_client *rclient = hdev->roce_client;
+
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle,
+ state);
+ }
hclge_push_link_status(hdev);
}
@@ -11319,6 +11324,12 @@ clear_roce:
return ret;
}
+static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
+{
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+}
+
static void hclge_uninit_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -11327,7 +11338,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (hdev->roce_client) {
clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
- while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ while (hclge_uninit_need_wait(hdev))
msleep(HCLGE_WAIT_RESET_DONE);
hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5e9a93bdb518..23ebeb143987 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2482,6 +2482,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
tx_buff = &tx_pool->tx_buff[bufidx];
+
+	/* Sanity-check our free map to make sure it points to an index
+	 * that is not occupied by another skb. If that skb memory were
+	 * never freed, congestion control would kick in and halt tx.
+	 */
+ if (unlikely(tx_buff->skb)) {
+ dev_warn_ratelimited(dev, "TX free map points to untracked skb (%s %d idx=%d)\n",
+ skb_is_gso(skb) ? "tso_pool" : "tx_pool",
+ queue_num, bufidx);
+ dev_kfree_skb_any(tx_buff->skb);
+ }
+
tx_buff->skb = skb;
tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
@@ -4061,6 +4073,12 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
adapter->num_active_tx_scrqs = 0;
}
+	/* Clean up any remaining outstanding SKBs.
+	 * We have already freed the IRQs, so we will not
+	 * hear back about them.
+	 */
+ clean_tx_pools(adapter);
+
if (adapter->rx_scrq) {
for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
if (!adapter->rx_scrq[i])
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f9e94be36e97..2e98a2a0bead 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1225,6 +1225,28 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
}
release:
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
+ /* Force SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val) {
+ e1000e_enable_phy_retry(hw);
+ hw->phy.ops.release(hw);
+ goto out;
+ }
+ phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ e1000e_enable_phy_retry(hw);
+
+ /* Force SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
hw->phy.ops.release(hw);
out:
if (ret_val)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 220d62fca55d..da5c59daf8ba 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6623,7 +6623,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status, wufc;
int retval = 0;
- u16 smb_ctrl;
/* Runtime suspend should only enable wakeup for link changes */
if (runtime)
@@ -6697,23 +6696,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (retval)
return retval;
}
-
- /* Force SMBUS to allow WOL */
- /* Switching PHY interface always returns MDI error
- * so disable retry mechanism to avoid wasting time
- */
- e1000e_disable_phy_retry(hw);
-
- e1e_rphy(hw, CV_SMB_CTRL, &smb_ctrl);
- smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
- e1e_wphy(hw, CV_SMB_CTRL, smb_ctrl);
-
- e1000e_enable_phy_retry(hw);
-
- /* Force SMBus mode in MAC */
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, ctrl_ext);
}
/* Ensure that the appropriate bits are set in LPI_CTRL
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1f188c052828..284c3fad5a6e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -11171,6 +11171,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
ret = i40e_reset(pf);
if (!ret)
i40e_rebuild(pf, reinit, lock_acquired);
+ else
+ dev_err(&pf->pdev->dev, "%s: i40e_reset() FAILED", __func__);
}
/**
@@ -16335,6 +16337,139 @@ unmap:
}
/**
+ * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
+ * using the mac_address_write admin q function
+ * @pf: pointer to i40e_pf struct
+ **/
+static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
+{
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+ struct i40e_hw *hw = &pf->hw;
+ u8 mac_addr[6];
+ u16 flags = 0;
+ int ret;
+
+ /* Get current MAC address in case it's an LAA */
+ if (main_vsi && main_vsi->netdev) {
+ ether_addr_copy(mac_addr, main_vsi->netdev->dev_addr);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Failed to retrieve MAC address; using default\n");
+ ether_addr_copy(mac_addr, hw->mac.addr);
+ }
+
+ /* The FW expects the mac address write cmd to first be called with
+ * one of these flags before calling it again with the multicast
+ * enable flags.
+ */
+ flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
+
+ if (hw->func_caps.flex10_enable && hw->partition_id != 1)
+ flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
+
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
+ return;
+ }
+
+ flags = I40E_AQC_MC_MAG_EN
+ | I40E_AQC_WOL_PRESERVE_ON_PFR
+ | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Failed to enable Multicast Magic Packet wake up\n");
+}
+
+/**
+ * i40e_io_suspend - suspend all IO operations
+ * @pf: pointer to i40e_pf struct
+ *
+ **/
+static int i40e_io_suspend(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_DOWN, pf->state);
+
+ /* Ensure service task will not be running */
+ del_timer_sync(&pf->service_timer);
+ cancel_work_sync(&pf->service_task);
+
+ /* Client close must be called explicitly here because the timer
+ * has been stopped.
+ */
+ i40e_notify_client_of_netdev_close(pf, false);
+
+ if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
+ pf->wol_en)
+ i40e_enable_mc_magic_wake(pf);
+
+ /* Since we're going to destroy queues during the
+ * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
+ * whole section
+ */
+ rtnl_lock();
+
+ i40e_prep_for_reset(pf);
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ /* Clear the interrupt scheme and release our IRQs so that the system
+ * can safely hibernate even when there are a large number of CPUs.
+ * Otherwise hibernation might fail when mapping all the vectors back
+ * to CPU0.
+ */
+ i40e_clear_interrupt_scheme(pf);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+/**
+ * i40e_io_resume - resume IO operations
+ * @pf: pointer to i40e_pf struct
+ *
+ **/
+static int i40e_io_resume(struct i40e_pf *pf)
+{
+ struct device *dev = &pf->pdev->dev;
+ int err;
+
+ /* We need to hold the RTNL lock prior to restoring interrupt schemes,
+ * since we're going to be restoring queues
+ */
+ rtnl_lock();
+
+ /* We cleared the interrupt scheme when we suspended, so we need to
+ * restore it now to resume device functionality.
+ */
+ err = i40e_restore_interrupt_scheme(pf);
+ if (err) {
+ dev_err(dev, "Cannot restore interrupt scheme: %d\n",
+ err);
+ }
+
+ clear_bit(__I40E_DOWN, pf->state);
+ i40e_reset_and_rebuild(pf, false, true);
+
+ rtnl_unlock();
+
+ /* Clear suspended state last after everything is recovered */
+ clear_bit(__I40E_SUSPENDED, pf->state);
+
+ /* Restart the service task */
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+
+ return 0;
+}
+
+/**
* i40e_pci_error_detected - warning that something funky happened in PCI land
* @pdev: PCI device information struct
* @error: the type of PCI error
@@ -16358,7 +16493,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
/* shutdown all operations */
if (!test_bit(__I40E_SUSPENDED, pf->state))
- i40e_prep_for_reset(pf);
+ i40e_io_suspend(pf);
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -16380,7 +16515,8 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
u32 reg;
dev_dbg(&pdev->dev, "%s\n", __func__);
- if (pci_enable_device_mem(pdev)) {
+ /* enable I/O and memory of the device */
+ if (pci_enable_device(pdev)) {
dev_info(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
@@ -16443,54 +16579,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
if (test_bit(__I40E_SUSPENDED, pf->state))
return;
- i40e_handle_reset_warning(pf, false);
-}
-
-/**
- * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
- * using the mac_address_write admin q function
- * @pf: pointer to i40e_pf struct
- **/
-static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
-{
- struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
- struct i40e_hw *hw = &pf->hw;
- u8 mac_addr[6];
- u16 flags = 0;
- int ret;
-
- /* Get current MAC address in case it's an LAA */
- if (main_vsi && main_vsi->netdev) {
- ether_addr_copy(mac_addr, main_vsi->netdev->dev_addr);
- } else {
- dev_err(&pf->pdev->dev,
- "Failed to retrieve MAC address; using default\n");
- ether_addr_copy(mac_addr, hw->mac.addr);
- }
-
- /* The FW expects the mac address write cmd to first be called with
- * one of these flags before calling it again with the multicast
- * enable flags.
- */
- flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
-
- if (hw->func_caps.flex10_enable && hw->partition_id != 1)
- flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
-
- ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
- return;
- }
-
- flags = I40E_AQC_MC_MAG_EN
- | I40E_AQC_WOL_PRESERVE_ON_PFR
- | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
- ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
- if (ret)
- dev_err(&pf->pdev->dev,
- "Failed to enable Multicast Magic Packet wake up\n");
+ i40e_io_resume(pf);
}
/**
@@ -16552,48 +16641,11 @@ static void i40e_shutdown(struct pci_dev *pdev)
static int i40e_suspend(struct device *dev)
{
struct i40e_pf *pf = dev_get_drvdata(dev);
- struct i40e_hw *hw = &pf->hw;
/* If we're already suspended, then there is nothing to do */
if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
return 0;
-
- set_bit(__I40E_DOWN, pf->state);
-
- /* Ensure service task will not be running */
- del_timer_sync(&pf->service_timer);
- cancel_work_sync(&pf->service_task);
-
- /* Client close must be called explicitly here because the timer
- * has been stopped.
- */
- i40e_notify_client_of_netdev_close(pf, false);
-
- if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
- pf->wol_en)
- i40e_enable_mc_magic_wake(pf);
-
- /* Since we're going to destroy queues during the
- * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
- * whole section
- */
- rtnl_lock();
-
- i40e_prep_for_reset(pf);
-
- wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
- wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
-
- /* Clear the interrupt scheme and release our IRQs so that the system
- * can safely hibernate even when there are a large number of CPUs.
- * Otherwise hibernation might fail when mapping all the vectors back
- * to CPU0.
- */
- i40e_clear_interrupt_scheme(pf);
-
- rtnl_unlock();
-
- return 0;
+ return i40e_io_suspend(pf);
}
/**
@@ -16603,39 +16655,11 @@ static int i40e_suspend(struct device *dev)
static int i40e_resume(struct device *dev)
{
struct i40e_pf *pf = dev_get_drvdata(dev);
- int err;
/* If we're not suspended, then there is nothing to do */
if (!test_bit(__I40E_SUSPENDED, pf->state))
return 0;
-
- /* We need to hold the RTNL lock prior to restoring interrupt schemes,
- * since we're going to be restoring queues
- */
- rtnl_lock();
-
- /* We cleared the interrupt scheme when we suspended, so we need to
- * restore it now to resume device functionality.
- */
- err = i40e_restore_interrupt_scheme(pf);
- if (err) {
- dev_err(dev, "Cannot restore interrupt scheme: %d\n",
- err);
- }
-
- clear_bit(__I40E_DOWN, pf->state);
- i40e_reset_and_rebuild(pf, false, true);
-
- rtnl_unlock();
-
- /* Clear suspended state last after everything is recovered */
- clear_bit(__I40E_SUSPENDED, pf->state);
-
- /* Restart the service task */
- mod_timer(&pf->service_timer,
- round_jiffies(jiffies + pf->service_timer_period));
-
- return 0;
+ return i40e_io_resume(pf);
}
static const struct pci_error_handlers i40e_err_handler = {
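The suspend/resume consolidation above works because the PM callbacks and the PCI error handlers now funnel into the same i40e_io_suspend()/i40e_io_resume() helpers, guarded by the __I40E_SUSPENDED bit so a second caller becomes a no-op. A minimal userspace sketch of that idempotence guard, using a C11 atomic flag in place of the kernel's test_and_set_bit(); io_suspend() and quiesce_device() are hypothetical stand-ins:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag suspended = ATOMIC_FLAG_INIT;

/* Hypothetical stand-in for the real quiesce work done on suspend. */
static void quiesce_device(void)
{
	printf("quiescing device\n");
}

static int io_suspend(void)
{
	/* Like test_and_set_bit(__I40E_SUSPENDED): only the first caller acts. */
	if (atomic_flag_test_and_set(&suspended))
		return 0;

	quiesce_device();
	return 0;
}

int main(void)
{
	io_suspend();	/* performs the suspend */
	io_suspend();	/* already suspended: no-op */
	return 0;
}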
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index c4b69655cdf5..704e9ad5144e 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -1388,7 +1388,7 @@ enum ice_param_id {
ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
};
-static const struct devlink_param ice_devlink_params[] = {
+static const struct devlink_param ice_dvl_rdma_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
ice_devlink_enable_roce_get,
ice_devlink_enable_roce_set,
@@ -1397,6 +1397,9 @@ static const struct devlink_param ice_devlink_params[] = {
ice_devlink_enable_iw_get,
ice_devlink_enable_iw_set,
ice_devlink_enable_iw_validate),
+};
+
+static const struct devlink_param ice_dvl_sched_params[] = {
DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
"tx_scheduling_layers",
DEVLINK_PARAM_TYPE_U8,
@@ -1464,21 +1467,31 @@ int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
struct ice_hw *hw = &pf->hw;
- size_t params_size;
+ int status;
- params_size = ARRAY_SIZE(ice_devlink_params);
+ status = devl_params_register(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
+ if (status)
+ return status;
- if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
- params_size--;
+ if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
+ status = devl_params_register(devlink, ice_dvl_sched_params,
+ ARRAY_SIZE(ice_dvl_sched_params));
- return devl_params_register(devlink, ice_devlink_params,
- params_size);
+ return status;
}
void ice_devlink_unregister_params(struct ice_pf *pf)
{
- devl_params_unregister(priv_to_devlink(pf), ice_devlink_params,
- ARRAY_SIZE(ice_devlink_params));
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_hw *hw = &pf->hw;
+
+ devl_params_unregister(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
+
+ if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
+ devl_params_unregister(devlink, ice_dvl_sched_params,
+ ARRAY_SIZE(ice_dvl_sched_params));
}
#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
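The hunk above splits one parameter array into an always-present group and a capability-gated group so registration and unregistration can follow the same condition. A small sketch of that pattern, with register_params()/unregister_params() as hypothetical stand-ins for devl_params_register()/devl_params_unregister():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct param { const char *name; };

/* Hypothetical stand-ins for the devlink registration helpers. */
static int register_params(const struct param *p, size_t n)
{
	for (size_t i = 0; i < n; i++)
		printf("register %s\n", p[i].name);
	return 0;
}

static void unregister_params(const struct param *p, size_t n)
{
	for (size_t i = 0; i < n; i++)
		printf("unregister %s\n", p[i].name);
}

static const struct param rdma_params[] = { { "enable_roce" }, { "enable_iwarp" } };
static const struct param sched_params[] = { { "tx_scheduling_layers" } };

int main(void)
{
	bool tx_sched_supported = false;	/* capability bit read from the device */
	int err;

	/* Always-present group first ... */
	err = register_params(rdma_params, ARRAY_SIZE(rdma_params));
	if (err)
		return 1;

	/* ... capability-gated group only when the firmware supports it. */
	if (tx_sched_supported)
		err = register_params(sched_params, ARRAY_SIZE(sched_params));

	/* Teardown mirrors the exact same condition. */
	unregister_params(rdma_params, ARRAY_SIZE(rdma_params));
	if (tx_sched_supported)
		unregister_params(sched_params, ARRAY_SIZE(sched_params));
	return err ? 1 : 0;
}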
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 6ad8002b22e1..99a75a59078e 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -409,7 +409,6 @@ struct ice_vsi {
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_tx_ring **xdp_rings; /* XDP ring array */
- unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -747,6 +746,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
}
/**
+ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
+ *
+ * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
+ * attached and configured as zero-copy, NULL otherwise.
+ */
+static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
+ u16 qid)
+{
+ struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+
+ if (!ice_is_xdp_ena_vsi(vsi))
+ return NULL;
+
+ return (pool && pool->dev) ? pool : NULL;
+}
+
+/**
* ice_xsk_pool - get XSK buffer pool bound to a ring
* @ring: Rx ring to use
*
@@ -758,10 +776,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
- return NULL;
-
- return xsk_get_pool_from_qid(vsi->netdev, qid);
+ return ice_get_xp_from_qid(vsi, qid);
}
/**
@@ -786,12 +801,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
if (!ring)
return;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
- ring->xsk_pool = NULL;
- return;
- }
-
- ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+ ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
}
/**
@@ -920,9 +930,17 @@ int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+
+enum ice_xdp_cfg {
+ ICE_XDP_CFG_FULL, /* Fully apply new config in .ndo_bpf() */
+ ICE_XDP_CFG_PART, /* Save/use part of config in VSI rebuild */
+};
+
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+ enum ice_xdp_cfg cfg_type);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 687f6cb2b917..5d396c1a7731 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -842,6 +842,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
}
rx_rings_rem -= rx_rings_per_v;
}
+
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_map_xdp_rings(vsi);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 5649b257e631..24716a3b494c 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -3148,6 +3148,16 @@ ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
case ICE_PHY_TYPE_HIGH_100G_AUI2:
speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
break;
+ case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
+ case ICE_PHY_TYPE_HIGH_200G_SR4:
+ case ICE_PHY_TYPE_HIGH_200G_FR4:
+ case ICE_PHY_TYPE_HIGH_200G_LR4:
+ case ICE_PHY_TYPE_HIGH_200G_DR4:
+ case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
+ case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_200G_AUI4:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
+ break;
default:
speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index ce5034ed2b24..f182179529b7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1339,6 +1339,7 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
for (i = 0; i < count; i++) {
bool last = false;
+ int try_cnt = 0;
int status;
bh = (struct ice_buf_hdr *)(bufs + start + i);
@@ -1346,8 +1347,26 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
if (indicate_last)
last = ice_is_last_download_buffer(bh, i, count);
- status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
- &offset, &info, NULL);
+ while (1) {
+ status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE,
+ last, &offset, &info,
+ NULL);
+ if (hw->adminq.sq_last_status != ICE_AQ_RC_ENOSEC &&
+ hw->adminq.sq_last_status != ICE_AQ_RC_EBADSIG)
+ break;
+
+ try_cnt++;
+
+ if (try_cnt == 5)
+ break;
+
+ msleep(20);
+ }
+
+ if (try_cnt)
+ dev_dbg(ice_hw_to_dev(hw),
+ "ice_aq_download_pkg number of retries: %d\n",
+ try_cnt);
/* Save AQ status from download package */
if (status) {
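The download loop above retries only the two transient admin-queue errors (ICE_AQ_RC_ENOSEC / ICE_AQ_RC_EBADSIG), bounds the number of attempts, and sleeps between them. A runnable userspace sketch of that bounded-retry shape, with send_buf() and is_transient() as hypothetical stand-ins for ice_aq_download_pkg() and the AQ status check:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_TRIES 5

/* Hypothetical stand-ins: fail twice with a transient error, then succeed. */
static int send_buf(int attempt) { return attempt < 2 ? -EAGAIN : 0; }
static bool is_transient(int status) { return status == -EAGAIN; }

int main(void)
{
	int try_cnt = 0, status;

	while (1) {
		status = send_buf(try_cnt);
		if (!is_transient(status))
			break;			/* success, or an error not worth retrying */

		if (++try_cnt == MAX_TRIES)
			break;			/* bounded: give up after five attempts */

		usleep(20 * 1000);		/* analogous to msleep(20) in the driver */
	}

	if (try_cnt)
		printf("number of retries: %d\n", try_cnt);
	return status ? 1 : 0;
}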
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 5371e91f6bbb..7629b0190578 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -114,14 +114,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->q_vectors)
goto err_vectors;
- vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
- if (!vsi->af_xdp_zc_qps)
- goto err_zc_qps;
-
return 0;
-err_zc_qps:
- devm_kfree(dev, vsi->q_vectors);
err_vectors:
devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
@@ -309,8 +303,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
- bitmap_free(vsi->af_xdp_zc_qps);
- vsi->af_xdp_zc_qps = NULL;
/* free the ring and vector containers */
devm_kfree(dev, vsi->q_vectors);
vsi->q_vectors = NULL;
@@ -2282,22 +2274,23 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
if (ret)
goto unroll_vector_base;
- ice_vsi_map_rings_to_vectors(vsi);
-
- /* Associate q_vector rings to napi */
- ice_vsi_set_napi_queues(vsi);
-
- vsi->stat_offsets_loaded = false;
-
if (ice_is_xdp_ena_vsi(vsi)) {
ret = ice_vsi_determine_xdp_res(vsi);
if (ret)
goto unroll_vector_base;
- ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+ ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
+ ICE_XDP_CFG_PART);
if (ret)
goto unroll_vector_base;
}
+ ice_vsi_map_rings_to_vectors(vsi);
+
+ /* Associate q_vector rings to napi */
+ ice_vsi_set_napi_queues(vsi);
+
+ vsi->stat_offsets_loaded = false;
+
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
if (vsi->type != ICE_VSI_CTRL)
/* Do not exit if configuring RSS had an issue, at
@@ -2437,7 +2430,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
/* return value check can be skipped here, it always returns
* 0 if reset is in progress
*/
- ice_destroy_xdp_rings(vsi);
+ ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
ice_vsi_clear_rings(vsi);
ice_vsi_free_q_vectors(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f60c022f7960..55a42aad92a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -805,6 +805,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
switch (vsi->port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_200GB:
+ speed = "200 G";
+ break;
case ICE_AQ_LINK_SPEED_100GB:
speed = "100 G";
break;
@@ -2707,17 +2710,72 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
bpf_prog_put(old_prog);
}
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *ring;
+
+ if (static_key_enabled(&ice_xdp_locking_key))
+ return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+ q_vector = vsi->rx_rings[qid]->q_vector;
+ ice_for_each_tx_ring(ring, q_vector->tx)
+ if (ice_ring_is_xdp(ring))
+ return ring;
+
+ return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+ int xdp_rings_rem = vsi->num_xdp_txq;
+ int v_idx, q_idx;
+
+ /* follow the logic from ice_vsi_map_rings_to_vectors */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ int xdp_rings_per_v, q_id, q_base;
+
+ xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+ vsi->num_q_vectors - v_idx);
+ q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = xdp_ring;
+ }
+ xdp_rings_rem -= xdp_rings_per_v;
+ }
+
+ ice_for_each_rxq(vsi, q_idx) {
+ vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+ q_idx);
+ ice_tx_xsk_pool(vsi, q_idx);
+ }
+}
+
/**
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
* @vsi: VSI to bring up Tx rings used by XDP
* @prog: bpf program that will be assigned to VSI
+ * @cfg_type: create from scratch or restore the existing configuration
*
* Return 0 on success and negative value on error
*/
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+ enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- int xdp_rings_rem = vsi->num_xdp_txq;
struct ice_pf *pf = vsi->back;
struct ice_qs_cfg xdp_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
@@ -2730,8 +2788,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
.mapping_mode = ICE_VSI_MAP_CONTIG
};
struct device *dev;
- int i, v_idx;
- int status;
+ int status, i;
dev = ice_pf_to_dev(pf);
vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2750,49 +2807,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
if (ice_xdp_alloc_setup_rings(vsi))
goto clear_xdp_rings;
- /* follow the logic from ice_vsi_map_rings_to_vectors */
- ice_for_each_q_vector(vsi, v_idx) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- int xdp_rings_per_v, q_id, q_base;
-
- xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
- vsi->num_q_vectors - v_idx);
- q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
- xdp_ring->q_vector = q_vector;
- xdp_ring->next = q_vector->tx.tx_ring;
- q_vector->tx.tx_ring = xdp_ring;
- }
- xdp_rings_rem -= xdp_rings_per_v;
- }
-
- ice_for_each_rxq(vsi, i) {
- if (static_key_enabled(&ice_xdp_locking_key)) {
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
- } else {
- struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
- struct ice_tx_ring *ring;
-
- ice_for_each_tx_ring(ring, q_vector->tx) {
- if (ice_ring_is_xdp(ring)) {
- vsi->rx_rings[i]->xdp_ring = ring;
- break;
- }
- }
- }
- ice_tx_xsk_pool(vsi, i);
- }
-
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
*/
- if (ice_is_reset_in_progress(pf->state))
+ if (cfg_type == ICE_XDP_CFG_PART)
return 0;
+ ice_map_xdp_rings(vsi);
+
/* tell the Tx scheduler that right now we have
* additional queues
*/
@@ -2842,22 +2865,21 @@ err_map_xdp:
/**
* ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
* @vsi: VSI to remove XDP rings
+ * @cfg_type: disable XDP permanently or allow it to be restored later
*
* Detach XDP rings from irq vectors, clean up the PF bitmap and free
* resources
*/
-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
int i, v_idx;
/* q_vectors are freed in reset path so there's no point in detaching
- * rings; in case of rebuild being triggered not from reset bits
- * in pf->state won't be set, so additionally check first q_vector
- * against NULL
+ * rings
*/
- if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ if (cfg_type == ICE_XDP_CFG_PART)
goto free_qmap;
ice_for_each_q_vector(vsi, v_idx) {
@@ -2898,7 +2920,7 @@ free_qmap:
if (static_key_enabled(&ice_xdp_locking_key))
static_branch_dec(&ice_xdp_locking_key);
- if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ if (cfg_type == ICE_XDP_CFG_PART)
return 0;
ice_vsi_assign_bpf_prog(vsi, NULL);
@@ -3009,7 +3031,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
} else {
- xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
+ ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
@@ -3020,7 +3043,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_features_clear_redirect_target(vsi->netdev);
- xdp_ring_err = ice_destroy_xdp_rings(vsi);
+ xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
/* reallocate Rx queues that were used for zero-copy */
@@ -4116,7 +4139,7 @@ bool ice_is_wol_supported(struct ice_hw *hw)
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
{
struct ice_pf *pf = vsi->back;
- int err = 0, timeout = 50;
+ int i, err = 0, timeout = 50;
if (!new_rx && !new_tx)
return -EINVAL;
@@ -4142,6 +4165,14 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
ice_vsi_close(vsi);
ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+
+ ice_for_each_traffic_class(i) {
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ netdev_set_tc_queue(vsi->netdev,
+ vsi->tc_cfg.tc_info[i].netdev_tc,
+ vsi->tc_cfg.tc_info[i].qcount_tx,
+ vsi->tc_cfg.tc_info[i].qoffset);
+ }
ice_pf_dcb_recfg(pf, locked);
ice_vsi_open(vsi);
done:
@@ -5544,7 +5575,7 @@ static int ice_suspend(struct device *dev)
*/
disabled = ice_service_task_stop(pf);
- ice_unplug_aux_dev(pf);
+ ice_deinit_rdma(pf);
/* Already suspended?, then there is nothing to do */
if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
@@ -5624,6 +5655,11 @@ static int ice_resume(struct device *dev)
if (ret)
dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
+ ret = ice_init_rdma(pf);
+ if (ret)
+ dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
+ ret);
+
clear_bit(ICE_DOWN, pf->state);
/* Now perform PF reset and rebuild */
reset_type = ICE_RESET_PFR;
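ice_map_xdp_rings() above spreads the XDP Tx rings over the interrupt vectors with the same ceil-division scheme used for regular rings: each vector takes DIV_ROUND_UP(remaining rings, remaining vectors), so the per-vector counts differ by at most one and every ring is assigned exactly once. A runnable sketch of just that distribution; the ring and vector counts are sample values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int num_rings = 10, num_vectors = 4;
	int rings_rem = num_rings;

	for (int v = 0; v < num_vectors; v++) {
		/* Ceil division over what is left keeps the split as even as possible. */
		int per_v = DIV_ROUND_UP(rings_rem, num_vectors - v);
		int base = num_rings - rings_rem;

		for (int q = base; q < base + per_v; q++)
			printf("ring %d -> vector %d\n", q, v);

		rings_rem -= per_v;
	}
	return 0;
}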
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 84eab92dc03c..59e8879ac059 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -374,11 +374,25 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
*
* Read the specified word from the copy of the Shadow RAM found in the
* specified NVM module.
+ *
+ * Note that the Shadow RAM copy is always located after the CSS header, and
+ * is aligned to 64-byte (32-word) offsets.
*/
static int
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
+ u32 sr_copy;
+
+ switch (bank) {
+ case ICE_ACTIVE_FLASH_BANK:
+ sr_copy = roundup(hw->flash.banks.active_css_hdr_len, 32);
+ break;
+ case ICE_INACTIVE_FLASH_BANK:
+ sr_copy = roundup(hw->flash.banks.inactive_css_hdr_len, 32);
+ break;
+ }
+
+ return ice_read_nvm_module(hw, bank, sr_copy + offset, data);
}
/**
@@ -440,8 +454,7 @@ int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
- u16 pfa_len, pfa_ptr;
- u16 next_tlv;
+ u16 pfa_len, pfa_ptr, next_tlv, max_tlv;
int status;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
@@ -454,11 +467,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
return status;
}
+
+ /* The Preserved Fields Area contains a sequence of Type-Length-Value
+ * structures which define its contents. The PFA length includes all
+ * of the TLVs, plus the initial length word itself, *and* one final
+ * word at the end after all of the TLVs.
+ */
+ if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) {
+ dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n",
+ pfa_ptr, pfa_len);
+ return -EINVAL;
+ }
+
/* Starting with first TLV after PFA length, iterate through the list
* of TLVs to find the requested one.
*/
next_tlv = pfa_ptr + 1;
- while (next_tlv < pfa_ptr + pfa_len) {
+ while (next_tlv < max_tlv) {
u16 tlv_sub_module_type;
u16 tlv_len;
@@ -482,10 +507,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
}
return -EINVAL;
}
- /* Check next TLV, i.e. current TLV pointer + length + 2 words
- * (for current TLV's type and length)
- */
- next_tlv = next_tlv + tlv_len + 2;
+
+ if (check_add_overflow(next_tlv, 2, &next_tlv) ||
+ check_add_overflow(next_tlv, tlv_len, &next_tlv)) {
+ dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n",
+ tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len);
+ return -EINVAL;
+ }
}
/* Module does not exist */
return -ENOENT;
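The TLV walk above replaces unchecked 16-bit pointer arithmetic with check_add_overflow() so a corrupt PFA length or TLV length cannot wrap the 16-bit offsets that bound the scan. A userspace sketch of the same bounds discipline using the compiler's __builtin_add_overflow(); the offsets and lengths are made-up sample values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t pfa_ptr = 0xFF00, pfa_len = 0x0040;
	uint16_t max_tlv, next_tlv;

	/* End of the area: pfa_ptr + pfa_len - 1, rejected if it wraps. */
	if (__builtin_add_overflow(pfa_ptr, (uint16_t)(pfa_len - 1), &max_tlv)) {
		fprintf(stderr, "PFA length overflows 16-bit offset space\n");
		return 1;
	}

	next_tlv = pfa_ptr + 1;
	while (next_tlv < max_tlv) {
		uint16_t tlv_len = 8;	/* would be read from the TLV header */

		printf("TLV at 0x%04x, len %u\n", next_tlv, tlv_len);

		/* Advance past the type and length words plus the value, checking each add. */
		if (__builtin_add_overflow(next_tlv, (uint16_t)2, &next_tlv) ||
		    __builtin_add_overflow(next_tlv, tlv_len, &next_tlv)) {
			fprintf(stderr, "TLV length overflows 16-bit offset space\n");
			return 1;
		}
	}
	return 0;
}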
@@ -1010,6 +1038,72 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw)
}
/**
+ * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the Authentication
+ * header size, and then convert to words.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int
+ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ int status;
+
+ status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (status)
+ return status;
+
+ status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (status)
+ return status;
+
+ /* CSS header length is in DWORD, so convert to words and add
+ * authentication header size
+ */
+ hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+ *hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;
+
+ return 0;
+}
+
+/**
+ * ice_determine_css_hdr_len - Discover CSS header length for the device
+ * @hw: pointer to the HW struct
+ *
+ * Determine the size of the CSS header at the start of the NVM module. This
+ * is useful for locating the Shadow RAM copy in the NVM, as the Shadow RAM is
+ * always located just after the CSS header.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int ice_determine_css_hdr_len(struct ice_hw *hw)
+{
+ struct ice_bank_info *banks = &hw->flash.banks;
+ int status;
+
+ status = ice_get_nvm_css_hdr_len(hw, ICE_ACTIVE_FLASH_BANK,
+ &banks->active_css_hdr_len);
+ if (status)
+ return status;
+
+ status = ice_get_nvm_css_hdr_len(hw, ICE_INACTIVE_FLASH_BANK,
+ &banks->inactive_css_hdr_len);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+/**
* ice_init_nvm - initializes NVM setting
* @hw: pointer to the HW struct
*
@@ -1055,6 +1149,12 @@ int ice_init_nvm(struct ice_hw *hw)
return status;
}
+ status = ice_determine_css_hdr_len(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to determine Shadow RAM copy offsets.\n");
+ return status;
+ }
+
status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n");
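With the fixed 330-word ICE_CSS_HEADER_LENGTH gone, the Shadow RAM copy offset is now derived per bank from the CSS header itself: the dword count is converted to words, the authentication header is added, and the result is aligned to 32 words. A small sketch of that computation; the dword count is a made-up example, chosen so that 161 dwords reproduces the old 330-word figure:

#include <stdint.h>
#include <stdio.h>

/* Same definition as the kernel's roundup() macro. */
#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

#define AUTH_HEADER_WORDS	8	/* ICE_NVM_AUTH_HEADER_LEN in the patch */

int main(void)
{
	uint32_t hdr_len_dwords = 161;	/* read from CSS header words 0x02/0x03 */
	uint32_t hdr_len_words = hdr_len_dwords * 2 + AUTH_HEADER_WORDS;
	uint32_t sr_copy_offset = ROUNDUP(hdr_len_words, 32);

	printf("CSS header: %u words, Shadow RAM copy at word offset %u\n",
	       hdr_len_words, sr_copy_offset);
	return 0;
}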
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 94d6670d0901..1191031b2a43 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1899,7 +1899,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
- lkup_type == ICE_SW_LKUP_DFLT) {
+ lkup_type == ICE_SW_LKUP_DFLT ||
+ lkup_type == ICE_SW_LKUP_LAST) {
sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
if (opc == ice_aqc_opc_alloc_res)
@@ -2922,7 +2923,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
- lkup_type == ICE_SW_LKUP_DFLT)
+ lkup_type == ICE_SW_LKUP_DFLT ||
+ lkup_type == ICE_SW_LKUP_LAST)
rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index f0796a93f428..eef397e5baa0 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -482,6 +482,8 @@ struct ice_bank_info {
u32 orom_size; /* Size of OROM bank */
u32 netlist_ptr; /* Pointer to 1st Netlist bank */
u32 netlist_size; /* Size of Netlist bank */
+ u32 active_css_hdr_len; /* Active CSS header length */
+ u32 inactive_css_hdr_len; /* Inactive CSS header length */
enum ice_flash_bank nvm_bank; /* Active NVM bank */
enum ice_flash_bank orom_bank; /* Active OROM bank */
enum ice_flash_bank netlist_bank; /* Active Netlist bank */
@@ -1087,17 +1089,13 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
/* CSS Header words */
+#define ICE_NVM_CSS_HDR_LEN_L 0x02
+#define ICE_NVM_CSS_HDR_LEN_H 0x03
#define ICE_NVM_CSS_SREV_L 0x14
#define ICE_NVM_CSS_SREV_H 0x15
-/* Length of CSS header section in words */
-#define ICE_CSS_HEADER_LENGTH 330
-
-/* Offset of Shadow RAM copy in the NVM bank area. */
-#define ICE_NVM_SR_COPY_WORD_OFFSET roundup(ICE_CSS_HEADER_LENGTH, 32)
-
-/* Size in bytes of Option ROM trailer */
-#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH)
+/* Length of Authentication header section in words */
+#define ICE_NVM_AUTH_HEADER_LEN 0x08
/* The Link Topology Netlist section is stored as a series of words. It is
* stored in the NVM as a TLV, with the first two words containing the type
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 2e9ad27cb9d1..6e8f2aab6080 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -45,14 +45,15 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
return -EINVAL;
err = ice_fltr_add_vlan(vsi, vlan);
- if (err && err != -EEXIST) {
+ if (!err)
+ vsi->num_vlan++;
+ else if (err == -EEXIST)
+ err = 0;
+ else
dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n",
vlan->vid, vsi->vsi_num, err);
- return err;
- }
- vsi->num_vlan++;
- return 0;
+ return err;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 7541f223bf4f..a65955eb23c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -269,7 +269,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
if (!pool)
return -EINVAL;
- clear_bit(qid, vsi->af_xdp_zc_qps);
xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
return 0;
@@ -300,8 +299,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if (err)
return err;
- set_bit(qid, vsi->af_xdp_zc_qps);
-
return 0;
}
@@ -349,11 +346,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
{
struct ice_rx_ring *rx_ring;
- unsigned long q;
+ uint i;
+
+ ice_for_each_rxq(vsi, i) {
+ rx_ring = vsi->rx_rings[i];
+ if (!rx_ring->xsk_pool)
+ continue;
- for_each_set_bit(q, vsi->af_xdp_zc_qps,
- max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
- rx_ring = vsi->rx_rings[q];
if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
return -ENOMEM;
}
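The ice_xsk.c change above drops the af_xdp_zc_qps bitmap entirely and derives zero-copy state from the ring's attached pool instead, so there is no second copy of the truth to keep in sync. A trivial sketch of that derive-rather-than-track pattern; struct rx_ring here is a hypothetical stand-in:

#include <stddef.h>
#include <stdio.h>

struct rx_ring { void *xsk_pool; };

int main(void)
{
	void *pool = (void *)0x1;	/* placeholder for an attached XSK buffer pool */
	struct rx_ring rings[4] = { { NULL }, { pool }, { NULL }, { pool } };

	for (size_t q = 0; q < 4; q++) {
		/* A queue is zero-copy iff its ring has a pool; no bitmap needed. */
		if (!rings[q].xsk_pool)
			continue;
		printf("queue %zu: reallocating zero-copy buffers\n", q);
	}
	return 0;
}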
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 52ceda6306a3..f1ee5584e8fa 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -1394,6 +1394,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
}
idpf_rx_init_buf_tail(vport);
+ idpf_vport_intr_ena(vport);
err = idpf_send_config_queues_msg(vport);
if (err) {
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 285da2177ee4..b023704bbbda 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3746,9 +3746,9 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
*/
void idpf_vport_intr_deinit(struct idpf_vport *vport)
{
+ idpf_vport_intr_dis_irq_all(vport);
idpf_vport_intr_napi_dis_all(vport);
idpf_vport_intr_napi_del_all(vport);
- idpf_vport_intr_dis_irq_all(vport);
idpf_vport_intr_rel_irq(vport);
}
@@ -4179,7 +4179,6 @@ int idpf_vport_intr_init(struct idpf_vport *vport)
idpf_vport_intr_map_vector_to_qs(vport);
idpf_vport_intr_napi_add_all(vport);
- idpf_vport_intr_napi_ena_all(vport);
err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
if (err)
@@ -4193,17 +4192,20 @@ int idpf_vport_intr_init(struct idpf_vport *vport)
if (err)
goto unroll_vectors_alloc;
- idpf_vport_intr_ena_irq_all(vport);
-
return 0;
unroll_vectors_alloc:
- idpf_vport_intr_napi_dis_all(vport);
idpf_vport_intr_napi_del_all(vport);
return err;
}
+void idpf_vport_intr_ena(struct idpf_vport *vport)
+{
+ idpf_vport_intr_napi_ena_all(vport);
+ idpf_vport_intr_ena_irq_all(vport);
+}
+
/**
* idpf_config_rss - Send virtchnl messages to configure RSS
* @vport: virtual port
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 3d046b81e507..551391e20464 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -990,6 +990,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
+void idpf_vport_intr_ena(struct idpf_vport *vport);
enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index f2c4f1966bb0..0cd2bd695db1 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1629,12 +1629,17 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
struct igc_hw *hw = &adapter->hw;
u32 eeer;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->supported);
+
if (hw->dev_spec._base.eee_enable)
mii_eee_cap1_mod_linkmode_t(edata->advertised,
adapter->eee_advert);
- *edata = adapter->eee;
-
eeer = rd32(IGC_EEER);
/* EEE status on negotiated link */
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 12f004f46082..87b655b839c1 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -12,6 +12,7 @@
#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <linux/pci.h>
+#include <linux/mdio.h>
#include <net/ipv6.h>
@@ -4975,6 +4976,9 @@ void igc_up(struct igc_adapter *adapter)
/* start the watchdog. */
hw->mac.get_link_status = true;
schedule_work(&adapter->watchdog_task);
+
+ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T |
+ MDIO_EEE_2_5GT;
}
/**
@@ -7028,6 +7032,8 @@ static int igc_probe(struct pci_dev *pdev,
device_set_wakeup_enable(&adapter->pdev->dev,
adapter->flags & IGC_FLAG_WOL_SUPPORTED);
+ igc_ptp_init(adapter);
+
igc_tsn_clear_schedule(adapter);
/* reset the hardware with the new settings */
@@ -7049,9 +7055,6 @@ static int igc_probe(struct pci_dev *pdev,
/* Check if Media Autosense is enabled */
adapter->ei = *ei;
- /* do hw tstamp init after resetting */
- igc_ptp_init(adapter);
-
/* print pcie link status and MAC address */
pcie_print_link_status(pdev);
netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index e91486c48de3..9adf4301c9b1 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4014,7 +4014,10 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
}
}
- skb = build_skb(data, frag_size);
+ if (frag_size)
+ skb = build_skb(data, frag_size);
+ else
+ skb = slab_build_skb(data);
if (!skb) {
netdev_warn(port->dev, "skb build failed\n");
goto err_drop_frame;
@@ -6904,6 +6907,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
/* 9704 == 9728 - 20 and rounding to 8 */
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
device_set_node(&dev->dev, port_fwnode);
+ dev->dev_port = port->id;
port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
port->pcs_gmac.neg_mode = true;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index e8b73b9d75e3..97722ce8c4cb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -2519,7 +2519,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
* - when available free entries are less.
* Lower priority ones out of available free entries are always
* chosen when 'high vs low' question arises.
+ *
+ * For a VF base MCAM match rule is set by its PF. And all the
+ * further MCAM rules installed by VF on its own are
+ * concatenated with the base rule set by its PF. Hence PF entries
+ * should be at lower priority compared to VF entries. Otherwise
+ * base rule is hit always and rules installed by VF will be of
+ * no use. Hence if the request is from PF then allocate low
+ * priority entries.
*/
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ goto lprio_alloc;
/* Get the search range for priority allocation request */
if (req->priority) {
@@ -2528,17 +2538,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
goto alloc;
}
- /* For a VF base MCAM match rule is set by its PF. And all the
- * further MCAM rules installed by VF on its own are
- * concatenated with the base rule set by its PF. Hence PF entries
- * should be at lower priority compared to VF entries. Otherwise
- * base rule is hit always and rules installed by VF will be of
- * no use. Hence if the request is from PF and NOT a priority
- * allocation request then allocate low priority entries.
- */
- if (!(pcifunc & RVU_PFVF_FUNC_MASK))
- goto lprio_alloc;
-
/* Find out the search range for non-priority allocation request
*
* Get MCAM free entry count in middle zone.
@@ -2568,6 +2567,18 @@ lprio_alloc:
reverse = true;
start = 0;
end = mcam->bmap_entries;
+ /* Ensure PF requests are always at bottom and if PF requests
+ * for higher/lower priority entry wrt reference entry then
+ * honour that criteria and start search for entries from bottom
+ * and not in mid zone.
+ */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+ req->priority == NPC_MCAM_HIGHER_PRIO)
+ end = req->ref_entry;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+ req->priority == NPC_MCAM_LOWER_PRIO)
+ start = req->ref_entry;
}
alloc:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 5664f768cb0c..64a97a0a10ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -9,10 +9,9 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
otx2_devlink.o qos_sq.o qos.o
-rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+rvu_nicvf-y := otx2_vf.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
-rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index a85ac039d779..87d5776e3b88 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -648,14 +648,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
} else if (lvl == NIX_TXSCH_LVL_TL4) {
parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL3) {
parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
@@ -670,11 +670,11 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
- req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
+ req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val;
if (lvl == hw->txschq_link_cfg_lvl) {
req->num_regs++;
@@ -698,7 +698,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->num_regs++;
req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
- req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+ req->regval[1] = hw->txschq_aggr_lvl_rr_prio << 1;
req->num_regs++;
req->reg[2] = NIX_AF_TL1X_CIR(schq);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 28fb643d2917..aa01110f04a3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -54,6 +54,7 @@ int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_config);
static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
{
@@ -122,6 +123,7 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_alloc);
static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
@@ -260,6 +262,7 @@ update_sq_smq_map:
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_update);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
{
@@ -282,6 +285,7 @@ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_stop);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
@@ -321,6 +325,7 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_config_priority_flow_ctrl);
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
bool pfc_enable)
@@ -385,6 +390,7 @@ out:
"Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
qidx, err);
}
+EXPORT_SYMBOL(otx2_update_bpid_in_rqctx);
static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
@@ -472,3 +478,4 @@ int otx2_dcbnl_set_ops(struct net_device *dev)
return 0;
}
+EXPORT_SYMBOL(otx2_dcbnl_set_ops);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 99ddf31269d9..458d34a62e18 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -113,6 +113,7 @@ err_dl:
devlink_free(dl);
return err;
}
+EXPORT_SYMBOL(otx2_register_dl);
void otx2_unregister_dl(struct otx2_nic *pfvf)
{
@@ -124,3 +125,4 @@ void otx2_unregister_dl(struct otx2_nic *pfvf)
ARRAY_SIZE(otx2_dl_params));
devlink_free(dl);
}
+EXPORT_SYMBOL(otx2_unregister_dl);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index 45a32e4b49d1..e3aee6e36215 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -139,33 +139,34 @@
#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12)
/* NIX AF transmit scheduler registers */
-#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
-#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
-#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
-#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
-#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
-#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
-#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16)
-#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16)
-#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16)
-#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
-#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
-#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16)
-#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16)
-#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16)
-#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16)
-#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
-#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
-#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16)
-#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16)
-#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
-#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16)
-#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
-#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16)
-#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16)
-#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16)
-#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
-#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
+#define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16)
+#define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16)
+#define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16)
+#define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16)
+#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16)
+#define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16)
+#define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16)
+#define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16)
+#define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16)
+#define NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16)
+#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16)
+#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16)
+#define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16)
+#define NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16)
+#define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3)
/* LMT LF registers */
#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
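The (u64) casts added to these register macros force the shift to happen in 64-bit arithmetic; without them, (a) << 16 is evaluated as a 32-bit int expression before being widened into the 64-bit register offset, which can truncate (or, for signed operands, sign-extend) for sufficiently large index values. A standalone C example showing the difference with a deliberately large sample index:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t schq = 0x12345;			/* large sample index to expose the issue */

	uint64_t wrong = 0xE88 | schq << 16;		/* shifted in 32-bit, high bits lost */
	uint64_t right = 0xE88 | (uint64_t)schq << 16;	/* widened first, all bits kept */

	printf("wrong: 0x%016" PRIx64 "\n", wrong);	/* 0x0000000023450e88 */
	printf("right: 0x%016" PRIx64 "\n", right);	/* 0x0000000123450e88 */
	return 0;
}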
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index a16e9f244117..3eb85949677a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -513,7 +513,7 @@ process_cqe:
static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
{
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = { 0 };
u64 rx_frames, rx_bytes;
u64 tx_frames, tx_bytes;
@@ -1174,8 +1174,11 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
/* Insert vlan tag before giving pkt to tso */
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
skb = __vlan_hwaccel_push_inside(skb);
+ if (!skb)
+ return true;
+ }
otx2_sq_append_tso(pfvf, sq, skb, qidx);
return true;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 070711df612e..0f844c14485a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -153,7 +153,6 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
num_regs++;
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
-
} else if (level == NIX_TXSCH_LVL_TL4) {
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
} else if (level == NIX_TXSCH_LVL_TL3) {
@@ -176,7 +175,7 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
/* check if node is root */
if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
- cfg->regval[num_regs] = TXSCH_TL1_DFLT_RR_PRIO << 24 |
+ cfg->regval[num_regs] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 |
mtu_to_dwrr_weight(pfvf,
pfvf->tx_max_pktlen);
num_regs++;
@@ -1422,7 +1421,10 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);
/* delete the txschq nodes allocated for this node */
+ otx2_qos_disable_sq(pfvf, qid);
+ otx2_qos_free_hw_node_schq(pfvf, node);
otx2_qos_free_sw_node_schq(pfvf, node);
+ pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
/* mark this node as htb inner node */
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
@@ -1632,6 +1634,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
dwrr_del_node = true;
/* destroy the leaf node */
+ otx2_qos_disable_sq(pfvf, qid);
otx2_qos_destroy_node(pfvf, node);
pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index cae46290a7ae..c84ce54a84a0 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1131,9 +1131,9 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail;
- int cnt = MTK_QDMA_RING_SIZE;
+ int cnt = soc->tx.fq_dma_size;
dma_addr_t dma_addr;
- int i;
+ int i, j, len;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
eth->scratch_ring = eth->sram_base;
@@ -1142,40 +1142,46 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
cnt * soc->tx.desc_size,
&eth->phy_scratch_ring,
GFP_KERNEL);
+
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
- if (unlikely(!eth->scratch_head))
- return -ENOMEM;
+ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
- dma_addr = dma_map_single(eth->dma_dev,
- eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
- return -ENOMEM;
+ for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
+ len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
+ eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
- phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+ if (unlikely(!eth->scratch_head[j]))
+ return -ENOMEM;
- for (i = 0; i < cnt; i++) {
- dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
- struct mtk_tx_dma_v2 *txd;
+ dma_addr = dma_map_single(eth->dma_dev,
+ eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
+ DMA_FROM_DEVICE);
- txd = eth->scratch_ring + i * soc->tx.desc_size;
- txd->txd1 = addr;
- if (i < cnt - 1)
- txd->txd2 = eth->phy_scratch_ring +
- (i + 1) * soc->tx.desc_size;
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+ return -ENOMEM;
- txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
- if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
- txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
- txd->txd4 = 0;
- if (mtk_is_netsys_v2_or_greater(eth)) {
- txd->txd5 = 0;
- txd->txd6 = 0;
- txd->txd7 = 0;
- txd->txd8 = 0;
+ for (i = 0; i < cnt; i++) {
+ struct mtk_tx_dma_v2 *txd;
+
+ txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+ if (j * MTK_FQ_DMA_LENGTH + i < cnt)
+ txd->txd2 = eth->phy_scratch_ring +
+ (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
+
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+ txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
+
+ txd->txd4 = 0;
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
}
@@ -2457,7 +2463,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
ring_size = MTK_QDMA_RING_SIZE;
else
- ring_size = MTK_DMA_SIZE;
+ ring_size = soc->tx.dma_size;
ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
GFP_KERNEL);
@@ -2465,8 +2471,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
goto no_tx_mem;
if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
- ring->dma = eth->sram_base + ring_size * sz;
- ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
+ ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
+ ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
} else {
ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
&ring->phys, GFP_KERNEL);
@@ -2588,6 +2594,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_rx_ring *ring;
int rx_data_len, rx_dma_size, tx_ring_size;
int i;
@@ -2595,7 +2602,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
tx_ring_size = MTK_QDMA_RING_SIZE;
else
- tx_ring_size = MTK_DMA_SIZE;
+ tx_ring_size = soc->tx.dma_size;
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
@@ -2610,7 +2617,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rx_dma_size = MTK_HW_LRO_DMA_SIZE;
} else {
rx_data_len = ETH_DATA_LEN;
- rx_dma_size = MTK_DMA_SIZE;
+ rx_dma_size = soc->rx.dma_size;
}
ring->frag_size = mtk_max_frag_size(rx_data_len);
@@ -3139,7 +3146,10 @@ static void mtk_dma_free(struct mtk_eth *eth)
mtk_rx_clean(eth, &eth->rx_ring[i], false);
}
- kfree(eth->scratch_head);
+ for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
+ kfree(eth->scratch_head[i]);
+ eth->scratch_head[i] = NULL;
+ }
}
static bool mtk_hw_reset_check(struct mtk_eth *eth)
@@ -5052,11 +5062,14 @@ static const struct mtk_soc_data mt2701_data = {
.desc_size = sizeof(struct mtk_tx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5076,11 +5089,14 @@ static const struct mtk_soc_data mt7621_data = {
.desc_size = sizeof(struct mtk_tx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5102,11 +5118,14 @@ static const struct mtk_soc_data mt7622_data = {
.desc_size = sizeof(struct mtk_tx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5127,11 +5146,14 @@ static const struct mtk_soc_data mt7623_data = {
.desc_size = sizeof(struct mtk_tx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5150,11 +5172,14 @@ static const struct mtk_soc_data mt7629_data = {
.desc_size = sizeof(struct mtk_tx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5176,6 +5201,8 @@ static const struct mtk_soc_data mt7981_data = {
.desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
@@ -5183,6 +5210,7 @@ static const struct mtk_soc_data mt7981_data = {
.dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5202,6 +5230,8 @@ static const struct mtk_soc_data mt7986_data = {
.desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
@@ -5209,6 +5239,7 @@ static const struct mtk_soc_data mt7986_data = {
.dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5228,6 +5259,8 @@ static const struct mtk_soc_data mt7988_data = {
.desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(4K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma_v2),
@@ -5235,6 +5268,7 @@ static const struct mtk_soc_data mt7988_data = {
.dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5249,6 +5283,7 @@ static const struct mtk_soc_data rt5350_data = {
.desc_size = sizeof(struct mtk_tx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
@@ -5256,6 +5291,7 @@ static const struct mtk_soc_data rt5350_data = {
.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
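mtk_init_fq_dma() above stops allocating one huge scratch buffer and instead splits the free-queue backing memory into MTK_FQ_DMA_LENGTH-sized chunks, each with its own allocation and DMA mapping, so every individual allocation stays bounded even when fq_dma_size grows to 4K descriptors. A userspace sketch of that chunking loop; the sizes and names are stand-ins for the driver's constants:

#include <stdio.h>
#include <stdlib.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define PAGE_SZ		2048	/* stands in for MTK_QDMA_PAGE_SIZE */
#define CHUNK		2048	/* stands in for MTK_FQ_DMA_LENGTH */
#define MAX_CHUNKS	32	/* stands in for MTK_FQ_DMA_HEAD */

int main(void)
{
	int cnt = 4096;		/* fq_dma_size: too large for a single allocation */
	void *scratch_head[MAX_CHUNKS] = { 0 };
	int nchunks = DIV_ROUND_UP(cnt, CHUNK);

	for (int j = 0; j < nchunks; j++) {
		int len = cnt - j * CHUNK < CHUNK ? cnt - j * CHUNK : CHUNK;

		/* One bounded allocation (and, in the driver, one DMA mapping) per chunk. */
		scratch_head[j] = calloc(len, PAGE_SZ);
		if (!scratch_head[j])
			goto err;
		printf("chunk %d: %d pages\n", j, len);
	}

	for (int j = 0; j < nchunks; j++)
		free(scratch_head[j]);
	return 0;

err:
	for (int j = 0; j < nchunks; j++)
		free(scratch_head[j]);
	return 1;
}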
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 4eab30b44070..f5174f6cb1bb 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -32,7 +32,9 @@
#define MTK_TX_DMA_BUF_LEN 0x3fff
#define MTK_TX_DMA_BUF_LEN_V2 0xffff
#define MTK_QDMA_RING_SIZE 2048
-#define MTK_DMA_SIZE 512
+#define MTK_DMA_SIZE(x) (SZ_##x)
+#define MTK_FQ_DMA_HEAD 32
+#define MTK_FQ_DMA_LENGTH 2048
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC 0xffffffff
@@ -1176,6 +1178,8 @@ struct mtk_soc_data {
u32 desc_size;
u32 dma_max_len;
u32 dma_len_offset;
+ u32 dma_size;
+ u32 fq_dma_size;
} tx;
struct {
u32 desc_size;
@@ -1183,6 +1187,7 @@ struct mtk_soc_data {
u32 dma_l4_valid;
u32 dma_max_len;
u32 dma_len_offset;
+ u32 dma_size;
} rx;
};
@@ -1264,7 +1269,7 @@ struct mtk_eth {
struct napi_struct rx_napi;
void *scratch_ring;
dma_addr_t phy_scratch_ring;
- void *scratch_head;
+ void *scratch_head[MTK_FQ_DMA_HEAD];
struct clk *clks[MTK_CLK_MAX];
struct mii_bus *mii_bus;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index caa34b9c161e..33e32584b07f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -102,8 +102,14 @@ static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
{
int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+ struct udphdr *udphdr;
- udp_hdr(skb)->len = htons(payload_len);
+ if (skb->encapsulation)
+ udphdr = (struct udphdr *)skb_inner_transport_header(skb);
+ else
+ udphdr = udp_hdr(skb);
+
+ udphdr->len = htons(payload_len);
}
struct mlx5e_accel_tx_state {
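The en_accel.h hunk fixes UDP GSO over tunnels by patching the length field of the inner UDP header when the skb is encapsulated, rather than always touching the outer one. A compact sketch of that header selection with a hypothetical packet model; the driver stores the value big-endian via htons(), host order is used here for brevity:

#include <stdint.h>
#include <stdio.h>

struct udphdr_s { uint16_t source, dest, len, check; };

/* Hypothetical packet model: outer UDP header plus an optional inner one. */
struct pkt {
	int encapsulated;
	struct udphdr_s outer_udp;
	struct udphdr_s inner_udp;
	uint16_t gso_size;
};

/* Patch the UDP length each GSO segment will carry: inner header for tunnels. */
static void udp_gso_fixup(struct pkt *p)
{
	uint16_t payload_len = p->gso_size + sizeof(struct udphdr_s);
	struct udphdr_s *udp = p->encapsulated ? &p->inner_udp : &p->outer_udp;

	udp->len = payload_len;
}

int main(void)
{
	struct pkt p = { .encapsulated = 1, .gso_size = 1400 };

	udp_gso_fixup(&p);
	printf("inner UDP len: %u, outer UDP len left alone: %u\n",
	       (unsigned)p.inner_udp.len, (unsigned)p.outer_udp.len);
	return 0;
}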
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 41a2543a52cd..e51b03d4c717 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -750,8 +750,7 @@ err_fs:
err_fs_ft:
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+ mlx5_ipsec_rx_status_destroy(ipsec, rx);
err_add:
mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 82064614846f..359050f0b54d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -97,18 +97,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
if (!x || !x->xso.offload_handle)
goto out_disable;
- if (xo->inner_ipproto) {
- /* Cannot support tunnel packet over IPsec tunnel mode
- * because we cannot offload three IP header csum
- */
- if (x->props.mode == XFRM_MODE_TUNNEL)
- goto out_disable;
-
- /* Only support UDP or TCP L4 checksum */
- if (xo->inner_ipproto != IPPROTO_UDP &&
- xo->inner_ipproto != IPPROTO_TCP)
- goto out_disable;
- }
+ /* Only support UDP or TCP L4 checksum */
+ if (xo->inner_ipproto &&
+ xo->inner_ipproto != IPPROTO_UDP &&
+ xo->inner_ipproto != IPPROTO_TCP)
+ goto out_disable;
return features;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b758bc72ac36..a605eae56685 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3886,7 +3886,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
mlx5e_fold_sw_stats64(priv, stats);
}
- stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+ stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
@@ -4875,7 +4875,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
/* Verify if UDP port is being offloaded by HW */
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
- return features;
+ return vxlan_features_check(skb, features);
#if IS_ENABLED(CONFIG_GENEVE)
/* Support Geneve offload for default UDP port */
@@ -4901,7 +4901,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct mlx5e_priv *priv = netdev_priv(netdev);
features = vlan_features_check(skb, features);
- features = vxlan_features_check(skb, features);
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index e211c41cec06..e1ed214e8651 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1186,6 +1186,9 @@ void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
ts_stats->err = 0;
ts_stats->lost = 0;
+ if (!ptp)
+ goto out;
+
/* Aggregate stats across all TCs */
for (i = 0; i < ptp->num_tc; i++) {
struct mlx5e_ptp_cq_stats *stats =
@@ -1214,6 +1217,7 @@ void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
}
}
+out:
mutex_unlock(&priv->state_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 099bf1078889..b09e9abd39f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -153,7 +153,11 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
*hopbyhop = 0;
if (skb->encapsulation) {
- ihs = skb_inner_tcp_all_headers(skb);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ ihs = skb_inner_transport_offset(skb) +
+ sizeof(struct udphdr);
+ else
+ ihs = skb_inner_tcp_all_headers(skb);
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 2d95a9b7b44e..b61b7d966114 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -373,6 +373,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
do {
if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+ return -EACCES;
+ }
cond_resched();
} while (!time_after(jiffies, end));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index ad38e31822df..a6329ca2d9bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -248,6 +248,10 @@ recover_from_sw_reset:
do {
if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+ goto unlock;
+ }
msleep(20);
} while (!time_after(jiffies, end));
@@ -317,6 +321,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n");
return -ENODEV;
}
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n");
+ return -EACCES;
+ }
msleep(100);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index f7f0476a4a58..d0871c46b8c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -719,6 +719,7 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
struct mlx5_core_dev *dev;
u8 mode;
#endif
+ bool roce_support;
int i;
for (i = 0; i < ldev->ports; i++)
@@ -743,6 +744,11 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
return false;
#endif
+ roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
+ for (i = 1; i < ldev->ports; i++)
+ if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
+ return false;
+
return true;
}
@@ -910,8 +916,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
} else if (roce_lag) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 1; i < ldev->ports; i++)
- mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+ for (i = 1; i < ldev->ports; i++) {
+ if (mlx5_get_roce_state(ldev->pf[i].dev))
+ mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+ }
} else if (shared_fdb) {
int i;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index c16b462ddedf..ab2717012b79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
&dest, 1);
if (IS_ERR(lag_definer->rules[idx])) {
err = PTR_ERR(lag_definer->rules[idx]);
- while (i--)
- while (j--)
+ do {
+ while (j--) {
+ idx = i * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
+ }
+ j = ldev->buckets;
+ } while (i--);
goto destroy_fg;
}
}
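The error-unwind fix in mlx5_lag_create_port_sel_table above walks a two-dimensional rule table backwards. A stand-alone sketch of the same pattern, with illustrative names, assuming creation failed at row i, column j:

#include <stdio.h>

#define NBUCKETS 4

static void destroy_rule(int idx)
{
        printf("destroy rule %d\n", idx);
}

static void unwind(int i, int j, int nbuckets)
{
        do {
                /* Free the entries already created in the current row. */
                while (j--)
                        destroy_rule(i * nbuckets + j);
                /* Earlier rows were fully populated. */
                j = nbuckets;
        } while (i--);
}

int main(void)
{
        /* Pretend creation failed at i = 1, j = 2: entries 0..5 exist. */
        unwind(1, 2, NBUCKETS);
        return 0;
}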
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
index 6b774e0c2766..d0b595ba6110 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
@@ -74,6 +74,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev)
ret = -EBUSY;
goto pci_unlock;
}
+ if (pci_channel_offline(dev->pdev)) {
+ ret = -EACCES;
+ goto pci_unlock;
+ }
/* Check if semaphore is already locked */
ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index dd5d186dc614..f6deb5a3f820 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -100,10 +100,6 @@ static bool ft_create_alias_supported(struct mlx5_core_dev *dev)
static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
{
- /* Feature is currently implemented for PFs only */
- if (!mlx5_core_is_pf(dev))
- return false;
-
/* Honor the SW implementation limit */
if (host_buses > MLX5_SD_MAX_GROUP_SZ)
return false;
@@ -162,6 +158,14 @@ static int sd_init(struct mlx5_core_dev *dev)
bool sdm;
int err;
+ /* Feature is currently implemented for PFs only */
+ if (!mlx5_core_is_pf(dev))
+ return 0;
+
+ /* Block on embedded CPU PFs */
+ if (mlx5_core_is_ecpf(dev))
+ return 0;
+
if (!MLX5_CAP_MCAM_REG(dev, mpir))
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 6574c145dc1e..459a836a5d9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1298,6 +1298,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
if (!err)
mlx5_function_disable(dev, boot);
+ else
+ mlx5_stop_health_poll(dev, boot);
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index bf66d996e32e..c0ced4d315f3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1594,18 +1594,25 @@ static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
return -EBUSY;
}
-static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
+static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci,
+ bool pci_reset_sbr_supported)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
char mrsr_pl[MLXSW_REG_MRSR_LEN];
int err;
+ if (!pci_reset_sbr_supported) {
+ pci_dbg(pdev, "Performing PCI hot reset instead of \"all reset\"\n");
+ goto sbr;
+ }
+
mlxsw_reg_mrsr_pack(mrsr_pl,
MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE);
err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
if (err)
return err;
+sbr:
device_lock_assert(&pdev->dev);
pci_cfg_access_lock(pdev);
@@ -1633,6 +1640,7 @@ static int
mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
+ bool pci_reset_sbr_supported = false;
char mcam_pl[MLXSW_REG_MCAM_LEN];
bool pci_reset_supported = false;
u32 sys_status;
@@ -1652,13 +1660,17 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
- if (!err)
+ if (!err) {
mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
&pci_reset_supported);
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET_SBR,
+ &pci_reset_sbr_supported);
+ }
if (pci_reset_supported) {
pci_dbg(pdev, "Starting PCI reset flow\n");
- err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci);
+ err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci,
+ pci_reset_sbr_supported);
} else {
pci_dbg(pdev, "Starting software reset flow\n");
err = mlxsw_pci_reset_sw(mlxsw_pci);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 8adf86a6f5cc..3bb89045eaf5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -10671,6 +10671,8 @@ enum mlxsw_reg_mcam_mng_feature_cap_mask_bits {
MLXSW_REG_MCAM_MCIA_128B = 34,
/* If set, MRSR.command=6 is supported. */
MLXSW_REG_MCAM_PCI_RESET = 48,
+ /* If set, MRSR.command=6 is supported with Secondary Bus Reset. */
+ MLXSW_REG_MCAM_PCI_RESET_SBR = 67,
};
#define MLXSW_REG_BYTES_PER_DWORD 0x4
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index c9f1c79f3f9d..ba090262e27e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -1607,8 +1607,8 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
+ u16 local_port, local_port_1, first_local_port, last_local_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u16 local_port, local_port_1, last_local_port;
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
u8 masked_count, current_page = 0;
unsigned long cb_priv = 0;
@@ -1628,6 +1628,7 @@ next_batch:
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, false);
mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
@@ -1645,9 +1646,12 @@ next_batch:
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
- local_port, 1);
+ local_port - first_local_port,
+ 1);
}
- mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
+ local_port - first_local_port,
+ 1);
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
&bulk_list);
@@ -1684,7 +1688,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u16 local_port, last_local_port;
+ u16 local_port, first_local_port, last_local_port;
LIST_HEAD(bulk_list);
unsigned int masked_count;
u8 current_page = 0;
@@ -1702,6 +1706,7 @@ next_batch:
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, true);
mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
@@ -1719,9 +1724,12 @@ next_batch:
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
- local_port, 1);
+ local_port - first_local_port,
+ 1);
}
- mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
+ local_port - first_local_port,
+ 1);
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
&bulk_list);
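The spectrum_buffers change above converts absolute local port numbers into page-relative mask bits before packing the SBSR register. A stand-alone sketch of that arithmetic (the ports-per-page constant is illustrative):

#include <stdio.h>

#define PORTS_IN_PAGE 256

int main(void)
{
        unsigned int local_port = 300;
        unsigned int page = local_port / PORTS_IN_PAGE;       /* 1 */
        unsigned int first = page * PORTS_IN_PAGE;            /* 256 */
        unsigned int last = first + PORTS_IN_PAGE - 1;        /* 511 */
        unsigned int mask_bit = local_port - first;           /* 44 */

        printf("page %u covers ports %u..%u, port %u -> mask bit %u\n",
               page, first, last, local_port, mask_bit);
        return 0;
}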
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index d0f4ff4ee075..0d1740d64676 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1127,8 +1127,12 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
if (netdev->phydev)
phy_ethtool_get_wol(netdev->phydev, wol);
- wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
- WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+ if (wol->supported != adapter->phy_wol_supported)
+ netif_warn(adapter, drv, adapter->netdev,
+ "PHY changed its supported WOL! old=%x, new=%x\n",
+ adapter->phy_wol_supported, wol->supported);
+
+ wol->supported |= MAC_SUPPORTED_WAKES;
if (adapter->is_pci11x1x)
wol->supported |= WAKE_MAGICSECURE;
@@ -1143,7 +1147,39 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ /* WAKE_MAGICSECURE is a modifier of and only valid together with
+ * WAKE_MAGIC
+ */
+ if ((wol->wolopts & WAKE_MAGICSECURE) && !(wol->wolopts & WAKE_MAGIC))
+ return -EINVAL;
+
+ if (netdev->phydev) {
+ struct ethtool_wolinfo phy_wol;
+ int ret;
+
+ phy_wol.wolopts = wol->wolopts & adapter->phy_wol_supported;
+
+ /* If WAKE_MAGICSECURE was requested, filter out WAKE_MAGIC
+ * for PHYs that do not support WAKE_MAGICSECURE
+ */
+ if (wol->wolopts & WAKE_MAGICSECURE &&
+ !(adapter->phy_wol_supported & WAKE_MAGICSECURE))
+ phy_wol.wolopts &= ~WAKE_MAGIC;
+
+ ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol);
+ if (ret && (ret != -EOPNOTSUPP))
+ return ret;
+
+ if (ret == -EOPNOTSUPP)
+ adapter->phy_wolopts = 0;
+ else
+ adapter->phy_wolopts = phy_wol.wolopts;
+ } else {
+ adapter->phy_wolopts = 0;
+ }
+
adapter->wolopts = 0;
+ wol->wolopts &= ~adapter->phy_wolopts;
if (wol->wolopts & WAKE_UCAST)
adapter->wolopts |= WAKE_UCAST;
if (wol->wolopts & WAKE_MCAST)
@@ -1164,10 +1200,10 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX);
}
+ wol->wolopts = adapter->wolopts | adapter->phy_wolopts;
device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
- return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
- : -ENETDOWN;
+ return 0;
}
#endif /* CONFIG_PM */
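The lan743x ethtool change above splits the requested Wake-on-LAN options between the PHY and the MAC: whatever the PHY accepts is remembered as phy_wolopts, and only the remainder is programmed into the MAC. A stand-alone sketch of that split, using ethtool-style WAKE_* bit values and illustrative variable names:

#include <stdio.h>

#define WAKE_PHY        (1 << 0)
#define WAKE_UCAST      (1 << 1)
#define WAKE_MCAST      (1 << 2)
#define WAKE_BCAST      (1 << 3)
#define WAKE_MAGIC      (1 << 5)

int main(void)
{
        unsigned int requested = WAKE_MAGIC | WAKE_PHY | WAKE_BCAST;
        unsigned int phy_supported = WAKE_MAGIC | WAKE_PHY;

        unsigned int phy_wolopts = requested & phy_supported; /* PHY handles these */
        unsigned int mac_wolopts = requested & ~phy_wolopts;  /* MAC handles the rest */

        printf("phy_wolopts=0x%x mac_wolopts=0x%x\n", phy_wolopts, mac_wolopts);
        return 0;
}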
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 6be8a43c908a..e418539565b1 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -3118,6 +3118,17 @@ static int lan743x_netdev_open(struct net_device *netdev)
if (ret)
goto close_tx;
}
+
+#ifdef CONFIG_PM
+ if (adapter->netdev->phydev) {
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phy_ethtool_get_wol(netdev->phydev, &wol);
+ adapter->phy_wol_supported = wol.supported;
+ adapter->phy_wolopts = wol.wolopts;
+ }
+#endif
+
return 0;
close_tx:
@@ -3575,7 +3586,7 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
/* clear wake settings */
pmtctl = lan743x_csr_read(adapter, PMT_CTL);
- pmtctl |= PMT_CTL_WUPS_MASK_;
+ pmtctl |= PMT_CTL_WUPS_MASK_ | PMT_CTL_RES_CLR_WKP_MASK_;
pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
@@ -3587,10 +3598,9 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
- if (adapter->wolopts & WAKE_PHY) {
- pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
+ if (adapter->phy_wolopts)
pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
- }
+
if (adapter->wolopts & WAKE_MAGIC) {
wucsr |= MAC_WUCSR_MPEN_;
macrx |= MAC_RX_RXEN_;
@@ -3686,7 +3696,7 @@ static int lan743x_pm_suspend(struct device *dev)
lan743x_csr_write(adapter, MAC_WUCSR2, 0);
lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
- if (adapter->wolopts)
+ if (adapter->wolopts || adapter->phy_wolopts)
lan743x_pm_set_wol(adapter);
if (adapter->is_pci11x1x) {
@@ -3710,6 +3720,7 @@ static int lan743x_pm_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 data;
int ret;
pci_set_power_state(pdev, PCI_D0);
@@ -3728,6 +3739,30 @@ static int lan743x_pm_resume(struct device *dev)
return ret;
}
+ ret = lan743x_csr_read(adapter, MAC_WK_SRC);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Wakeup source : 0x%08X\n", ret);
+
+ /* Clear the wol configuration and status bits. Note that
+ * the status bits are "Write One to Clear (W1C)"
+ */
+ data = MAC_WUCSR_EEE_TX_WAKE_ | MAC_WUCSR_EEE_RX_WAKE_ |
+ MAC_WUCSR_RFE_WAKE_FR_ | MAC_WUCSR_PFDA_FR_ | MAC_WUCSR_WUFR_ |
+ MAC_WUCSR_MPR_ | MAC_WUCSR_BCAST_FR_;
+ lan743x_csr_write(adapter, MAC_WUCSR, data);
+
+ data = MAC_WUCSR2_NS_RCD_ | MAC_WUCSR2_ARP_RCD_ |
+ MAC_WUCSR2_IPV6_TCPSYN_RCD_ | MAC_WUCSR2_IPV4_TCPSYN_RCD_;
+ lan743x_csr_write(adapter, MAC_WUCSR2, data);
+
+ data = MAC_WK_SRC_ETH_PHY_WK_ | MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ |
+ MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ | MAC_WK_SRC_EEE_TX_WK_ |
+ MAC_WK_SRC_EEE_RX_WK_ | MAC_WK_SRC_RFE_FR_WK_ |
+ MAC_WK_SRC_PFDA_FR_WK_ | MAC_WK_SRC_MP_FR_WK_ |
+ MAC_WK_SRC_BCAST_FR_WK_ | MAC_WK_SRC_WU_FR_WK_ |
+ MAC_WK_SRC_WK_FR_SAVED_;
+ lan743x_csr_write(adapter, MAC_WK_SRC, data);
+
/* open netdev when netdev is at running state while resume.
* For instance, it is true when system wakes up after pm-suspend
* However, it is false when system wakes up after suspend GUI menu
@@ -3736,9 +3771,6 @@ static int lan743x_pm_resume(struct device *dev)
lan743x_netdev_open(netdev);
netif_device_attach(netdev);
- ret = lan743x_csr_read(adapter, MAC_WK_SRC);
- netif_info(adapter, drv, adapter->netdev,
- "Wakeup source : 0x%08X\n", ret);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 645bc048e52e..3b2585a384e2 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -61,6 +61,7 @@
#define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18)
#define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15)
#define PMT_CTL_EEE_WAKEUP_EN_ BIT(13)
+#define PMT_CTL_RES_CLR_WKP_MASK_ GENMASK(9, 8)
#define PMT_CTL_READY_ BIT(7)
#define PMT_CTL_ETH_PHY_RST_ BIT(4)
#define PMT_CTL_WOL_EN_ BIT(3)
@@ -227,12 +228,31 @@
#define MAC_WUCSR (0x140)
#define MAC_MP_SO_EN_ BIT(21)
#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14)
+#define MAC_WUCSR_EEE_TX_WAKE_ BIT(13)
+#define MAC_WUCSR_EEE_RX_WAKE_ BIT(11)
+#define MAC_WUCSR_RFE_WAKE_FR_ BIT(9)
+#define MAC_WUCSR_PFDA_FR_ BIT(7)
+#define MAC_WUCSR_WUFR_ BIT(6)
+#define MAC_WUCSR_MPR_ BIT(5)
+#define MAC_WUCSR_BCAST_FR_ BIT(4)
#define MAC_WUCSR_PFDA_EN_ BIT(3)
#define MAC_WUCSR_WAKE_EN_ BIT(2)
#define MAC_WUCSR_MPEN_ BIT(1)
#define MAC_WUCSR_BCST_EN_ BIT(0)
#define MAC_WK_SRC (0x144)
+#define MAC_WK_SRC_ETH_PHY_WK_ BIT(17)
+#define MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ BIT(16)
+#define MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ BIT(15)
+#define MAC_WK_SRC_EEE_TX_WK_ BIT(14)
+#define MAC_WK_SRC_EEE_RX_WK_ BIT(13)
+#define MAC_WK_SRC_RFE_FR_WK_ BIT(12)
+#define MAC_WK_SRC_PFDA_FR_WK_ BIT(11)
+#define MAC_WK_SRC_MP_FR_WK_ BIT(10)
+#define MAC_WK_SRC_BCAST_FR_WK_ BIT(9)
+#define MAC_WK_SRC_WU_FR_WK_ BIT(8)
+#define MAC_WK_SRC_WK_FR_SAVED_ BIT(7)
+
#define MAC_MP_SO_HI (0x148)
#define MAC_MP_SO_LO (0x14C)
@@ -295,6 +315,10 @@
#define RFE_INDX(index) (0x580 + (index << 2))
#define MAC_WUCSR2 (0x600)
+#define MAC_WUCSR2_NS_RCD_ BIT(7)
+#define MAC_WUCSR2_ARP_RCD_ BIT(6)
+#define MAC_WUCSR2_IPV6_TCPSYN_RCD_ BIT(5)
+#define MAC_WUCSR2_IPV4_TCPSYN_RCD_ BIT(4)
#define SGMII_ACC (0x720)
#define SGMII_ACC_SGMII_BZY_ BIT(31)
@@ -1018,6 +1042,8 @@ enum lan743x_sgmii_lsd {
LINK_2500_SLAVE
};
+#define MAC_SUPPORTED_WAKES (WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | \
+ WAKE_MAGIC | WAKE_ARP)
struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
@@ -1025,6 +1051,8 @@ struct lan743x_adapter {
#ifdef CONFIG_PM
u32 wolopts;
u8 sopass[SOPASS_MAX];
+ u32 phy_wolopts;
+ u32 phy_wol_supported;
#endif
struct pci_dev *pdev;
struct lan743x_csr csr;
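The resume path above clears latched wake status by writing ones, because the MAC_WUCSR/MAC_WK_SRC status bits are "write one to clear" (W1C). A stand-alone sketch of W1C semantics with an illustrative register model:

#include <stdio.h>

/* Pretend MPR_ (bit 5) and WUFR_ (bit 6) are latched in a W1C register. */
static unsigned int wucsr = 0x00000060;

static void w1c_write(unsigned int val)
{
        wucsr &= ~val;          /* bits written as 1 clear, 0 bits are untouched */
}

int main(void)
{
        printf("before: 0x%08x\n", wucsr);
        w1c_write(1u << 5);     /* clear only the MPR_ status bit */
        printf("after : 0x%08x\n", wucsr);      /* WUFR_ is still pending */
        return 0;
}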
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index d087cf954f75..608ad31a9702 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -2798,6 +2798,8 @@ static int add_adev(struct gdma_dev *gd)
if (ret)
goto init_fail;
+ /* madev is owned by the auxiliary device */
+ madev = NULL;
ret = auxiliary_device_add(adev);
if (ret)
goto add_fail;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index f30eee4a5a80..b6c01a88098d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -375,7 +375,9 @@ typedef void (*ionic_cq_done_cb)(void *done_arg);
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg);
-unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do);
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq,
+ unsigned int work_to_do,
+ bool in_napi);
int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
struct ionic_queue *q, unsigned int index, const char *name,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 24870da3f484..1837a30ba08a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -304,10 +304,8 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
if (ret)
return ret;
- if (qcq->napi.poll)
- napi_enable(&qcq->napi);
-
if (qcq->flags & IONIC_QCQ_F_INTR) {
+ napi_enable(&qcq->napi);
irq_set_affinity_hint(qcq->intr.vector,
&qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
@@ -1191,7 +1189,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_rx_service, NULL, NULL);
if (lif->hwstamp_txq)
- tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget);
+ tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget, !!budget);
work_done = max(max(n_work, a_work), max(rx_work, tx_work));
if (work_done < budget && napi_complete_done(napi, work_done)) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 5dba6d2d633c..9fdd7cd3ef19 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -23,7 +23,8 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_tx_desc_info *desc_info,
- struct ionic_txq_comp *comp);
+ struct ionic_txq_comp *comp,
+ bool in_napi);
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
{
@@ -480,6 +481,20 @@ int ionic_xdp_xmit(struct net_device *netdev, int n,
return nxmit;
}
+static void ionic_xdp_rx_put_bufs(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info,
+ int nbufs)
+{
+ int i;
+
+ for (i = 0; i < nbufs; i++) {
+ dma_unmap_page(q->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+ buf_info->page = NULL;
+ buf_info++;
+ }
+}
+
static bool ionic_run_xdp(struct ionic_rx_stats *stats,
struct net_device *netdev,
struct bpf_prog *xdp_prog,
@@ -493,6 +508,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
struct netdev_queue *nq;
struct xdp_frame *xdpf;
int remain_len;
+ int nbufs = 1;
int frag_len;
int err = 0;
@@ -542,6 +558,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
if (page_is_pfmemalloc(bi->page))
xdp_buff_set_frag_pfmemalloc(&xdp_buf);
} while (remain_len > 0);
+ nbufs += sinfo->nr_frags;
}
xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);
@@ -574,9 +591,6 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
goto out_xdp_abort;
}
- dma_unmap_page(rxq->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-
err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
buf_info->page,
buf_info->page_offset,
@@ -586,22 +600,19 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
goto out_xdp_abort;
}
+ ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
stats->xdp_tx++;
/* the Tx completion will free the buffers */
break;
case XDP_REDIRECT:
- /* unmap the pages before handing them to a different device */
- dma_unmap_page(rxq->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-
err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
if (err) {
netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
goto out_xdp_abort;
}
- buf_info->page = NULL;
+ ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
rxq->xdp_flush = true;
stats->xdp_redirect++;
break;
@@ -934,7 +945,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
u32 work_done = 0;
u32 flags = 0;
- work_done = ionic_tx_cq_service(cq, budget);
+ work_done = ionic_tx_cq_service(cq, budget, !!budget);
if (unlikely(!budget))
return budget;
@@ -1018,7 +1029,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq;
- tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT);
+ tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, !!budget);
if (unlikely(!budget))
return budget;
@@ -1151,7 +1162,8 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_tx_desc_info *desc_info,
- struct ionic_txq_comp *comp)
+ struct ionic_txq_comp *comp,
+ bool in_napi)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_qcq *qcq = q_to_qcq(q);
@@ -1203,11 +1215,13 @@ static void ionic_tx_clean(struct ionic_queue *q,
desc_info->bytes = skb->len;
stats->clean++;
- napi_consume_skb(skb, 1);
+ napi_consume_skb(skb, likely(in_napi) ? 1 : 0);
}
static bool ionic_tx_service(struct ionic_cq *cq,
- unsigned int *total_pkts, unsigned int *total_bytes)
+ unsigned int *total_pkts,
+ unsigned int *total_bytes,
+ bool in_napi)
{
struct ionic_tx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
@@ -1229,7 +1243,7 @@ static bool ionic_tx_service(struct ionic_cq *cq,
desc_info->bytes = 0;
index = q->tail_idx;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, comp);
+ ionic_tx_clean(q, desc_info, comp, in_napi);
if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
@@ -1243,7 +1257,9 @@ static bool ionic_tx_service(struct ionic_cq *cq,
return true;
}
-unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq,
+ unsigned int work_to_do,
+ bool in_napi)
{
unsigned int work_done = 0;
unsigned int bytes = 0;
@@ -1252,7 +1268,7 @@ unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
if (work_to_do == 0)
return 0;
- while (ionic_tx_service(cq, &pkts, &bytes)) {
+ while (ionic_tx_service(cq, &pkts, &bytes, in_napi)) {
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
@@ -1278,7 +1294,7 @@ void ionic_tx_flush(struct ionic_cq *cq)
{
u32 work_done;
- work_done = ionic_tx_cq_service(cq, cq->num_descs);
+ work_done = ionic_tx_cq_service(cq, cq->num_descs, false);
if (work_done)
ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index,
work_done, IONIC_INTR_CRED_RESET_COALESCE);
@@ -1295,7 +1311,7 @@ void ionic_tx_empty(struct ionic_queue *q)
desc_info = &q->tx_info[q->tail_idx];
desc_info->bytes = 0;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, NULL);
+ ionic_tx_clean(q, desc_info, NULL, false);
if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index ff3b89e9028e..ad06da0fdaa0 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -98,10 +98,8 @@ qcaspi_info_show(struct seq_file *s, void *what)
seq_printf(s, "IRQ : %d\n",
qca->spi_dev->irq);
- seq_printf(s, "INTR REQ : %u\n",
- qca->intr_req);
- seq_printf(s, "INTR SVC : %u\n",
- qca->intr_svc);
+ seq_printf(s, "INTR : %lx\n",
+ qca->intr);
seq_printf(s, "SPI max speed : %lu\n",
(unsigned long)qca->spi_dev->max_speed_hz);
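The qca_spi change above replaces the intr_req/intr_svc counter pair with a single latched bit that the IRQ handler sets and the SPI thread atomically tests and clears. A stand-alone sketch of that handshake, with GCC/Clang atomic builtins standing in for the kernel's set_bit()/test_and_clear_bit():

#include <stdio.h>

static unsigned long intr;

static void irq_handler(void)
{
        /* set_bit(SPI_INTR, &qca->intr) equivalent */
        __atomic_fetch_or(&intr, 1UL << 0, __ATOMIC_SEQ_CST);
}

static int thread_poll(void)
{
        /* test_and_clear_bit(SPI_INTR, &qca->intr) equivalent */
        unsigned long old = __atomic_fetch_and(&intr, ~(1UL << 0), __ATOMIC_SEQ_CST);

        return old & 1UL;
}

int main(void)
{
        irq_handler();
        printf("first poll: %d, second poll: %d\n", thread_poll(), thread_poll());
        return 0;
}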
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5799ecc88a87..8f7ce6b51a1c 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -35,6 +35,8 @@
#define MAX_DMA_BURST_LEN 5000
+#define SPI_INTR 0
+
/* Modules parameters */
#define QCASPI_CLK_SPEED_MIN 1000000
#define QCASPI_CLK_SPEED_MAX 16000000
@@ -579,14 +581,14 @@ qcaspi_spi_thread(void *data)
continue;
}
- if ((qca->intr_req == qca->intr_svc) &&
+ if (!test_bit(SPI_INTR, &qca->intr) &&
!qca->txr.skb[qca->txr.head])
schedule();
set_current_state(TASK_RUNNING);
- netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n",
- qca->intr_req - qca->intr_svc,
+ netdev_dbg(qca->net_dev, "have work to do. int: %lu, tx_skb: %p\n",
+ qca->intr,
qca->txr.skb[qca->txr.head]);
qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);
@@ -600,8 +602,7 @@ qcaspi_spi_thread(void *data)
msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
}
- if (qca->intr_svc != qca->intr_req) {
- qca->intr_svc = qca->intr_req;
+ if (test_and_clear_bit(SPI_INTR, &qca->intr)) {
start_spi_intr_handling(qca, &intr_cause);
if (intr_cause & SPI_INT_CPU_ON) {
@@ -663,7 +664,7 @@ qcaspi_intr_handler(int irq, void *data)
{
struct qcaspi *qca = data;
- qca->intr_req++;
+ set_bit(SPI_INTR, &qca->intr);
if (qca->spi_thread)
wake_up_process(qca->spi_thread);
@@ -679,8 +680,7 @@ qcaspi_netdev_open(struct net_device *dev)
if (!qca)
return -EINVAL;
- qca->intr_req = 1;
- qca->intr_svc = 0;
+ set_bit(SPI_INTR, &qca->intr);
qca->sync = QCASPI_SYNC_UNKNOWN;
qcafrm_fsm_init_spi(&qca->frm_handle);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index d59cb2352cee..8f4808695e82 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -81,8 +81,7 @@ struct qcaspi {
struct qcafrm_handle frm_handle;
struct sk_buff *rx_skb;
- unsigned int intr_req;
- unsigned int intr_svc;
+ unsigned long intr;
u16 reset_count;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index e254b21fdb59..65d7370b47d5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -93,6 +93,7 @@ struct ethqos_emac_driver_data {
bool has_emac_ge_3;
const char *link_clk_name;
bool has_integrated_pcs;
+ u32 dma_addr_width;
struct dwmac4_addrs dwmac4_addrs;
};
@@ -276,6 +277,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
.has_emac_ge_3 = true,
.link_clk_name = "phyaux",
.has_integrated_pcs = true,
+ .dma_addr_width = 36,
.dwmac4_addrs = {
.dma_chan = 0x00008100,
.dma_chan_offset = 0x1000,
@@ -845,6 +847,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI;
if (data->has_integrated_pcs)
plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
+ if (data->dma_addr_width)
+ plat_dat->host_dma_width = data->dma_addr_width;
if (ethqos->serdes_phy) {
plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index f05bd757dfe5..5ef52ef2698f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -218,6 +218,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
{
u32 num_snapshot, ts_status, tsync_int;
struct ptp_clock_event event;
+ u32 acr_value, channel;
unsigned long flags;
u64 ptp_time;
int i;
@@ -243,12 +244,15 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
GMAC_TIMESTAMP_ATSNS_SHIFT;
+ acr_value = readl(priv->ptpaddr + PTP_ACR);
+ channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value));
+
for (i = 0; i < num_snapshot; i++) {
read_lock_irqsave(&priv->ptp_lock, flags);
get_ptptime(priv->ptpaddr, &ptp_time);
read_unlock_irqrestore(&priv->ptp_lock, flags);
event.type = PTP_CLOCK_EXTTS;
- event.index = 0;
+ event.index = channel;
event.timestamp = ptp_time;
ptp_clock_event(priv->ptp_clock, &event);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 222540b55480..996f2bcd07a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -343,10 +343,11 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
struct tc_cbs_qopt_offload *qopt)
{
u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ s64 port_transmit_rate_kbps;
u32 queue = qopt->queue;
- u32 ptr, speed_div;
u32 mode_to_use;
u64 value;
+ u32 ptr;
int ret;
/* Queue 0 is not AVB capable */
@@ -355,30 +356,30 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
if (!priv->dma_cap.av)
return -EOPNOTSUPP;
- /* Port Transmit Rate and Speed Divider */
- switch (priv->speed) {
- case SPEED_10000:
- ptr = 32;
- speed_div = 10000000;
- break;
- case SPEED_5000:
- ptr = 32;
- speed_div = 5000000;
- break;
- case SPEED_2500:
- ptr = 8;
- speed_div = 2500000;
- break;
- case SPEED_1000:
- ptr = 8;
- speed_div = 1000000;
- break;
- case SPEED_100:
- ptr = 4;
- speed_div = 100000;
- break;
- default:
- return -EOPNOTSUPP;
+ port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;
+
+ if (qopt->enable) {
+ /* Port Transmit Rate and Speed Divider */
+ switch (div_s64(port_transmit_rate_kbps, 1000)) {
+ case SPEED_10000:
+ case SPEED_5000:
+ ptr = 32;
+ break;
+ case SPEED_2500:
+ case SPEED_1000:
+ ptr = 8;
+ break;
+ case SPEED_100:
+ ptr = 4;
+ break;
+ default:
+ netdev_err(priv->dev,
+ "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
+ port_transmit_rate_kbps);
+ return -EINVAL;
+ }
+ } else {
+ ptr = 0;
}
mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
@@ -398,10 +399,10 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
}
/* Final adjustments for HW */
- value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+ value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps);
priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
- value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
+ value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps);
priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
value = qopt->hicredit * 1024ll * 8;
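The tc_setup_cbs rework above derives the port transmit rate from the offload request itself (idleSlope - sendSlope, in kbit/s) and scales the hardware credits against it instead of the configured link speed. A stand-alone sketch of the arithmetic for an illustrative 1 Gbit/s request:

#include <stdio.h>

int main(void)
{
        long long idleslope = 98000;            /* kbit/s */
        long long sendslope = -902000;          /* kbit/s */
        long long port_rate_kbps = idleslope - sendslope;   /* 1000000 -> 1 Gbit/s */
        int ptr = 8;                            /* speed divider used for 1G/2.5G */

        long long idle_credit = idleslope * 1024 * ptr / port_rate_kbps;
        long long send_credit = -sendslope * 1024 * ptr / port_rate_kbps;

        printf("port rate %lld kbit/s, idle_slope %lld, send_slope %lld\n",
               port_rate_kbps, idle_credit, send_credit);
        return 0;
}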
diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
index 79ba47bb3602..f7d21da1a0fb 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
@@ -455,7 +455,7 @@ void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr)
{
const u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, };
- rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
+ rx_class_ft1_set_start_len(miig_rt, slice, ETH_ALEN, ETH_ALEN);
rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr);
rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr);
rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 51495cb4b9be..838e85ddec67 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -815,6 +815,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
+ bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
const struct ip_tunnel_key *key = &info->key;
@@ -826,7 +827,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!skb_vlan_inet_prepare(skb))
+ if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
return -EINVAL;
if (!gs4)
@@ -908,7 +909,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
- geneve->cfg.inner_proto_inherit);
+ inner_proto_inherit);
if (unlikely(err))
return err;
@@ -925,6 +926,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
+ bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
const struct ip_tunnel_key *key = &info->key;
@@ -935,7 +937,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!skb_vlan_inet_prepare(skb))
+ if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
return -EINVAL;
if (!gs6)
@@ -997,7 +999,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
ttl = ttl ? : ip6_dst_hoplimit(dst);
}
err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
- geneve->cfg.inner_proto_inherit);
+ inner_proto_inherit);
if (unlikely(err))
return err;
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index ccfc83857c26..9e366f275406 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -1193,7 +1193,6 @@ static int baycom_epp_par_probe(struct pardevice *par_dev)
static struct parport_driver baycom_epp_par_driver = {
.name = "bce",
.probe = baycom_epp_par_probe,
- .devmodel = true,
};
static void __init baycom_epp_dev_setup(struct net_device *dev)
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index fd7da5bb1fa5..00ebc25d0b22 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -503,7 +503,6 @@ static int baycom_par_probe(struct pardevice *par_dev)
static struct parport_driver baycom_par_driver = {
.name = "bcp",
.probe = baycom_par_probe,
- .devmodel = true,
};
static int __init init_baycompar(void)
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 2d5b021b4ea6..fef4eff7753a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -439,7 +439,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
- err = ip_local_out(net, skb->sk, skb);
+ err = ip_local_out(net, NULL, skb);
if (unlikely(net_xmit_eval(err)))
DEV_STATS_INC(dev, tx_errors);
else
@@ -494,7 +494,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
- err = ip6_local_out(dev_net(dev), skb->sk, skb);
+ err = ip6_local_out(dev_net(dev), NULL, skb);
if (unlikely(net_xmit_eval(err)))
DEV_STATS_INC(dev, tx_errors);
else
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index c22897bf5509..017a6102be0a 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -324,7 +324,8 @@ static int nsim_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(nsim->peer);
- iflink = peer ? READ_ONCE(peer->netdev->ifindex) : 0;
+ iflink = peer ? READ_ONCE(peer->netdev->ifindex) :
+ READ_ONCE(dev->ifindex);
rcu_read_unlock();
return iflink;
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index a4d2e76a8d58..16789cd446e9 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -55,6 +55,7 @@ static void netkit_prep_forward(struct sk_buff *skb, bool xnet)
skb_scrub_packet(skb, xnet);
skb->priority = 0;
nf_skip_egress(skb, true);
+ skb_reset_mac_header(skb);
}
static struct netkit *netkit_priv(const struct net_device *dev)
@@ -78,6 +79,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
skb_orphan_frags(skb, GFP_ATOMIC)))
goto drop;
netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer)));
+ eth_skb_pkt_type(skb, peer);
skb->dev = peer;
entry = rcu_dereference(nk->active);
if (entry)
@@ -85,7 +87,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
switch (ret) {
case NETKIT_NEXT:
case NETKIT_PASS:
- skb->protocol = eth_type_trans(skb, skb->dev);
+ eth_skb_pull_mac(skb);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
dev_sw_netstats_tx_add(dev, 1, len);
@@ -155,6 +157,16 @@ static void netkit_set_multicast(struct net_device *dev)
/* Nothing to do, we receive whatever gets pushed to us! */
}
+static int netkit_set_macaddr(struct net_device *dev, void *sa)
+{
+ struct netkit *nk = netkit_priv(dev);
+
+ if (nk->mode != NETKIT_L2)
+ return -EOPNOTSUPP;
+
+ return eth_mac_addr(dev, sa);
+}
+
static void netkit_set_headroom(struct net_device *dev, int headroom)
{
struct netkit *nk = netkit_priv(dev), *nk2;
@@ -198,6 +210,7 @@ static const struct net_device_ops netkit_netdev_ops = {
.ndo_start_xmit = netkit_xmit,
.ndo_set_rx_mode = netkit_set_multicast,
.ndo_set_rx_headroom = netkit_set_headroom,
+ .ndo_set_mac_address = netkit_set_macaddr,
.ndo_get_iflink = netkit_get_iflink,
.ndo_get_peer_dev = netkit_peer_dev,
.ndo_get_stats64 = netkit_get_stats,
@@ -300,9 +313,11 @@ static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
if (!attr)
return 0;
- NL_SET_ERR_MSG_ATTR(extack, attr,
- "Setting Ethernet address is not supported");
- return -EOPNOTSUPP;
+ if (nla_len(attr) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(attr)))
+ return -EADDRNOTAVAIL;
+ return 0;
}
static struct rtnl_link_ops netkit_link_ops;
@@ -365,6 +380,9 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
strscpy(ifname, "nk%d", IFNAMSIZ);
ifname_assign_type = NET_NAME_ENUM;
}
+ if (mode != NETKIT_L2 &&
+ (tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS]))
+ return -EOPNOTSUPP;
net = rtnl_link_get_net(src_net, tbp);
if (IS_ERR(net))
@@ -379,7 +397,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
netif_inherit_tso_max(peer, dev);
- if (mode == NETKIT_L2)
+ if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
eth_hw_addr_random(peer);
if (ifmp && dev->ifindex)
peer->ifindex = ifmp->ifi_index;
@@ -402,7 +420,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
if (err < 0)
goto err_configure_peer;
- if (mode == NETKIT_L2)
+ if (mode == NETKIT_L2 && !tb[IFLA_ADDRESS])
eth_hw_addr_random(dev);
if (tb[IFLA_IFNAME])
nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
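netkit_validate above accepts an explicit Ethernet address for L2 mode only if it is well formed. A stand-alone sketch of the same check, 6 bytes, not all-zero, not multicast, mirroring what is_valid_ether_addr() tests; the helper name is illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static bool valid_ether_addr(const unsigned char *addr, size_t len)
{
        static const unsigned char zero[ETH_ALEN];

        if (len != ETH_ALEN)
                return false;
        if (addr[0] & 0x01)                     /* multicast/broadcast */
                return false;
        if (!memcmp(addr, zero, ETH_ALEN))      /* all zeros */
                return false;
        return true;
}

int main(void)
{
        const unsigned char good[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
        const unsigned char bad[ETH_ALEN]  = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        printf("good: %d, multicast: %d\n",
               valid_ether_addr(good, ETH_ALEN), valid_ether_addr(bad, ETH_ALEN));
        return 0;
}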
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index 326c9770a6dc..c706429b225a 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -17,6 +17,11 @@
#define DP83TG720S_PHY_RESET 0x1f
#define DP83TG720S_HW_RESET BIT(15)
+#define DP83TG720S_LPS_CFG3 0x18c
+/* Power modes are documented as bit fields but used as values */
+/* Power Mode 0 is Normal mode */
+#define DP83TG720S_LPS_CFG3_PWR_MODE_0 BIT(0)
+
#define DP83TG720S_RGMII_DELAY_CTRL 0x602
/* In RGMII mode, Enable or disable the internal delay for RXD */
#define DP83TG720S_RGMII_RX_CLK_SEL BIT(1)
@@ -31,11 +36,20 @@
static int dp83tg720_config_aneg(struct phy_device *phydev)
{
+ int ret;
+
/* Autoneg is not supported and this PHY supports only one speed.
* We need to care only about master/slave configuration if it was
* changed by user.
*/
- return genphy_c45_pma_baset1_setup_master_slave(phydev);
+ ret = genphy_c45_pma_baset1_setup_master_slave(phydev);
+ if (ret)
+ return ret;
+
+ /* Re-read role configuration to make changes visible even if
+ * the link is in administrative down state.
+ */
+ return genphy_c45_pma_baset1_read_master_slave(phydev);
}
static int dp83tg720_read_status(struct phy_device *phydev)
@@ -64,6 +78,8 @@ static int dp83tg720_read_status(struct phy_device *phydev)
return ret;
/* After HW reset we need to restore master/slave configuration.
+ * genphy_c45_pma_baset1_read_master_slave() call will be done
+ * by the dp83tg720_config_aneg() function.
*/
ret = dp83tg720_config_aneg(phydev);
if (ret)
@@ -154,10 +170,24 @@ static int dp83tg720_config_init(struct phy_device *phydev)
*/
usleep_range(1000, 2000);
- if (phy_interface_is_rgmii(phydev))
- return dp83tg720_config_rgmii_delay(phydev);
+ if (phy_interface_is_rgmii(phydev)) {
+ ret = dp83tg720_config_rgmii_delay(phydev);
+ if (ret)
+ return ret;
+ }
+
+ /* In case the PHY is bootstrapped in managed mode, we need to
+ * wake it.
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_LPS_CFG3,
+ DP83TG720S_LPS_CFG3_PWR_MODE_0);
+ if (ret)
+ return ret;
- return 0;
+ /* Make role configuration visible for ethtool on init and after
+ * reset.
+ */
+ return genphy_c45_pma_baset1_read_master_slave(phydev);
}
static struct phy_driver dp83tg720_driver[] = {
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 13e30ea7eec5..ebafedde0ab7 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -866,6 +866,17 @@ static int ksz8061_config_init(struct phy_device *phydev)
{
int ret;
+ /* Chip can be powered down by the bootstrap code. */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ if (ret & BMCR_PDOWN) {
+ ret = phy_write(phydev, MII_BMCR, ret & ~BMCR_PDOWN);
+ if (ret < 0)
+ return ret;
+ usleep_range(1000, 2000);
+ }
+
ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
if (ret)
return ret;
@@ -1939,7 +1950,7 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = {
{0x1c, 0x20, 0xeeee},
};
-static int ksz9477_config_init(struct phy_device *phydev)
+static int ksz9477_phy_errata(struct phy_device *phydev)
{
int err;
int i;
@@ -1967,16 +1978,30 @@ static int ksz9477_config_init(struct phy_device *phydev)
return err;
}
+ err = genphy_restart_aneg(phydev);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int ksz9477_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ /* Only KSZ9897 family of switches needs this fix. */
+ if ((phydev->phy_id & 0xf) == 1) {
+ err = ksz9477_phy_errata(phydev);
+ if (err)
+ return err;
+ }
+
/* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
* in this switch shall be regarded as broken.
*/
if (phydev->dev_flags & MICREL_NO_EEE)
phydev->eee_broken_modes = -1;
- err = genphy_restart_aneg(phydev);
- if (err)
- return err;
-
return kszphy_config_init(phydev);
}
@@ -2085,6 +2110,71 @@ static int kszphy_resume(struct phy_device *phydev)
return 0;
}
+static int ksz9477_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* No need to initialize registers if not powered down. */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ if (!(ret & BMCR_PDOWN))
+ return 0;
+
+ genphy_resume(phydev);
+
+ /* After switching from power-down to normal mode, an internal global
+ * reset is automatically generated. Wait a minimum of 1 ms before
+ * read/write access to the PHY registers.
+ */
+ usleep_range(1000, 2000);
+
+ /* Only KSZ9897 family of switches needs this fix. */
+ if ((phydev->phy_id & 0xf) == 1) {
+ ret = ksz9477_phy_errata(phydev);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable PHY Interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+ if (phydev->drv->config_intr)
+ phydev->drv->config_intr(phydev);
+ }
+
+ return 0;
+}
+
+static int ksz8061_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* This function can be called twice when the Ethernet device is on. */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ if (!(ret & BMCR_PDOWN))
+ return 0;
+
+ genphy_resume(phydev);
+ usleep_range(1000, 2000);
+
+ /* Re-program the value after chip is reset. */
+ ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+ if (ret)
+ return ret;
+
+ /* Enable PHY Interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+ if (phydev->drv->config_intr)
+ phydev->drv->config_intr(phydev);
+ }
+
+ return 0;
+}
+
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@@ -4029,7 +4119,7 @@ static int lan8841_config_intr(struct phy_device *phydev)
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
err = phy_read(phydev, LAN8814_INTS);
- if (err)
+ if (err < 0)
return err;
/* Enable / disable interrupts. It is OK to enable PTP interrupt
@@ -4045,6 +4135,14 @@ static int lan8841_config_intr(struct phy_device *phydev)
return err;
err = phy_read(phydev, LAN8814_INTS);
+ if (err < 0)
+ return err;
+
+ /* A positive return value is not an error; it only reflects the
+ * interrupt status that was read. Make sure to clear it so the
+ * caller does not treat it as an error.
+ */
+ err = 0;
}
return err;
@@ -5327,10 +5425,11 @@ static struct phy_driver ksphy_driver[] = {
/* PHY_BASIC_FEATURES */
.probe = kszphy_probe,
.config_init = ksz8061_config_init,
+ .soft_reset = genphy_soft_reset,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.suspend = kszphy_suspend,
- .resume = kszphy_resume,
+ .resume = ksz8061_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
@@ -5484,7 +5583,7 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = ksz9477_resume,
.get_features = ksz9477_get_features,
} };
@@ -5508,6 +5607,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ9477, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8841, MICREL_PHY_ID_MASK },
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index b2d36a3a96f1..e5f8ac4b4604 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -107,6 +107,7 @@ struct gpy_priv {
u8 fw_major;
u8 fw_minor;
+ u32 wolopts;
/* It takes 3 seconds to fully switch out of loopback mode before
* it can safely re-enter loopback mode. Record the time when
@@ -221,6 +222,15 @@ static int gpy_hwmon_register(struct phy_device *phydev)
}
#endif
+static int gpy_ack_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Clear all pending interrupts */
+ ret = phy_read(phydev, PHY_ISTAT);
+ return ret < 0 ? ret : 0;
+}
+
static int gpy_mbox_read(struct phy_device *phydev, u32 addr)
{
struct gpy_priv *priv = phydev->priv;
@@ -262,16 +272,8 @@ out:
static int gpy_config_init(struct phy_device *phydev)
{
- int ret;
-
- /* Mask all interrupts */
- ret = phy_write(phydev, PHY_IMASK, 0);
- if (ret)
- return ret;
-
- /* Clear all pending interrupts */
- ret = phy_read(phydev, PHY_ISTAT);
- return ret < 0 ? ret : 0;
+ /* Nothing to configure. Configuration Requirement Placeholder */
+ return 0;
}
static int gpy21x_config_init(struct phy_device *phydev)
@@ -627,11 +629,23 @@ static int gpy_read_status(struct phy_device *phydev)
static int gpy_config_intr(struct phy_device *phydev)
{
+ struct gpy_priv *priv = phydev->priv;
u16 mask = 0;
+ int ret;
+
+ ret = gpy_ack_interrupt(phydev);
+ if (ret)
+ return ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
mask = PHY_IMASK_MASK;
+ if (priv->wolopts & WAKE_MAGIC)
+ mask |= PHY_IMASK_WOL;
+
+ if (priv->wolopts & WAKE_PHY)
+ mask |= PHY_IMASK_LSTC;
+
return phy_write(phydev, PHY_IMASK, mask);
}
@@ -678,6 +692,7 @@ static int gpy_set_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
struct net_device *attach_dev = phydev->attached_dev;
+ struct gpy_priv *priv = phydev->priv;
int ret;
if (wol->wolopts & WAKE_MAGIC) {
@@ -725,6 +740,8 @@ static int gpy_set_wol(struct phy_device *phydev,
ret = phy_read(phydev, PHY_ISTAT);
if (ret < 0)
return ret;
+
+ priv->wolopts |= WAKE_MAGIC;
} else {
/* Disable magic packet matching */
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
@@ -732,6 +749,13 @@ static int gpy_set_wol(struct phy_device *phydev,
WOL_EN);
if (ret < 0)
return ret;
+
+ /* Disable the WOL interrupt */
+ ret = phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_WOL);
+ if (ret < 0)
+ return ret;
+
+ priv->wolopts &= ~WAKE_MAGIC;
}
if (wol->wolopts & WAKE_PHY) {
@@ -748,9 +772,11 @@ static int gpy_set_wol(struct phy_device *phydev,
if (ret & (PHY_IMASK_MASK & ~PHY_IMASK_LSTC))
phy_trigger_machine(phydev);
+ priv->wolopts |= WAKE_PHY;
return 0;
}
+ priv->wolopts &= ~WAKE_PHY;
/* Disable the link state change interrupt */
return phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC);
}
@@ -758,18 +784,10 @@ static int gpy_set_wol(struct phy_device *phydev,
static void gpy_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
- int ret;
+ struct gpy_priv *priv = phydev->priv;
wol->supported = WAKE_MAGIC | WAKE_PHY;
- wol->wolopts = 0;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VPSPEC2_WOL_CTL);
- if (ret & WOL_EN)
- wol->wolopts |= WAKE_MAGIC;
-
- ret = phy_read(phydev, PHY_IMASK);
- if (ret & PHY_IMASK_LSTC)
- wol->wolopts |= WAKE_PHY;
+ wol->wolopts = priv->wolopts;
}
static int gpy_loopback(struct phy_device *phydev, bool enable)
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 3f9cbd797fd6..a5684ef5884b 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -2429,8 +2429,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
/* Handle remove event globally, it resets this state machine */
if (event == SFP_E_REMOVE) {
- if (sfp->sm_mod_state > SFP_MOD_PROBE)
- sfp_sm_mod_remove(sfp);
+ sfp_sm_mod_remove(sfp);
sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0);
return;
}
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index cc7d1113ece0..e39bfaefe8c5 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1358,7 +1358,6 @@ static struct parport_driver plip_driver = {
.probe = plip_probe,
.match_port = plip_attach,
.detach = plip_detach,
- .devmodel = true,
};
static void __exit plip_cleanup_module (void)
diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig
index 577ea904b3d9..7fab916a7f46 100644
--- a/drivers/net/pse-pd/Kconfig
+++ b/drivers/net/pse-pd/Kconfig
@@ -23,6 +23,7 @@ config PSE_REGULATOR
config PSE_PD692X0
tristate "PD692X0 PSE controller"
depends on I2C
+ select FW_LOADER
select FW_UPLOAD
help
This module provides support for PD692x0 regulator based Ethernet
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 51c295e1e823..b034ef8a73ea 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -174,7 +174,6 @@ struct ax88179_data {
u32 wol_supported;
u32 wolopts;
u8 disconnecting;
- u8 initialized;
};
struct ax88179_int_data {
@@ -327,7 +326,8 @@ static void ax88179_status(struct usbnet *dev, struct urb *urb)
if (netif_carrier_ok(dev->net) != link) {
usbnet_link_change(dev, link, 1);
- netdev_info(dev->net, "ax88179 - Link status is: %d\n", link);
+ if (!link)
+ netdev_info(dev->net, "ax88179 - Link status is: 0\n");
}
}
@@ -1543,6 +1543,7 @@ static int ax88179_link_reset(struct usbnet *dev)
GMII_PHY_PHYSR, 2, &tmp16);
if (!(tmp16 & GMII_PHY_PHYSR_LINK)) {
+ netdev_info(dev->net, "ax88179 - Link status is: 0\n");
return 0;
} else if (GMII_PHY_PHYSR_GIGA == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
mode |= AX_MEDIUM_GIGAMODE | AX_MEDIUM_EN_125MHZ;
@@ -1580,6 +1581,8 @@ static int ax88179_link_reset(struct usbnet *dev)
netif_carrier_on(dev->net);
+ netdev_info(dev->net, "ax88179 - Link status is: 1\n");
+
return 0;
}
@@ -1678,12 +1681,21 @@ static int ax88179_reset(struct usbnet *dev)
static int ax88179_net_reset(struct usbnet *dev)
{
- struct ax88179_data *ax179_data = dev->driver_priv;
+ u16 tmp16;
- if (ax179_data->initialized)
+ ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID, GMII_PHY_PHYSR,
+ 2, &tmp16);
+ if (tmp16) {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ if (!(tmp16 & AX_MEDIUM_RECEIVE_EN)) {
+ tmp16 |= AX_MEDIUM_RECEIVE_EN;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ }
+ } else {
ax88179_reset(dev);
- else
- ax179_data->initialized = 1;
+ }
return 0;
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 663e46348ce3..386d62769ded 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1372,6 +1372,8 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x3000, 0)}, /* Telit FN912 series */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x3001, 0)}, /* Telit FN912 series */
{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 97afd7335d86..01a3b2417a54 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -778,7 +778,8 @@ static int rtl8150_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ecmd)
{
rtl8150_t *dev = netdev_priv(netdev);
- short lpa, bmcr;
+ short lpa = 0;
+ short bmcr = 0;
u32 supported;
supported = (SUPPORTED_10baseT_Half |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index cbea24666479..8e82184be5e7 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -879,7 +879,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev)
static int smsc95xx_reset(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
- u32 read_buf, write_buf, burst_cap;
+ u32 read_buf, burst_cap;
int ret = 0, timeout;
netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
@@ -1003,10 +1003,13 @@ static int smsc95xx_reset(struct usbnet *dev)
return ret;
netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
+ ret = smsc95xx_read_reg(dev, LED_GPIO_CFG, &read_buf);
+ if (ret < 0)
+ return ret;
/* Configure GPIO pins as LED outputs */
- write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
- LED_GPIO_CFG_FDX_LED;
- ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
+ read_buf |= LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
+ LED_GPIO_CFG_FDX_LED;
+ ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, read_buf);
if (ret < 0)
return ret;
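The smsc95xx hunk above replaces a blind write of LED_GPIO_CFG with a read-modify-write, so that bits outside the three LED fields keep whatever value they already had. A generic sketch of that read-modify-write pattern, with a stubbed register and made-up bit values (not the device's actual layout):

#include <stdint.h>
#include <stdio.h>

/* Made-up bit positions, not the chip's LED_GPIO_CFG layout. */
#define LED_SPD 0x01000000u
#define LED_LNK 0x00100000u
#define LED_FDX 0x00010000u

static uint32_t fake_reg = 0x0000000fu;	/* pretend other bits are in use */

static int reg_read(uint32_t *val)
{
	*val = fake_reg;
	return 0;
}

static int reg_write(uint32_t val)
{
	fake_reg = val;
	return 0;
}

/* Set the LED bits without clobbering whatever else lives in the register. */
static int set_led_bits(void)
{
	uint32_t val;
	int ret = reg_read(&val);

	if (ret < 0)
		return ret;
	val |= LED_SPD | LED_LNK | LED_FDX;
	return reg_write(val);
}

int main(void)
{
	set_led_bits();
	printf("reg = 0x%08x\n", fake_reg);	/* low nibble survives */
	return 0;
}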
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4a802c0ea2cb..ea10db9a09fa 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1360,6 +1360,10 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
if (unlikely(hdr->hdr.gso_type))
goto err_xdp;
+ /* Partially checksummed packets must be dropped. */
+ if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ goto err_xdp;
+
buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1677,6 +1681,10 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
if (unlikely(hdr->hdr.gso_type))
return NULL;
+ /* Partially checksummed packets must be dropped. */
+ if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ return NULL;
+
/* Now XDP core assumes frag size is PAGE_SIZE, but buffers
* with headroom may add hole in truesize, which
* make their length exceed PAGE_SIZE. So we disabled the
@@ -1943,6 +1951,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
struct net_device *dev = vi->dev;
struct sk_buff *skb;
struct virtio_net_common_hdr *hdr;
+ u8 flags;
if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
@@ -1951,6 +1960,15 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
return;
}
+ /* 1. Save the flags early, as the XDP program might overwrite them.
+ * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
+ * stay valid after XDP processing.
+ * 2. XDP doesn't work with partially checksummed packets (refer to
+ * virtnet_xdp_set()), so packets marked as
+ * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
+ */
+ flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
+
if (vi->mergeable_rx_bufs)
skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
stats);
@@ -1966,7 +1984,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
- if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
+ if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
@@ -2686,6 +2704,7 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
{
struct scatterlist *sgs[5], hdr, stat;
u32 out_num = 0, tmp, in_num = 0;
+ bool ok;
int ret;
/* Caller should know better */
@@ -2731,8 +2750,9 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
}
unlock:
+ ok = vi->ctrl->status == VIRTIO_NET_OK;
mutex_unlock(&vi->cvq_lock);
- return vi->ctrl->status == VIRTIO_NET_OK;
+ return ok;
}
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
@@ -4257,7 +4277,6 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
struct scatterlist sgs_rx;
- int ret = 0;
int i;
if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
@@ -4267,27 +4286,27 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
return -EINVAL;
- /* Acquire all queues dim_locks */
- for (i = 0; i < vi->max_queue_pairs; i++)
- mutex_lock(&vi->rq[i].dim_lock);
-
if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
vi->rx_dim_enabled = true;
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ mutex_lock(&vi->rq[i].dim_lock);
vi->rq[i].dim_enabled = true;
- goto unlock;
+ mutex_unlock(&vi->rq[i].dim_lock);
+ }
+ return 0;
}
coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
- if (!coal_rx) {
- ret = -ENOMEM;
- goto unlock;
- }
+ if (!coal_rx)
+ return -ENOMEM;
if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
vi->rx_dim_enabled = false;
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ mutex_lock(&vi->rq[i].dim_lock);
vi->rq[i].dim_enabled = false;
+ mutex_unlock(&vi->rq[i].dim_lock);
+ }
}
/* Since the per-queue coalescing params can be set,
@@ -4300,22 +4319,19 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
- &sgs_rx)) {
- ret = -EINVAL;
- goto unlock;
- }
+ &sgs_rx))
+ return -EINVAL;
vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
for (i = 0; i < vi->max_queue_pairs; i++) {
+ mutex_lock(&vi->rq[i].dim_lock);
vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
- }
-unlock:
- for (i = vi->max_queue_pairs - 1; i >= 0; i--)
mutex_unlock(&vi->rq[i].dim_lock);
+ }
- return ret;
+ return 0;
}
static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
@@ -4417,9 +4433,9 @@ static void virtnet_rx_dim_work(struct work_struct *work)
if (err)
pr_debug("%s: Failed to send dim parameters on rxq%d\n",
dev->name, qnum);
- dim->state = DIM_START_MEASURE;
}
out:
+ dim->state = DIM_START_MEASURE;
mutex_unlock(&rq->dim_lock);
}
@@ -5668,8 +5684,16 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
/* (!csum && gso) case will be fixed by register_netdev() */
}
- if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
- dev->features |= NETIF_F_RXCSUM;
+
+ /* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't
+ * need to calculate checksums for partially checksummed packets,
+ * as they're considered valid by the upper layer.
+ * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only
+ * receives fully checksummed packets. The device may assist in
+ * validating these packets' checksums, so the driver won't have to.
+ */
+ dev->features |= NETIF_F_RXCSUM;
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
dev->features |= NETIF_F_GRO_HW;
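The receive_buf() hunk copies hdr.flags into a local variable before any XDP program can run, because the XDP program may rewrite the buffer that also holds the virtio-net header; the small and mergeable XDP paths additionally drop VIRTIO_NET_HDR_F_NEEDS_CSUM packets outright. A stripped-down sketch of the "snapshot metadata before handing the buffer to code that may scribble on it" idea, with invented names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_F_DATA_VALID 0x02	/* demo value, mirrors the idea only */

struct demo_hdr {
	uint8_t flags;
};

/* Stand-in for an XDP program: it may rewrite the packet, header included. */
static void run_xdp(uint8_t *buf, size_t len)
{
	(void)len;
	memset(buf, 0, sizeof(struct demo_hdr));
}

static int receive(uint8_t *buf, size_t len)
{
	/* Snapshot the flags first; the XDP call below may overwrite them. */
	uint8_t flags = ((struct demo_hdr *)buf)->flags;

	run_xdp(buf, len);

	/* Decide checksum state from the saved copy, not the dirty buffer. */
	return (flags & HDR_F_DATA_VALID) ? 1 : 0;
}

int main(void)
{
	uint8_t pkt[64] = { HDR_F_DATA_VALID };

	printf("csum valid: %d\n", receive(pkt, sizeof(pkt)));
	return 0;
}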
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 89ca6e75fcc6..63822d454c00 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2034,8 +2034,8 @@ vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
rq->data_ring.base,
rq->data_ring.basePA);
rq->data_ring.base = NULL;
- rq->data_ring.desc_size = 0;
}
+ rq->data_ring.desc_size = 0;
}
}
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index f78dd0438843..ba59e92ab941 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1446,6 +1446,10 @@ static bool vxlan_snoop(struct net_device *dev,
struct vxlan_fdb *f;
u32 ifindex = 0;
+ /* Ignore packets from invalid src-address */
+ if (!is_valid_ether_addr(src_mac))
+ return true;
+
#if IS_ENABLED(CONFIG_IPV6)
if (src_ip->sa.sa_family == AF_INET6 &&
(ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
@@ -1616,10 +1620,6 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
return false;
- /* Ignore packets from invalid src-address */
- if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
- return false;
-
/* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
@@ -2339,7 +2339,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_key *pkey;
struct ip_tunnel_key key;
struct vxlan_dev *vxlan = netdev_priv(dev);
- const struct iphdr *old_iph = ip_hdr(skb);
+ const struct iphdr *old_iph;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
unsigned int pkt_len = skb->len;
@@ -2353,8 +2353,15 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
bool use_cache;
bool udp_sum = false;
bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
+ bool no_eth_encap;
__be32 vni = 0;
+ no_eth_encap = flags & VXLAN_F_GPE && skb->protocol != htons(ETH_P_TEB);
+ if (!skb_vlan_inet_prepare(skb, no_eth_encap))
+ goto drop;
+
+ old_iph = ip_hdr(skb);
+
info = skb_tunnel_info(skb);
use_cache = ip_tunnel_dst_cache_usable(skb, info);
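The vxlan_xmit_one() change defers the ip_hdr(skb) read until skb_vlan_inet_prepare() has confirmed the inner headers are actually present in the linear area. The underlying rule, never dereference a header before checking that enough bytes exist, can be sketched generically (struct layout and names here are illustrative only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_iphdr {
	uint8_t  ver_ihl;
	uint8_t  tos;
	uint16_t tot_len;
	/* remaining IPv4 fields omitted for brevity */
};

/* Only look at the header once enough bytes are known to be present. */
static const struct demo_iphdr *get_iphdr(const uint8_t *pkt, size_t len)
{
	if (len < sizeof(struct demo_iphdr))
		return NULL;		/* would read past the end of the buffer */
	return (const struct demo_iphdr *)pkt;
}

int main(void)
{
	uint8_t pkt[2] = { 0x45, 0x00 };	/* deliberately too short */

	printf("hdr %s\n", get_iphdr(pkt, sizeof(pkt)) ? "ok" : "rejected");
	return 0;
}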
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index e6ea884cafc1..4f385f4a8cef 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -45,6 +45,7 @@ config ATH10K_SNOC
depends on ATH10K
depends on ARCH_QCOM || COMPILE_TEST
depends on QCOM_SMEM
+ depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n
select QCOM_SCM
select QCOM_QMI_HELPERS
help
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 3cc817a3b4a4..b82e8fb28541 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -604,7 +604,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = true,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 3,
.num_peers = 512,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 4f62e38ba48b..9b96dbb21d83 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -7988,8 +7988,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
int ret;
- struct cur_regulatory_info *reg_info;
- enum ieee80211_ap_reg_power power_type;
mutex_lock(&ar->conf_mutex);
@@ -8000,17 +7998,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
if (ath11k_wmi_supports_6ghz_cc_ext(ar) &&
ctx->def.chan->band == NL80211_BAND_6GHZ &&
arvif->vdev_type == WMI_VDEV_TYPE_STA) {
- reg_info = &ab->reg_info_store[ar->pdev_idx];
- power_type = vif->bss_conf.power_type;
-
- ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type);
-
- if (power_type == IEEE80211_REG_UNSET_AP) {
- ret = -EINVAL;
- goto out;
- }
-
- ath11k_reg_handle_chan_list(ab, reg_info, power_type);
arvif->chanctx = *ctx;
ath11k_mac_parse_tx_pwr_env(ar, vif, ctx);
}
@@ -9626,6 +9613,8 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ enum ieee80211_ap_reg_power power_type;
+ struct cur_regulatory_info *reg_info;
struct ath11k_peer *peer;
int ret = 0;
@@ -9705,6 +9694,29 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
sta->addr, arvif->vdev_id, ret);
}
+
+ if (!ret &&
+ ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ arvif->chanctx.def.chan &&
+ arvif->chanctx.def.chan->band == NL80211_BAND_6GHZ) {
+ reg_info = &ar->ab->reg_info_store[ar->pdev_idx];
+ power_type = vif->bss_conf.power_type;
+
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ath11k_warn(ar->ab, "invalid power type %d\n",
+ power_type);
+ ret = -EINVAL;
+ } else {
+ ret = ath11k_reg_handle_chan_list(ar->ab,
+ reg_info,
+ power_type);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to handle chan list with power type %d\n",
+ power_type);
+ }
+ }
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
spin_lock_bh(&ar->ab->base_lock);
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index 79eb3f9c902f..debe7c5919ef 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -561,6 +561,7 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
{
int i, j, n, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0;
+ struct ath11k_ext_irq_grp *irq_grp;
unsigned long irq_flags;
ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
@@ -574,14 +575,16 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
irq_flags |= IRQF_NOBALANCING;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ irq_grp = &ab->ext_irq_grp[i];
u32 num_irq = 0;
irq_grp->ab = ab;
irq_grp->grp_id = i;
irq_grp->napi_ndev = alloc_netdev_dummy(0);
- if (!irq_grp->napi_ndev)
- return -ENOMEM;
+ if (!irq_grp->napi_ndev) {
+ ret = -ENOMEM;
+ goto fail_allocate;
+ }
netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
ath11k_pcic_ext_grp_napi_poll);
@@ -606,11 +609,8 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
int irq = ath11k_pcic_get_msi_irq(ab, vector);
if (irq < 0) {
- for (n = 0; n <= i; n++) {
- irq_grp = &ab->ext_irq_grp[n];
- free_netdev(irq_grp->napi_ndev);
- }
- return irq;
+ ret = irq;
+ goto fail_irq;
}
ab->irq_num[irq_idx] = irq;
@@ -635,6 +635,15 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
}
return 0;
+fail_irq:
+ /* The i-th napi_ndev was allocated successfully; free it as well */
+ i += 1;
+fail_allocate:
+ for (n = 0; n < i; n++) {
+ irq_grp = &ab->ext_irq_grp[n];
+ free_netdev(irq_grp->napi_ndev);
+ }
+ return ret;
}
int ath11k_pcic_config_irq(struct ath11k_base *ab)
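The ath11k_pcic_ext_irq_config() rework funnels both failure cases through labels: fail_irq first bumps the index because the current group's napi_ndev had already been allocated, then fail_allocate frees only the groups before that index. The same unwind shape in plain C, as a generic sketch rather than the driver's code:

#include <stdlib.h>

#define NGROUPS 8

static int setup_groups(void **groups)
{
	int i, n, ret;

	for (i = 0; i < NGROUPS; i++) {
		groups[i] = malloc(64);
		if (!groups[i]) {
			ret = -1;
			goto fail_allocate;	/* groups[i] was NOT allocated */
		}

		if (i == 5) {			/* pretend a later step fails here */
			ret = -2;
			goto fail_irq;		/* groups[i] WAS allocated */
		}
	}
	return 0;

fail_irq:
	i += 1;		/* include the current, already allocated, entry */
fail_allocate:
	for (n = 0; n < i; n++)
		free(groups[n]);
	return ret;
}

int main(void)
{
	void *groups[NGROUPS];

	return setup_groups(groups) ? 1 : 0;
}

The extra i += 1 at fail_irq is what distinguishes "failed before allocating entry i" from "failed after allocating entry i".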
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 33654f228ee8..d156a9c64194 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1815,8 +1815,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
err_fw:
#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove_recursive(drv->dbgfs_drv);
- iwl_dbg_tlv_free(drv->trans);
#endif
+ iwl_dbg_tlv_free(drv->trans);
kfree(drv);
err:
return ERR_PTR(ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 71e6b06481a9..54f4acbbd05b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -595,6 +595,12 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
void *_data)
{
struct wowlan_key_gtk_type_iter *data = _data;
+ __le32 *cipher = NULL;
+
+ if (key->keyidx == 4 || key->keyidx == 5)
+ cipher = &data->kek_kck_cmd->igtk_cipher;
+ if (key->keyidx == 6 || key->keyidx == 7)
+ cipher = &data->kek_kck_cmd->bigtk_cipher;
switch (key->cipher) {
default:
@@ -606,10 +612,13 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
return;
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+ if (cipher)
+ *cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
return;
case WLAN_CIPHER_SUITE_AES_CMAC:
- data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ if (cipher)
+ *cipher = cpu_to_le32(STA_KEY_FLG_CCM);
return;
case WLAN_CIPHER_SUITE_CCMP:
if (!sta)
@@ -2341,7 +2350,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
out:
if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
- WOWLAN_GET_STATUSES, 0) < 10) {
+ WOWLAN_GET_STATUSES,
+ IWL_FW_CMD_VER_UNKNOWN) < 10) {
mvmvif->seqno_valid = true;
/* +0x10 because the set API expects next-to-use, not last-used */
mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 79f4ac8cbc72..8101ecbb478b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1617,6 +1617,15 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
&beacon_cmd.tim_size,
beacon->data, beacon->len);
+ if (iwl_fw_lookup_cmd_ver(mvm->fw,
+ BEACON_TEMPLATE_CMD, 0) >= 14) {
+ u32 offset = iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len);
+
+ beacon_cmd.btwt_offset = cpu_to_le32(offset);
+ }
+
iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index e7f5978ef2d7..f4937a100cbe 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -94,20 +94,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
- __le32 *dump_data = mfu_dump_notif->data;
- int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
- int i;
if (mfu_dump_notif->index_num == 0)
IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
le32_to_cpu(mfu_dump_notif->assert_id));
-
- for (i = 0; i < n_words; i++)
- IWL_DEBUG_INFO(mvm,
- "MFUART assert dump, dword %u: 0x%08x\n",
- le16_to_cpu(mfu_dump_notif->index_num) *
- n_words + i,
- le32_to_cpu(dump_data[i]));
}
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
@@ -895,8 +885,8 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
int ret;
u16 len = 0;
u32 n_subbands;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
+
if (cmd_ver >= 7) {
len = sizeof(cmd.v7);
n_subbands = IWL_NUM_SUB_BANDS_V2;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 5a06f887769a..5144fa0f96b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -873,7 +873,7 @@ void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
}
}
-static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
+u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
{
struct ieee80211_mgmt *mgmt = (void *)beacon;
const u8 *ie;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 486a6b8f3c97..18ce060df9b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1128,6 +1128,39 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
}
+static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = data;
+ struct iwl_mvm_sta *mvm_sta;
+ struct ieee80211_vif *vif;
+ int link_id;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvm_sta->vif;
+
+ if (!sta->valid_links)
+ return;
+
+ for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) {
+ struct iwl_mvm_link_sta *mvm_link_sta;
+
+ mvm_link_sta =
+ rcu_dereference_check(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ if (mvm_link_sta && !(vif->active_links & BIT(link_id))) {
+ /*
+ * We have a link STA but the link is inactive in
+ * mac80211. This will happen if we failed to
+ * deactivate the link but mac80211 rolled back the
+ * deactivation of the link.
+ * Delete the stale data to avoid issues later on.
+ */
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
+ link_id, false);
+ }
+ }
+}
+
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
iwl_mvm_stop_device(mvm);
@@ -1150,6 +1183,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
*/
ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
+ /* cleanup stations as links may be gone after restart */
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ iwl_mvm_cleanup_sta_iterator, mvm);
+
mvm->p2p_device_vif = NULL;
iwl_mvm_reset_phy_ctxts(mvm);
@@ -4758,7 +4795,7 @@ static int iwl_mvm_roc_station(struct iwl_mvm *mvm,
if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) {
ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration);
- } else if (fw_ver == 3) {
+ } else if (fw_ver >= 3) {
ret = iwl_mvm_roc_add_cmd(mvm, channel, vif, duration,
ROC_ACTIVITY_HOTSPOT);
} else {
@@ -6348,7 +6385,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
.len[0] = sizeof(cmd),
.data[1] = data,
.len[1] = size,
- .flags = sync ? 0 : CMD_ASYNC,
+ .flags = CMD_SEND_IN_RFKILL | (sync ? 0 : CMD_ASYNC),
};
int ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 0a3b7284eedd..fcfd2dd7568e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -75,8 +75,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
goto out_free_bf;
iwl_mvm_tcm_add_vif(mvm, vif);
- INIT_DELAYED_WORK(&mvmvif->csa_work,
- iwl_mvm_channel_switch_disconnect_wk);
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvm->monitor_on = true;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index b7a461dba41e..9d139b56e152 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -515,11 +515,11 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return iwl_mvm_mld_send_sta_cmd(mvm, &cmd);
}
-static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvm_sta,
- struct iwl_mvm_link_sta *mvm_sta_link,
- unsigned int link_id,
- bool is_in_fw)
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ struct iwl_mvm_link_sta *mvm_sta_link,
+ unsigned int link_id,
+ bool is_in_fw)
{
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id],
is_in_fw ? ERR_PTR(-EINVAL) : NULL);
@@ -1014,7 +1014,8 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,
cmd.modify.tid = cpu_to_le32(data->tid);
- ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL,
+ sizeof(cmd), &cmd);
data->sta_mask = new_sta_mask;
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 1f58c727fa63..0a1959bd4079 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1758,6 +1758,7 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
u64 *boottime, ktime_t *realtime);
u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
+u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size);
/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 376b23b409dc..6cd4ec4d8f34 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -122,13 +122,8 @@ enum {
#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
-/*
- * FIXME - various places in firmware API still use u8,
- * e.g. LQ command and SCD config command.
- * This should be 256 instead.
- */
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64)
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index d78af2928152..489cfb0a4ab1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -2450,8 +2450,11 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
*
* We mark it as mac header, for upper layers to know where
* all radio tap header ends.
+ *
+ * Since putting data on the skb does not move existing data, and that
+ * is the only way we add data here, data + len is the next place that
+ * hdr would be put
*/
- skb_reset_mac_header(skb);
+ skb_set_mac_header(skb, skb->len);
/*
* Override the nss from the rx_vec since the rate_n_flags has
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index a7ec172eeade..e975f5ff17b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1313,7 +1313,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
if (IWL_MVM_ADWELL_MAX_BUDGET)
cmd->v7.adwell_max_budget =
cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
- else if (params->ssids && params->ssids[0].ssid_len)
+ else if (params->n_ssids && params->ssids[0].ssid_len)
cmd->v7.adwell_max_budget =
cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
else
@@ -1418,7 +1418,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
if (IWL_MVM_ADWELL_MAX_BUDGET)
general_params->adwell_max_budget =
cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
- else if (params->ssids && params->ssids[0].ssid_len)
+ else if (params->n_ssids && params->ssids[0].ssid_len)
general_params->adwell_max_budget =
cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
else
@@ -1730,7 +1730,10 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
break;
}
- if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) {
+ if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
+ !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
+ "scan: invalid BSSID at index %u, index_b=%u\n",
+ j, idex_b)) {
memcpy(&pp->bssid_array[idex_b++],
scan_6ghz_params[j].bssid, ETH_ALEN);
}
@@ -1827,7 +1830,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
*/
if (!iwl_mvm_is_scan_fragmented(params->type)) {
if (!cfg80211_channel_is_psc(params->channels[i]) ||
- flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) {
+ psc_no_listen) {
if (unsolicited_probe_on_chan) {
max_s_ssids = 2;
max_bssids = 6;
@@ -3319,10 +3322,11 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
- 0, sizeof(cmd), &cmd);
+ CMD_SEND_IN_RFKILL, sizeof(cmd), &cmd);
if (!ret)
mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+ IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 20d4968d692a..cc79fe991c26 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2848,7 +2848,12 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
.action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
};
- u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
+ .flags = CMD_SEND_IN_RFKILL,
+ .len[0] = sizeof(cmd),
+ .data[0] = &cmd,
+ };
int ret;
BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
@@ -2860,7 +2865,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
cmd.alloc.ssn = cpu_to_le16(ssn);
cmd.alloc.win_size = cpu_to_le16(buf_size);
baid = -EIO;
- } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
+ } else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
cmd.remove_v1.baid = cpu_to_le32(baid);
BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
} else {
@@ -2869,8 +2874,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
cmd.remove.tid = cpu_to_le32(tid);
}
- ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
- &cmd, &baid);
+ ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 264f1f9394b6..754a05a8c189 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -662,6 +662,11 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ struct iwl_mvm_link_sta *mvm_sta_link,
+ unsigned int link_id,
+ bool is_in_fw);
int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 8ee4498f4245..31bc80cdcb7d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1238,6 +1238,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) {
IWL_DEBUG_TE(mvm,
"No remain on channel event\n");
+ mutex_unlock(&mvm->mutex);
return;
}
@@ -1253,6 +1254,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
te_data = iwl_mvm_get_roc_te(mvm);
if (!te_data) {
IWL_WARN(mvm, "No remain on channel event\n");
+ mutex_unlock(&mvm->mutex);
return;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 0971c164b57e..c27acaf0eb1c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -1326,6 +1326,10 @@ static void mt7615_set_rekey_data(struct ieee80211_hw *hw,
#endif /* CONFIG_PM */
const struct ieee80211_ops mt7615_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7615_tx,
.start = mt7615_start,
.stop = mt7615_stop,
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 7d9fb9f2d527..089102ed9ae5 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -237,11 +237,12 @@ static int set_channel(struct wiphy *wiphy,
struct wilc_vif *vif;
u32 channelnum;
int result;
+ int srcu_idx;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return PTR_ERR(vif);
}
@@ -252,7 +253,7 @@ static int set_channel(struct wiphy *wiphy,
if (result)
netdev_err(vif->ndev, "Error in setting channel\n");
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return result;
}
@@ -805,8 +806,9 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
struct wilc_priv *priv;
+ int srcu_idx;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif))
goto out;
@@ -861,7 +863,7 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n");
out:
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ret;
}
@@ -1537,19 +1539,20 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
if (type == NL80211_IFTYPE_MONITOR) {
struct net_device *ndev;
+ int srcu_idx;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_vif_from_type(wl, WILC_AP_MODE);
if (!vif) {
vif = wilc_get_vif_from_type(wl, WILC_GO_MODE);
if (!vif) {
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
goto validate_interface;
}
}
if (vif->monitor_flag) {
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
goto validate_interface;
}
@@ -1557,12 +1560,12 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
if (ndev) {
vif->monitor_flag = 1;
} else {
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ERR_PTR(-EINVAL);
}
wdev = &vif->priv.wdev;
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return wdev;
}
@@ -1610,7 +1613,7 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
list_del_rcu(&vif->list);
wl->vif_num--;
mutex_unlock(&wl->vif_mutex);
- synchronize_rcu();
+ synchronize_srcu(&wl->srcu);
return 0;
}
@@ -1635,23 +1638,25 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
{
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
+ int srcu_idx;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return;
}
netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled);
wilc_set_wowlan_trigger(vif, enabled);
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
}
static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
enum nl80211_tx_power_setting type, int mbm)
{
int ret;
+ int srcu_idx;
s32 tx_power = MBM_TO_DBM(mbm);
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
@@ -1659,10 +1664,10 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
if (!wl->initialized)
return -EIO;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return -EINVAL;
}
@@ -1674,7 +1679,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
ret = wilc_set_tx_power(vif, tx_power);
if (ret)
netdev_err(vif->ndev, "Failed to set tx power\n");
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ret;
}
@@ -1757,6 +1762,7 @@ static void wlan_init_locks(struct wilc *wl)
init_completion(&wl->cfg_event);
init_completion(&wl->sync_event);
init_completion(&wl->txq_thread_started);
+ init_srcu_struct(&wl->srcu);
}
void wlan_deinit_locks(struct wilc *wilc)
@@ -1767,6 +1773,7 @@ void wlan_deinit_locks(struct wilc *wilc)
mutex_destroy(&wilc->txq_add_to_head_cs);
mutex_destroy(&wilc->vif_mutex);
mutex_destroy(&wilc->deinit_lock);
+ cleanup_srcu_struct(&wilc->srcu);
}
int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index 919de6ffb821..f1085ccb7eed 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -1570,11 +1570,12 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
struct host_if_drv *hif_drv;
struct host_if_msg *msg;
struct wilc_vif *vif;
+ int srcu_idx;
int result;
int id;
id = get_unaligned_le32(&buffer[length - 4]);
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
goto out;
@@ -1593,7 +1594,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
msg->body.net_info.rssi = buffer[8];
msg->body.net_info.mgmt = kmemdup(&buffer[9],
msg->body.net_info.frame_len,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!msg->body.net_info.mgmt) {
kfree(msg);
goto out;
@@ -1606,7 +1607,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
kfree(msg);
}
out:
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
@@ -1614,13 +1615,14 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
struct host_if_drv *hif_drv;
struct host_if_msg *msg;
struct wilc_vif *vif;
+ int srcu_idx;
int result;
int id;
mutex_lock(&wilc->deinit_lock);
id = get_unaligned_le32(&buffer[length - 4]);
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
goto out;
@@ -1647,7 +1649,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
kfree(msg);
}
out:
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
mutex_unlock(&wilc->deinit_lock);
}
@@ -1655,11 +1657,12 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
{
struct host_if_drv *hif_drv;
struct wilc_vif *vif;
+ int srcu_idx;
int result;
int id;
id = get_unaligned_le32(&buffer[length - 4]);
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
goto out;
@@ -1684,7 +1687,7 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
}
}
out:
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u16 chan,
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 73f56f7b002b..710e29bea560 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -127,28 +127,30 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid,
int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
{
+ int srcu_idx;
u8 ret_val = 0;
struct wilc_vif *vif;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_for_each_vif(wilc, vif) {
if (!is_zero_ether_addr(vif->bssid))
ret_val++;
}
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
return ret_val;
}
static void wilc_wake_tx_queues(struct wilc *wl)
{
+ int srcu_idx;
struct wilc_vif *ifc;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wl->srcu);
wilc_for_each_vif(wl, ifc) {
if (ifc->mac_opened && netif_queue_stopped(ifc->ndev))
netif_wake_queue(ifc->ndev);
}
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
}
static int wilc_txq_task(void *vp)
@@ -653,6 +655,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
struct sockaddr *addr = (struct sockaddr *)p;
unsigned char mac_addr[ETH_ALEN];
struct wilc_vif *tmp_vif;
+ int srcu_idx;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -664,19 +667,19 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
/* Verify MAC Address is not already in use: */
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_for_each_vif(wilc, tmp_vif) {
wilc_get_mac_address(tmp_vif, mac_addr);
if (ether_addr_equal(addr->sa_data, mac_addr)) {
if (vif != tmp_vif) {
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
return -EADDRNOTAVAIL;
}
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
return 0;
}
}
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
result = wilc_set_mac_address(vif, (u8 *)addr->sa_data);
if (result)
@@ -764,14 +767,15 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
wilc_tx_complete);
if (queue_count > FLOW_CONTROL_UPPER_THRESHOLD) {
+ int srcu_idx;
struct wilc_vif *vif;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_for_each_vif(wilc, vif) {
if (vif->mac_opened)
netif_stop_queue(vif->ndev);
}
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
return NETDEV_TX_OK;
@@ -815,12 +819,13 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
unsigned int frame_len = 0;
struct wilc_vif *vif;
struct sk_buff *skb;
+ int srcu_idx;
int stats;
if (!wilc)
return;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_netdev = get_if_handler(wilc, buff);
if (!wilc_netdev)
goto out;
@@ -848,14 +853,15 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats);
}
out:
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
{
+ int srcu_idx;
struct wilc_vif *vif;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_for_each_vif(wilc, vif) {
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff;
u16 type = le16_to_cpup((__le16 *)buff);
@@ -876,7 +882,7 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
if (vif->monitor_flag)
wilc_wfi_monitor_rx(wilc->monitor_dev, buff, size);
}
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
static const struct net_device_ops wilc_netdev_ops = {
@@ -906,7 +912,7 @@ void wilc_netdev_cleanup(struct wilc *wilc)
list_del_rcu(&vif->list);
wilc->vif_num--;
mutex_unlock(&wilc->vif_mutex);
- synchronize_rcu();
+ synchronize_srcu(&wilc->srcu);
if (vif->ndev)
unregister_netdev(vif->ndev);
}
@@ -925,15 +931,16 @@ static u8 wilc_get_available_idx(struct wilc *wl)
{
int idx = 0;
struct wilc_vif *vif;
+ int srcu_idx;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wl->srcu);
wilc_for_each_vif(wl, vif) {
if (vif->idx == 0)
idx = 1;
else
idx = 0;
}
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return idx;
}
@@ -983,7 +990,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
list_add_tail_rcu(&vif->list, &wl->vif_list);
wl->vif_num += 1;
mutex_unlock(&wl->vif_mutex);
- synchronize_rcu();
+ synchronize_srcu(&wl->srcu);
return vif;
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index eecee3973d6a..fde8610a9c84 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -32,8 +32,8 @@
#define wilc_for_each_vif(w, v) \
struct wilc *_w = w; \
- list_for_each_entry_rcu(v, &_w->vif_list, list, \
- rcu_read_lock_held())
+ list_for_each_entry_srcu(v, &_w->vif_list, list, \
+ srcu_read_lock_held(&_w->srcu))
struct wilc_wfi_stats {
unsigned long rx_packets;
@@ -220,6 +220,14 @@ struct wilc {
/* protect vif list */
struct mutex vif_mutex;
+ /* Sleepable RCU struct to manipulate vif list. Sleepable version is
+ * needed over the classic RCU version because the driver's current
+ * design involves some sleeping code while manipulating a vif
+ * retrieved from vif list (so in a SRCU critical section), like:
+ * - sending commands to the chip, using info from retrieved vif
+ * - registering a new monitoring net device
+ */
+ struct srcu_struct srcu;
u8 open_ifcs;
/* protect head of transmit queue */
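The netdev.h comment above spells out why the vif list moves from plain RCU to SRCU: readers may sleep (sending commands to the chip, registering a monitor netdev) while still holding a pointer obtained from the list. In kernel terms the read side then looks roughly like the fragment below; this is a generic kernel-style sketch with invented structures, not wilc1000 code, and it assumes the srcu_struct was set up with init_srcu_struct() beforehand.

#include <linux/srcu.h>
#include <linux/rculist.h>

struct demo_dev {
	struct srcu_struct srcu;	/* protects @items for sleeping readers */
	struct list_head items;
};

struct demo_item {
	struct list_head list;
};

static void demo_walk(struct demo_dev *dev)
{
	struct demo_item *it;
	int idx;

	idx = srcu_read_lock(&dev->srcu);
	list_for_each_entry_srcu(it, &dev->items, list,
				 srcu_read_lock_held(&dev->srcu)) {
		/* It is safe to sleep here, e.g. to send a command to the chip. */
	}
	srcu_read_unlock(&dev->srcu, idx);
}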
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 37c32d17856e..a9e872a7b2c3 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -712,6 +712,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
u32 *vmm_table = wilc->vmm_table;
u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0};
const struct wilc_hif_func *func;
+ int srcu_idx;
u8 *txb = wilc->tx_buffer;
struct wilc_vif *vif;
@@ -723,10 +724,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
mutex_lock(&wilc->txq_add_to_head_cs);
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_for_each_vif(wilc, vif)
wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev);
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
for (ac = 0; ac < NQUEUES; ac++)
tqe_q[ac] = wilc_wlan_txq_get_first(wilc, ac);
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 2e60a6991ca1..42b7db12b1bd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -633,21 +633,6 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
- hw->conf.long_frame_max_tx_count);
- /* brought up everything changes (changed == ~0) indicates first
- * open, so use our default value instead of that of wiphy.
- */
- if (changed != ~0) {
- mac->retry_long = hw->conf.long_frame_max_tx_count;
- mac->retry_short = hw->conf.long_frame_max_tx_count;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
- (u8 *)(&hw->conf.long_frame_max_tx_count));
- }
- }
-
if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
!rtlpriv->proximity.proxim_on) {
struct ieee80211_channel *channel = hw->conf.chandef.chan;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
index bef6819986e9..33d6342124bc 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
@@ -211,7 +211,7 @@ static int ipc_devlink_create_region(struct iosm_devlink *devlink)
rc = PTR_ERR(devlink->cd_regions[i]);
dev_err(devlink->dev, "Devlink region fail,err %d", rc);
/* Delete previously created regions */
- for ( ; i >= 0; i--)
+ for (i--; i >= 0; i--)
devlink_region_destroy(devlink->cd_regions[i]);
goto region_create_fail;
}
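The one-character loop fix above matters because, when creation of region i fails, only regions 0..i-1 exist; starting the rollback at i would destroy an entry that was never created. A small sketch of that boundary condition (generic names, not the wwan API):

#include <stdio.h>

#define NREGIONS 4

static void destroy_region(int idx)
{
	printf("destroy %d\n", idx);
}

static int create_region(int idx)
{
	return idx == 2 ? -1 : 0;	/* pretend the third region fails */
}

static int setup_regions(void)
{
	int i, rc;

	for (i = 0; i < NREGIONS; i++) {
		rc = create_region(i);
		if (rc) {
			/* Entry i was never created; roll back from i - 1. */
			for (i--; i >= 0; i--)
				destroy_region(i);
			return rc;
		}
	}
	return 0;
}

int main(void)
{
	return setup_regions() ? 1 : 0;
}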
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index f2aef84fc08d..d5a9360323d2 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -42,6 +42,8 @@
#define MHI_MBIM_LINK_HASH_SIZE 8
#define LINK_HASH(session) ((session) % MHI_MBIM_LINK_HASH_SIZE)
+#define WDS_BIND_MUX_DATA_PORT_MUX_ID 112
+
struct mhi_mbim_link {
struct mhi_mbim_context *mbim;
struct net_device *ndev;
@@ -93,6 +95,15 @@ static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim
return NULL;
}
+static int mhi_mbim_get_link_mux_id(struct mhi_controller *cntrl)
+{
+ if (strcmp(cntrl->name, "foxconn-dw5934e") == 0 ||
+ strcmp(cntrl->name, "foxconn-t99w515") == 0)
+ return WDS_BIND_MUX_DATA_PORT_MUX_ID;
+
+ return 0;
+}
+
static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
u16 tx_seq)
{
@@ -596,7 +607,7 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
{
struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
struct mhi_mbim_context *mbim;
- int err;
+ int err, link_id;
mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL);
if (!mbim)
@@ -617,8 +628,11 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
/* Number of transfer descriptors determines size of the queue */
mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
+ /* Get the corresponding mux_id from mhi */
+ link_id = mhi_mbim_get_link_mux_id(cntrl);
+
/* Register wwan link ops with MHI controller representing WWAN instance */
- return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0);
+ return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, link_id);
}
static void mhi_mbim_remove(struct mhi_device *mhi_dev)
diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
index 590b038e449e..6b89d596ba9a 100644
--- a/drivers/nfc/virtual_ncidev.c
+++ b/drivers/nfc/virtual_ncidev.c
@@ -125,6 +125,10 @@ static ssize_t virtual_ncidev_write(struct file *file,
kfree_skb(skb);
return -EFAULT;
}
+ if (strnlen(skb->data, count) != count) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
nci_recv_frame(vdev->ndev, skb);
return count;
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index dd6ec0865141..0cfa39361d3b 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1602,4 +1602,5 @@ static struct platform_driver apple_nvme_driver = {
module_platform_driver(apple_nvme_driver);
MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple ANS NVM Express device driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 954f850f113a..782090ce0bc1 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -414,7 +414,15 @@ static inline void nvme_end_req_zoned(struct request *req)
}
}
-static inline void nvme_end_req(struct request *req)
+static inline void __nvme_end_req(struct request *req)
+{
+ nvme_end_req_zoned(req);
+ nvme_trace_bio_complete(req);
+ if (req->cmd_flags & REQ_NVME_MPATH)
+ nvme_mpath_end_request(req);
+}
+
+void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
@@ -424,10 +432,7 @@ static inline void nvme_end_req(struct request *req)
else
nvme_log_error(req);
}
- nvme_end_req_zoned(req);
- nvme_trace_bio_complete(req);
- if (req->cmd_flags & REQ_NVME_MPATH)
- nvme_mpath_end_request(req);
+ __nvme_end_req(req);
blk_mq_end_request(req, status);
}
@@ -476,7 +481,7 @@ void nvme_complete_batch_req(struct request *req)
{
trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
- nvme_end_req_zoned(req);
+ __nvme_end_req(req);
}
EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
@@ -673,7 +678,7 @@ static void nvme_free_ns(struct kref *kref)
kfree(ns);
}
-static inline bool nvme_get_ns(struct nvme_ns *ns)
+bool nvme_get_ns(struct nvme_ns *ns)
{
return kref_get_unless_zero(&ns->kref);
}
@@ -993,6 +998,7 @@ void nvme_cleanup_cmd(struct request *req)
clear_bit_unlock(0, &ctrl->discard_page_busy);
else
kfree(bvec_virt(&req->special_vec));
+ req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
@@ -3679,9 +3685,10 @@ out_unlock:
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns, *ret = NULL;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
if (ns->head->ns_id == nsid) {
if (!nvme_get_ns(ns))
continue;
@@ -3691,7 +3698,7 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (ns->head->ns_id > nsid)
break;
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
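The nvme core conversion pairs the SRCU read side visible in nvme_find_get_ns() with a writer discipline: list mutations happen under the new namespaces_lock mutex, and synchronize_srcu() runs before a namespace can actually go away. A generic kernel-style sketch of that writer side (invented names, not the nvme code; assumes the srcu_struct and list were initialized elsewhere):

#include <linux/mutex.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct demo_ctrl {
	struct mutex lock;		/* serializes list writers */
	struct srcu_struct srcu;	/* protects readers of @items */
	struct list_head items;
};

struct demo_item {
	struct list_head list;
};

static void demo_remove(struct demo_ctrl *ctrl, struct demo_item *item)
{
	mutex_lock(&ctrl->lock);
	list_del_rcu(&item->list);
	mutex_unlock(&ctrl->lock);

	/* Wait until no SRCU reader can still observe @item ... */
	synchronize_srcu(&ctrl->srcu);

	/* ... and only then free it. */
	kfree(item);
}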
@@ -3705,7 +3712,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
if (tmp->head->ns_id < ns->head->ns_id) {
- list_add(&ns->list, &tmp->list);
+ list_add_rcu(&ns->list, &tmp->list);
return;
}
}
@@ -3771,17 +3778,18 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
if (nvme_update_ns_info(ns, info))
goto out_unlink_ns;
- down_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
/*
* Ensure that no namespaces are added to the ctrl list after the queues
* are frozen, thereby avoiding a deadlock between scan and reset.
*/
if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
- up_write(&ctrl->namespaces_rwsem);
+ mutex_unlock(&ctrl->namespaces_lock);
goto out_unlink_ns;
}
nvme_ns_add_to_ctrl_list(ns);
- up_write(&ctrl->namespaces_rwsem);
+ mutex_unlock(&ctrl->namespaces_lock);
+ synchronize_srcu(&ctrl->srcu);
nvme_get_ctrl(ctrl);
if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups))
@@ -3804,9 +3812,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
out_cleanup_ns_from_list:
nvme_put_ctrl(ctrl);
- down_write(&ctrl->namespaces_rwsem);
- list_del_init(&ns->list);
- up_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
+ list_del_rcu(&ns->list);
+ mutex_unlock(&ctrl->namespaces_lock);
+ synchronize_srcu(&ctrl->srcu);
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
@@ -3856,9 +3865,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
del_gendisk(ns->disk);
- down_write(&ns->ctrl->namespaces_rwsem);
- list_del_init(&ns->list);
- up_write(&ns->ctrl->namespaces_rwsem);
+ mutex_lock(&ns->ctrl->namespaces_lock);
+ list_del_rcu(&ns->list);
+ mutex_unlock(&ns->ctrl->namespaces_lock);
+ synchronize_srcu(&ns->ctrl->srcu);
if (last_path)
nvme_mpath_shutdown_disk(ns->head);
@@ -3948,16 +3958,18 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
struct nvme_ns *ns, *next;
LIST_HEAD(rm_list);
- down_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
- if (ns->head->ns_id > nsid)
- list_move_tail(&ns->list, &rm_list);
+ if (ns->head->ns_id > nsid) {
+ list_del_rcu(&ns->list);
+ synchronize_srcu(&ctrl->srcu);
+ list_add_tail_rcu(&ns->list, &rm_list);
+ }
}
- up_write(&ctrl->namespaces_rwsem);
+ mutex_unlock(&ctrl->namespaces_lock);
list_for_each_entry_safe(ns, next, &rm_list, list)
nvme_ns_remove(ns);
-
}
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
@@ -4127,9 +4139,10 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
/* this is a no-op when called from the controller reset handler */
nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
- down_write(&ctrl->namespaces_rwsem);
- list_splice_init(&ctrl->namespaces, &ns_list);
- up_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
+ list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu);
+ mutex_unlock(&ctrl->namespaces_lock);
+ synchronize_srcu(&ctrl->srcu);
list_for_each_entry_safe(ns, next, &ns_list, list)
nvme_ns_remove(ns);
@@ -4577,6 +4590,7 @@ static void nvme_free_ctrl(struct device *dev)
key_put(ctrl->tls_key);
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
+ cleanup_srcu_struct(&ctrl->srcu);
nvme_auth_stop(ctrl);
nvme_auth_free(ctrl);
__free_page(ctrl->discard_page);
@@ -4609,10 +4623,15 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
ctrl->passthru_err_log_enabled = false;
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
spin_lock_init(&ctrl->lock);
+ mutex_init(&ctrl->namespaces_lock);
+
+ ret = init_srcu_struct(&ctrl->srcu);
+ if (ret)
+ return ret;
+
mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
xa_init(&ctrl->cels);
- init_rwsem(&ctrl->namespaces_rwsem);
ctrl->dev = dev;
ctrl->ops = ops;
ctrl->quirks = quirks;
@@ -4692,6 +4711,7 @@ out_release_instance:
out:
if (ctrl->discard_page)
__free_page(ctrl->discard_page);
+ cleanup_srcu_struct(&ctrl->srcu);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
@@ -4700,22 +4720,24 @@ EXPORT_SYMBOL_GPL(nvme_init_ctrl);
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_mark_disk_dead(ns->disk);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_mq_unfreeze_queue(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
@@ -4723,14 +4745,15 @@ EXPORT_SYMBOL_GPL(nvme_unfreeze);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
if (timeout <= 0)
break;
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
@@ -4738,23 +4761,25 @@ EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_mq_freeze_queue_wait(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);
void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_freeze_queue_start(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
@@ -4797,11 +4822,12 @@ EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_sync_queue(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
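The core.c hunks above replace ctrl->namespaces_rwsem with a writer-side mutex plus SRCU for readers. A minimal sketch of the resulting locking pattern, with hypothetical demo_ctrl/demo_ns types standing in for the real nvme structures (this is an illustration of the idiom, not the driver code itself):
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/srcu.h>
struct demo_ns {
	struct list_head list;
};
struct demo_ctrl {
	struct list_head namespaces;	/* RCU-protected list of demo_ns */
	struct mutex namespaces_lock;	/* serializes writers only */
	struct srcu_struct srcu;	/* protects readers */
};
/* Reader: no sleeping lock, just an SRCU read-side critical section. */
static void demo_for_each_ns(struct demo_ctrl *ctrl)
{
	struct demo_ns *ns;
	int srcu_idx;
	srcu_idx = srcu_read_lock(&ctrl->srcu);
	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
		/* e.g. blk_mq_unfreeze_queue(ns->queue) in the real driver */
		(void)ns;
	}
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
/* Writer: mutate under the mutex, then wait out readers before reuse. */
static void demo_remove_ns(struct demo_ctrl *ctrl, struct demo_ns *ns)
{
	mutex_lock(&ctrl->namespaces_lock);
	list_del_rcu(&ns->list);
	mutex_unlock(&ctrl->namespaces_lock);
	synchronize_srcu(&ctrl->srcu);	/* readers can no longer see ns */
}
Readers never block writers; writers serialize against each other on the mutex and call synchronize_srcu() before an entry may be freed or moved, which is exactly the ordering the hunks above add after each list_del_rcu()/list_add_rcu().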
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index c6ad2148c2e0..ceb9c0ed3120 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -180,7 +180,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0);
+ NVME_QID_ANY, NVME_SUBMIT_RESERVED);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -226,7 +226,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0);
+ NVME_QID_ANY, NVME_SUBMIT_RESERVED);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -271,7 +271,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.value = cpu_to_le64(val);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
- NVME_QID_ANY, 0);
+ NVME_QID_ANY, NVME_SUBMIT_RESERVED);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 499a8bb7cac7..8b69427a4476 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -111,6 +111,13 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
return req;
}
+static void nvme_unmap_bio(struct bio *bio)
+{
+ if (bio_integrity(bio))
+ bio_integrity_unmap_free_user(bio);
+ blk_rq_unmap_user(bio);
+}
+
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
@@ -157,7 +164,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
out_unmap:
if (bio)
- blk_rq_unmap_user(bio);
+ nvme_unmap_bio(bio);
out:
blk_mq_free_request(req);
return ret;
@@ -195,7 +202,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
if (bio)
- blk_rq_unmap_user(bio);
+ nvme_unmap_bio(bio);
blk_mq_free_request(req);
if (effects)
@@ -406,7 +413,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
if (pdu->bio)
- blk_rq_unmap_user(pdu->bio);
+ nvme_unmap_bio(pdu->bio);
io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
}
@@ -432,7 +439,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
*/
if (blk_rq_is_poll(req)) {
if (pdu->bio)
- blk_rq_unmap_user(pdu->bio);
+ nvme_unmap_bio(pdu->bio);
io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
} else {
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
@@ -789,15 +796,15 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
bool open_for_write)
{
struct nvme_ns *ns;
- int ret;
+ int ret, srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
if (list_empty(&ctrl->namespaces)) {
ret = -ENOTTY;
goto out_unlock;
}
- ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
+ ns = list_first_or_null_rcu(&ctrl->namespaces, struct nvme_ns, list);
if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
dev_warn(ctrl->device,
"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
@@ -807,15 +814,18 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
dev_warn(ctrl->device,
"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
- kref_get(&ns->kref);
- up_read(&ctrl->namespaces_rwsem);
+ if (!nvme_get_ns(ns)) {
+ ret = -ENXIO;
+ goto out_unlock;
+ }
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
nvme_put_ns(ns);
return ret;
out_unlock:
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return ret;
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index d16e976ae1a4..d8b6b4648eaf 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -118,7 +118,8 @@ void nvme_failover_req(struct request *req)
blk_steal_bios(&ns->head->requeue_list, req);
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
- blk_mq_end_request(req, 0);
+ nvme_req(req)->status = 0;
+ nvme_end_req(req);
kblockd_schedule_work(&ns->head->requeue_work);
}
@@ -150,16 +151,17 @@ void nvme_mpath_end_request(struct request *rq)
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
if (!ns->head->disk)
continue;
kblockd_schedule_work(&ns->head->requeue_work);
if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
disk_uevent(ns->head->disk, KOBJ_CHANGE);
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
static const char *nvme_ana_state_names[] = {
@@ -193,13 +195,14 @@ out:
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
nvme_mpath_clear_current_path(ns);
kblockd_schedule_work(&ns->head->requeue_work);
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
@@ -595,7 +598,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
int node, srcu_idx;
srcu_idx = srcu_read_lock(&head->srcu);
- for_each_node(node)
+ for_each_online_node(node)
__nvme_find_path(head, node);
srcu_read_unlock(&head->srcu, srcu_idx);
}
@@ -680,6 +683,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
unsigned *nr_change_groups = data;
struct nvme_ns *ns;
+ int srcu_idx;
dev_dbg(ctrl->device, "ANA group %d: %s.\n",
le32_to_cpu(desc->grpid),
@@ -691,8 +695,8 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
if (!nr_nsids)
return 0;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
unsigned nsid;
again:
nsid = le32_to_cpu(desc->nsids[n]);
@@ -705,7 +709,7 @@ again:
if (ns->head->ns_id > nsid)
goto again;
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return 0;
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cacc56f4bbf4..68b400f9c42d 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -282,7 +282,8 @@ struct nvme_ctrl {
struct blk_mq_tag_set *tagset;
struct blk_mq_tag_set *admin_tagset;
struct list_head namespaces;
- struct rw_semaphore namespaces_rwsem;
+ struct mutex namespaces_lock;
+ struct srcu_struct srcu;
struct device ctrl_device;
struct device *device; /* char device */
#ifdef CONFIG_NVME_HWMON
@@ -471,8 +472,6 @@ struct nvme_ns_head {
u8 pi_type;
u8 pi_offset;
u8 guard_type;
- u16 sgs;
- u32 sws;
#ifdef CONFIG_BLK_DEV_ZONED
u64 zsze;
#endif
@@ -503,7 +502,7 @@ static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
enum nvme_ns_features {
NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
- NVME_NS_DEAC, /* DEAC bit in Write Zeroes supported */
+ NVME_NS_DEAC = 1 << 2, /* DEAC bit in Write Zeroes supported */
};
struct nvme_ns {
@@ -767,6 +766,7 @@ static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
}
}
+void nvme_end_req(struct request *req);
void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);
@@ -1161,6 +1161,7 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
+bool nvme_get_ns(struct nvme_ns *ns);
void nvme_put_ns(struct nvme_ns *ns);
static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 710043086dff..102a9fb0c65f 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -778,7 +778,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct bio_vec bv = req_bvec(req);
if (!is_pci_p2pdma_page(bv.bv_page)) {
- if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+ if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
+ bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
return nvme_setup_prp_simple(dev, req,
&cmnd->rw, &bv);
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index e05571b2a1b0..8fa1ffcdaed4 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -77,7 +77,7 @@ static int nvme_sc_to_pr_err(int nvme_sc)
if (nvme_is_path_error(nvme_sc))
return PR_STS_PATH_FAILED;
- switch (nvme_sc) {
+ switch (nvme_sc & 0x7ff) {
case NVME_SC_SUCCESS:
return PR_STS_SUCCESS;
case NVME_SC_RESERVATION_CONFLICT:
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 7c43a0ad6877..685e89b35d33 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -410,7 +410,29 @@ static ssize_t nvmet_addr_tsas_show(struct config_item *item,
return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name);
}
}
- return sprintf(page, "reserved\n");
+ return sprintf(page, "\n");
+}
+
+static u8 nvmet_addr_tsas_rdma_store(const char *page)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
+ if (sysfs_streq(page, nvmet_addr_tsas_rdma[i].name))
+ return nvmet_addr_tsas_rdma[i].type;
+ }
+ return NVMF_RDMA_QPTYPE_INVALID;
+}
+
+static u8 nvmet_addr_tsas_tcp_store(const char *page)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
+ if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name))
+ return nvmet_addr_tsas_tcp[i].type;
+ }
+ return NVMF_TCP_SECTYPE_INVALID;
}
static ssize_t nvmet_addr_tsas_store(struct config_item *item,
@@ -418,20 +440,19 @@ static ssize_t nvmet_addr_tsas_store(struct config_item *item,
{
struct nvmet_port *port = to_nvmet_port(item);
u8 treq = nvmet_port_disc_addr_treq_mask(port);
- u8 sectype;
- int i;
+ u8 sectype, qptype;
if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- if (port->disc_addr.trtype != NVMF_TRTYPE_TCP)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
- if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) {
- sectype = nvmet_addr_tsas_tcp[i].type;
+ if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
+ qptype = nvmet_addr_tsas_rdma_store(page);
+ if (qptype == port->disc_addr.tsas.rdma.qptype)
+ return count;
+ } else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
+ sectype = nvmet_addr_tsas_tcp_store(page);
+ if (sectype != NVMF_TCP_SECTYPE_INVALID)
goto found;
- }
}
pr_err("Invalid value '%s' for tsas\n", page);
@@ -676,10 +697,18 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
if (kstrtobool(page, &enable))
return -EINVAL;
+ /*
+ * Take the global nvmet_config_sem because the disable routine has a
+ * window where it releases the subsys lock, which gives a parallel
+ * enable a chance to run concurrently and leaves the disable path
+ * with a misaccounted ns percpu_ref.
+ */
+ down_write(&nvmet_config_sem);
if (enable)
ret = nvmet_ns_enable(ns);
else
nvmet_ns_disable(ns);
+ up_write(&nvmet_config_sem);
return ret ? ret : count;
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 2fde22323622..4ff460ba2826 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -818,6 +818,15 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
percpu_ref_exit(&sq->ref);
nvmet_auth_sq_free(sq);
+ /*
+ * We must re-read sq->ctrl after waiting for inflight IO to
+ * complete, because an admin connect may have sneaked in after we
+ * stored sq->ctrl locally but before we killed the percpu_ref. That
+ * admin connect allocates and assigns sq->ctrl, which now needs a
+ * final ref put, as this ctrl is going away.
+ */
+ ctrl = sq->ctrl;
+
if (ctrl) {
/*
* The teardown flow may take some time, and the host may not
@@ -948,6 +957,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->metadata_sg_cnt = 0;
req->transfer_len = 0;
req->metadata_len = 0;
+ req->cqe->result.u64 = 0;
req->cqe->status = 0;
req->cqe->sq_head = 0;
req->ns = NULL;
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index d61b8c6ff3b2..cb34d644ed08 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -333,7 +333,6 @@ done:
pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
__func__, ctrl->cntlid, req->sq->qid,
status, req->error_loc);
- req->cqe->result.u64 = 0;
if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
@@ -516,8 +515,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
status = nvmet_copy_to_sgl(req, 0, d, al);
kfree(d);
done:
- req->cqe->result.u64 = 0;
-
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
nvmet_auth_sq_free(req->sq);
else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 042b379cbb36..69d77d34bec1 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -226,9 +226,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
if (status)
goto out;
- /* zero out initial completion result, assign values as needed */
- req->cqe->result.u32 = 0;
-
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
@@ -305,9 +302,6 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
if (status)
goto out;
- /* zero out initial completion result, assign values as needed */
- req->cqe->result.u32 = 0;
-
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 337ee1cb09ae..381b4394731f 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -148,7 +148,7 @@ struct nvmet_fc_tgt_queue {
struct workqueue_struct *work_q;
struct kref ref;
/* array of fcp_iods */
- struct nvmet_fc_fcp_iod fod[] __counted_by(sqsize);
+ struct nvmet_fc_fcp_iod fod[] /* __counted_by(sqsize) */;
} __aligned(sizeof(unsigned long long));
struct nvmet_fc_hostport {
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index bb4a69d538fd..f003782d4ecf 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -226,13 +226,13 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
req->cmd->common.opcode == nvme_admin_identify) {
switch (req->cmd->identify.cns) {
case NVME_ID_CNS_CTRL:
- nvmet_passthru_override_id_ctrl(req);
+ status = nvmet_passthru_override_id_ctrl(req);
break;
case NVME_ID_CNS_NS:
- nvmet_passthru_override_id_ns(req);
+ status = nvmet_passthru_override_id_ns(req);
break;
case NVME_ID_CNS_NS_DESC_LIST:
- nvmet_passthru_override_id_descs(req);
+ status = nvmet_passthru_override_id_descs(req);
break;
}
} else if (status < 0)
diff --git a/drivers/nvmem/apple-efuses.c b/drivers/nvmem/apple-efuses.c
index d3d49d22338b..1d1bf84a099f 100644
--- a/drivers/nvmem/apple-efuses.c
+++ b/drivers/nvmem/apple-efuses.c
@@ -78,4 +78,5 @@ static struct platform_driver apple_efuses_driver = {
module_platform_driver(apple_efuses_driver);
MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple SoC eFuse driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
index 5cdf339cfbec..3d8c87835f4d 100644
--- a/drivers/nvmem/brcm_nvram.c
+++ b/drivers/nvmem/brcm_nvram.c
@@ -253,5 +253,6 @@ static int __init brcm_nvram_init(void)
subsys_initcall_sync(brcm_nvram_init);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom I/O-mapped NVRAM support driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(of, brcm_nvram_of_match_table);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index e1ec3b7200d7..12df7d037d37 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -179,12 +179,35 @@ static ssize_t type_show(struct device *dev,
{
struct nvmem_device *nvmem = to_nvmem_device(dev);
- return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
+ return sysfs_emit(buf, "%s\n", nvmem_type_str[nvmem->type]);
}
static DEVICE_ATTR_RO(type);
+static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ return sysfs_emit(buf, "%d\n", nvmem->read_only);
+}
+
+static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+ int ret = kstrtobool(buf, &nvmem->read_only);
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(force_ro);
+
static struct attribute *nvmem_attrs[] = {
+ &dev_attr_force_ro.attr,
&dev_attr_type.attr,
NULL,
};
@@ -203,19 +226,12 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
dev = kobj_to_dev(kobj);
nvmem = to_nvmem_device(dev);
- /* Stop the user from reading */
- if (pos >= nvmem->size)
- return 0;
-
if (!IS_ALIGNED(pos, nvmem->stride))
return -EINVAL;
if (count < nvmem->word_size)
return -EINVAL;
- if (pos + count > nvmem->size)
- count = nvmem->size - pos;
-
count = round_down(count, nvmem->word_size);
if (!nvmem->reg_read)
@@ -243,19 +259,12 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
dev = kobj_to_dev(kobj);
nvmem = to_nvmem_device(dev);
- /* Stop the user from writing */
- if (pos >= nvmem->size)
- return -EFBIG;
-
if (!IS_ALIGNED(pos, nvmem->stride))
return -EINVAL;
if (count < nvmem->word_size)
return -EINVAL;
- if (pos + count > nvmem->size)
- count = nvmem->size - pos;
-
count = round_down(count, nvmem->word_size);
if (!nvmem->reg_write)
@@ -299,6 +308,25 @@ static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
return nvmem_bin_attr_get_umode(nvmem);
}
+static umode_t nvmem_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ /*
+ * If the device has no .reg_write operation, do not allow
+ * configuration as read-write.
+ * If the device is set as read-only by configuration, it
+ * can be forced into read-write mode using the 'force_ro'
+ * attribute.
+ */
+ if (attr == &dev_attr_force_ro.attr && !nvmem->reg_write)
+ return 0; /* Attribute not visible */
+
+ return attr->mode;
+}
+
static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
const char *id, int index);
@@ -355,11 +383,7 @@ static const struct attribute_group nvmem_bin_group = {
.bin_attrs = nvmem_bin_attributes,
.attrs = nvmem_attrs,
.is_bin_visible = nvmem_bin_attr_is_visible,
-};
-
-/* Cell attributes will be dynamically allocated */
-static struct attribute_group nvmem_cells_group = {
- .name = "cells",
+ .is_visible = nvmem_attr_is_visible,
};
static const struct attribute_group *nvmem_dev_groups[] = {
@@ -367,11 +391,6 @@ static const struct attribute_group *nvmem_dev_groups[] = {
NULL,
};
-static const struct attribute_group *nvmem_cells_groups[] = {
- &nvmem_cells_group,
- NULL,
-};
-
static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
.attr = {
.name = "eeprom",
@@ -429,23 +448,24 @@ static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
{
- struct bin_attribute **cells_attrs, *attrs;
+ struct attribute_group group = {
+ .name = "cells",
+ };
struct nvmem_cell_entry *entry;
+ struct bin_attribute *attrs;
unsigned int ncells = 0, i = 0;
int ret = 0;
mutex_lock(&nvmem_mutex);
- if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated) {
- nvmem_cells_group.bin_attrs = NULL;
+ if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated)
goto unlock_mutex;
- }
/* Allocate an array of attributes with a sentinel */
ncells = list_count_nodes(&nvmem->cells);
- cells_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
- sizeof(struct bin_attribute *), GFP_KERNEL);
- if (!cells_attrs) {
+ group.bin_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
+ sizeof(struct bin_attribute *), GFP_KERNEL);
+ if (!group.bin_attrs) {
ret = -ENOMEM;
goto unlock_mutex;
}
@@ -472,13 +492,11 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
goto unlock_mutex;
}
- cells_attrs[i] = &attrs[i];
+ group.bin_attrs[i] = &attrs[i];
i++;
}
- nvmem_cells_group.bin_attrs = cells_attrs;
-
- ret = device_add_groups(&nvmem->dev, nvmem_cells_groups);
+ ret = device_add_group(&nvmem->dev, &group);
if (ret)
goto unlock_mutex;
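The nvmem/core.c hunk above drops the static, mutable "cells" group and instead registers a stack-allocated attribute_group through device_add_group(). A hedged sketch of that idiom, assuming a hypothetical demo_add_cells_group() helper and a caller-provided, NULL-terminated array of bin_attribute pointers whose entries outlive the device:
#include <linux/device.h>
#include <linux/sysfs.h>
/* Create a "cells" subdirectory under @dev and populate it with @attrs. */
static int demo_add_cells_group(struct device *dev,
				struct bin_attribute **attrs)
{
	struct attribute_group group = {
		.name = "cells",	/* becomes <dev>/cells/ in sysfs */
		.bin_attrs = attrs,
	};
	/*
	 * The group descriptor itself is only read at registration time,
	 * so it can live on the stack; the individual bin_attribute
	 * objects it points to must stay alive for the device lifetime
	 * (the patch keeps them devm-allocated for that reason).
	 */
	return device_add_group(dev, &group);
}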
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
index 33678d0af2c2..52ed9a62ca5b 100644
--- a/drivers/nvmem/meson-efuse.c
+++ b/drivers/nvmem/meson-efuse.c
@@ -42,20 +42,19 @@ static int meson_efuse_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_sm_firmware *fw;
- struct device_node *sm_np;
struct nvmem_device *nvmem;
struct nvmem_config *econfig;
struct clk *clk;
unsigned int size;
+ struct device_node *sm_np __free(device_node) =
+ of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0);
- sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0);
if (!sm_np) {
dev_err(&pdev->dev, "no secure-monitor node\n");
return -ENODEV;
}
fw = meson_sm_get(sm_np);
- of_node_put(sm_np);
if (!fw)
return -EPROBE_DEFER;
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index 2b40978ddb18..013e67136f3b 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -206,6 +206,7 @@ static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
static struct nvmem_config econfig = {
.name = "rockchip-efuse",
.add_legacy_fixed_of_cells = true,
+ .type = NVMEM_TYPE_OTP,
.stride = 1,
.word_size = 1,
.read_only = true,
diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c
index cb9aa5428350..ebc3f0b24166 100644
--- a/drivers/nvmem/rockchip-otp.c
+++ b/drivers/nvmem/rockchip-otp.c
@@ -255,6 +255,8 @@ static int rockchip_otp_read(void *context, unsigned int offset,
static struct nvmem_config otp_config = {
.name = "rockchip-otp",
.owner = THIS_MODULE,
+ .add_legacy_fixed_of_cells = true,
+ .type = NVMEM_TYPE_OTP,
.read_only = true,
.stride = 1,
.word_size = 1,
diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c
index befbab156cda..936e39b20b38 100644
--- a/drivers/nvmem/u-boot-env.c
+++ b/drivers/nvmem/u-boot-env.c
@@ -249,5 +249,6 @@ static struct platform_driver u_boot_env_driver = {
module_platform_driver(u_boot_env_driver);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("U-Boot environment variables support module");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(of, u_boot_env_of_match_table);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 174900072c18..462375b293e4 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -25,6 +25,8 @@
#include <linux/string.h>
#include <linux/slab.h>
+#include "of_private.h"
+
/**
* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
* @dev: Device node of the device whose interrupt is to be mapped
@@ -96,6 +98,57 @@ static const char * const of_irq_imap_abusers[] = {
NULL,
};
+const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_phandle_args *out_irq)
+{
+ u32 intsize, addrsize;
+ struct device_node *np;
+
+ /* Get the interrupt parent */
+ if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
+ np = of_node_get(of_irq_dflt_pic);
+ else
+ np = of_find_node_by_phandle(be32_to_cpup(imap));
+ imap++;
+
+ /* Check if not found */
+ if (!np) {
+ pr_debug(" -> imap parent not found !\n");
+ return NULL;
+ }
+
+ /* Get #interrupt-cells and #address-cells of new parent */
+ if (of_property_read_u32(np, "#interrupt-cells",
+ &intsize)) {
+ pr_debug(" -> parent lacks #interrupt-cells!\n");
+ of_node_put(np);
+ return NULL;
+ }
+ if (of_property_read_u32(np, "#address-cells",
+ &addrsize))
+ addrsize = 0;
+
+ pr_debug(" -> intsize=%d, addrsize=%d\n",
+ intsize, addrsize);
+
+ /* Check for malformed properties */
+ if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS)
+ || (len < (addrsize + intsize))) {
+ of_node_put(np);
+ return NULL;
+ }
+
+ pr_debug(" -> imaplen=%d\n", len);
+
+ imap += addrsize + intsize;
+
+ out_irq->np = np;
+ for (int i = 0; i < intsize; i++)
+ out_irq->args[i] = be32_to_cpup(imap - intsize + i);
+ out_irq->args_count = intsize;
+
+ return imap;
+}
+
/**
* of_irq_parse_raw - Low level interrupt tree parsing
* @addr: address specifier (start of "reg" property of the device) in be32 format
@@ -112,12 +165,12 @@ static const char * const of_irq_imap_abusers[] = {
*/
int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
{
- struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
+ struct device_node *ipar, *tnode, *old = NULL;
__be32 initial_match_array[MAX_PHANDLE_ARGS];
const __be32 *match_array = initial_match_array;
- const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
- u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
- int imaplen, match, i, rc = -EINVAL;
+ const __be32 *tmp, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
+ u32 intsize = 1, addrsize;
+ int i, rc = -EINVAL;
#ifdef DEBUG
of_print_phandle_args("of_irq_parse_raw: ", out_irq);
@@ -176,6 +229,9 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
/* Now start the actual "proper" walk of the interrupt tree */
while (ipar != NULL) {
+ int imaplen, match;
+ const __be32 *imap, *oldimap, *imask;
+ struct device_node *newpar;
/*
* Now check if cursor is an interrupt-controller and
* if it is then we are done, unless there is an
@@ -216,7 +272,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
/* Parse interrupt-map */
match = 0;
- while (imaplen > (addrsize + intsize + 1) && !match) {
+ while (imaplen > (addrsize + intsize + 1)) {
/* Compare specifiers */
match = 1;
for (i = 0; i < (addrsize + intsize); i++, imaplen--)
@@ -224,48 +280,17 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);
- /* Get the interrupt parent */
- if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
- newpar = of_node_get(of_irq_dflt_pic);
- else
- newpar = of_find_node_by_phandle(be32_to_cpup(imap));
- imap++;
- --imaplen;
-
- /* Check if not found */
- if (newpar == NULL) {
- pr_debug(" -> imap parent not found !\n");
- goto fail;
- }
-
- if (!of_device_is_available(newpar))
- match = 0;
-
- /* Get #interrupt-cells and #address-cells of new
- * parent
- */
- if (of_property_read_u32(newpar, "#interrupt-cells",
- &newintsize)) {
- pr_debug(" -> parent lacks #interrupt-cells!\n");
- goto fail;
- }
- if (of_property_read_u32(newpar, "#address-cells",
- &newaddrsize))
- newaddrsize = 0;
-
- pr_debug(" -> newintsize=%d, newaddrsize=%d\n",
- newintsize, newaddrsize);
-
- /* Check for malformed properties */
- if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS)
- || (imaplen < (newaddrsize + newintsize))) {
- rc = -EFAULT;
+ oldimap = imap;
+ imap = of_irq_parse_imap_parent(oldimap, imaplen, out_irq);
+ if (!imap)
goto fail;
- }
- imap += newaddrsize + newintsize;
- imaplen -= newaddrsize + newintsize;
+ match &= of_device_is_available(out_irq->np);
+ if (match)
+ break;
+ of_node_put(out_irq->np);
+ imaplen -= imap - oldimap;
pr_debug(" -> imaplen=%d\n", imaplen);
}
if (!match) {
@@ -287,11 +312,11 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
* Successfully parsed an interrupt-map translation; copy new
* interrupt specifier into the out_irq structure
*/
- match_array = imap - newaddrsize - newintsize;
- for (i = 0; i < newintsize; i++)
- out_irq->args[i] = be32_to_cpup(imap - newintsize + i);
- out_irq->args_count = intsize = newintsize;
- addrsize = newaddrsize;
+ match_array = oldimap + 1;
+
+ newpar = out_irq->np;
+ intsize = out_irq->args_count;
+ addrsize = (imap - match_array) - intsize;
if (ipar == newpar) {
pr_debug("%pOF interrupt-map entry to self\n", ipar);
@@ -300,7 +325,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
skiplevel:
/* Iterate again with new parent */
- out_irq->np = newpar;
pr_debug(" -> new parent: %pOF\n", newpar);
of_node_put(ipar);
ipar = newpar;
@@ -310,7 +334,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
fail:
of_node_put(ipar);
- of_node_put(newpar);
return rc;
}
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 94fc0aa07af9..04aa2a91f851 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -159,6 +159,9 @@ extern void __of_sysfs_remove_bin_file(struct device_node *np,
extern int of_bus_n_addr_cells(struct device_node *np);
extern int of_bus_n_size_cells(struct device_node *np);
+const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len,
+ struct of_phandle_args *out_irq);
+
struct bus_dma_region;
#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_HAS_DMA)
int of_dma_get_range(struct device_node *np,
diff --git a/drivers/of/of_test.c b/drivers/of/of_test.c
index a9301d293f01..c85a258bc6ae 100644
--- a/drivers/of/of_test.c
+++ b/drivers/of/of_test.c
@@ -54,4 +54,5 @@ static struct kunit_suite of_dtb_suite = {
kunit_test_suites(
&of_dtb_suite,
);
+MODULE_DESCRIPTION("KUnit tests for OF APIs");
MODULE_LICENSE("GPL");
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 1c83e68f805b..164d77cb9445 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1306,10 +1306,10 @@ static struct device_node *parse_interrupts(struct device_node *np,
static struct device_node *parse_interrupt_map(struct device_node *np,
const char *prop_name, int index)
{
- const __be32 *imap, *imap_end, *addr;
+ const __be32 *imap, *imap_end;
struct of_phandle_args sup_args;
u32 addrcells, intcells;
- int i, imaplen;
+ int imaplen;
if (!IS_ENABLED(CONFIG_OF_IRQ))
return NULL;
@@ -1322,33 +1322,23 @@ static struct device_node *parse_interrupt_map(struct device_node *np,
addrcells = of_bus_n_addr_cells(np);
imap = of_get_property(np, "interrupt-map", &imaplen);
- if (!imap || imaplen <= (addrcells + intcells))
+ imaplen /= sizeof(*imap);
+ if (!imap)
return NULL;
- imap_end = imap + imaplen;
- while (imap < imap_end) {
- addr = imap;
- imap += addrcells;
+ imap_end = imap + imaplen;
- sup_args.np = np;
- sup_args.args_count = intcells;
- for (i = 0; i < intcells; i++)
- sup_args.args[i] = be32_to_cpu(imap[i]);
- imap += intcells;
+ for (int i = 0; imap + addrcells + intcells + 1 < imap_end; i++) {
+ imap += addrcells + intcells;
- /*
- * Upon success, the function of_irq_parse_raw() returns
- * interrupt controller DT node pointer in sup_args.np.
- */
- if (of_irq_parse_raw(addr, &sup_args))
+ imap = of_irq_parse_imap_parent(imap, imap_end - imap, &sup_args);
+ if (!imap)
return NULL;
- if (!index)
+ if (i == index)
return sup_args.np;
of_node_put(sup_args.np);
- imap += sup_args.args_count + 1;
- index--;
}
return NULL;
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 6d78ec3a762f..2231dbfd870d 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -97,7 +97,6 @@ static int daisy_drv_probe(struct pardevice *par_dev)
static struct parport_driver daisy_driver = {
.name = "daisy_drv",
.probe = daisy_drv_probe,
- .devmodel = true,
};
/* Discover the IEEE1284.3 topology on a port -- muxes and daisy chains.
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
index e6dc857aac3f..e06c7b2aac5c 100644
--- a/drivers/parport/parport_amiga.c
+++ b/drivers/parport/parport_amiga.c
@@ -229,7 +229,13 @@ static void __exit amiga_parallel_remove(struct platform_device *pdev)
parport_put_port(port);
}
-static struct platform_driver amiga_parallel_driver = {
+/*
+ * amiga_parallel_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * from triggering a section mismatch warning.
+ */
+static struct platform_driver amiga_parallel_driver __refdata = {
.remove_new = __exit_p(amiga_parallel_remove),
.driver = {
.name = "amiga-parallel",
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 49c74ded8a53..2d34f783b36e 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -49,8 +49,6 @@ static DEFINE_SPINLOCK(parportlist_lock);
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);
-static LIST_HEAD(drivers);
-
static DEFINE_MUTEX(registration_lock);
/* What you can do to a port that's gone away.. */
@@ -165,10 +163,6 @@ static int driver_check(struct device_driver *dev_drv, void *_port)
static void attach_driver_chain(struct parport *port)
{
/* caller has exclusive registration_lock */
- struct parport_driver *drv;
-
- list_for_each_entry(drv, &drivers, list)
- drv->attach(port);
/*
* call the driver_check function of the drivers registered in
@@ -191,10 +185,7 @@ static int driver_detach(struct device_driver *_drv, void *_port)
/* Call detach(port) for each registered driver. */
static void detach_driver_chain(struct parport *port)
{
- struct parport_driver *drv;
/* caller has exclusive registration_lock */
- list_for_each_entry(drv, &drivers, list)
- drv->detach(port);
/*
* call the detach function of the drivers registered in
@@ -1219,4 +1210,5 @@ irqreturn_t parport_irq_handler(int irq, void *dev_id)
}
EXPORT_SYMBOL(parport_irq_handler);
+MODULE_DESCRIPTION("Parallel-port resource manager");
MODULE_LICENSE("GPL");
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 30f031de9cfe..b123da16b63b 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -289,8 +289,6 @@ void pci_cfg_access_lock(struct pci_dev *dev)
{
might_sleep();
- lock_map_acquire(&dev->cfg_access_lock);
-
raw_spin_lock_irq(&pci_lock);
if (dev->block_cfg_access)
pci_wait_cfg(dev);
@@ -345,8 +343,6 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
raw_spin_unlock_irqrestore(&pci_lock, flags);
wake_up_all(&pci_cfg_wait);
-
- lock_map_release(&dev->cfg_access_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index c5625dd9bf49..3a45879d85db 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -352,7 +352,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
struct irq_affinity *affd)
{
struct irq_affinity_desc *masks = NULL;
- struct msi_desc *entry;
+ struct msi_desc *entry, desc;
int ret;
/* Reject multi-MSI early on irq domain enabled architectures */
@@ -377,6 +377,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
/* All MSIs are unmasked by default; mask them all */
entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
pci_msi_mask(entry, msi_multi_mask(entry));
+ /*
+ * Copy the MSI descriptor for the error path because
+ * pci_msi_setup_msi_irqs() will free it for the hierarchical
+ * interrupt domain case.
+ */
+ memcpy(&desc, entry, sizeof(desc));
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
@@ -396,7 +402,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
goto unlock;
err:
- pci_msi_unmask(entry, msi_multi_mask(entry));
+ pci_msi_unmask(&desc, msi_multi_mask(&desc));
pci_free_msi_irqs(dev);
fail:
dev->msi_enabled = 0;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 59e0949fb079..35fb1f17a589 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4883,7 +4883,6 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
*/
int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
- lock_map_assert_held(&dev->cfg_access_lock);
pcibios_reset_secondary_bus(dev);
return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8e696e547565..5fbabb4e3425 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2546,9 +2546,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
dev->dev.dma_mask = &dev->dma_mask;
dev->dev.dma_parms = &dev->dma_parms;
dev->dev.coherent_dma_mask = 0xffffffffull;
- lockdep_register_key(&dev->cfg_access_key);
- lockdep_init_map(&dev->cfg_access_lock, dev_name(&dev->dev),
- &dev->cfg_access_key, 0);
dma_set_max_seg_size(&dev->dev, 65536);
dma_set_seg_boundary(&dev->dev, 0xffffffff);
diff --git a/drivers/peci/controller/peci-aspeed.c b/drivers/peci/controller/peci-aspeed.c
index 7fdc25afcf2f..de7046e6b9c4 100644
--- a/drivers/peci/controller/peci-aspeed.c
+++ b/drivers/peci/controller/peci-aspeed.c
@@ -351,6 +351,7 @@ static int clk_aspeed_peci_set_rate(struct clk_hw *hw, unsigned long rate,
clk_aspeed_peci_find_div_values(this_rate, &msg_timing, &clk_div_exp);
val = readl(aspeed_peci->base + ASPEED_PECI_CTRL);
+ val &= ~ASPEED_PECI_CTRL_CLK_DIV_MASK;
val |= FIELD_PREP(ASPEED_PECI_CTRL_CLK_DIV_MASK, clk_div_exp);
writel(val, aspeed_peci->base + ASPEED_PECI_CTRL);
diff --git a/drivers/peci/core.c b/drivers/peci/core.c
index 8f8bda2f2a62..8ff3e5d225ae 100644
--- a/drivers/peci/core.c
+++ b/drivers/peci/core.c
@@ -163,9 +163,8 @@ EXPORT_SYMBOL_NS_GPL(devm_peci_controller_add, PECI);
static const struct peci_device_id *
peci_bus_match_device_id(const struct peci_device_id *id, struct peci_device *device)
{
- while (id->family != 0) {
- if (id->family == device->info.family &&
- id->model == device->info.model)
+ while (id->x86_vfm != 0) {
+ if (id->x86_vfm == device->info.x86_vfm)
return id;
id++;
}
diff --git a/drivers/peci/cpu.c b/drivers/peci/cpu.c
index bd990acd92b8..152bbd8e717a 100644
--- a/drivers/peci/cpu.c
+++ b/drivers/peci/cpu.c
@@ -294,38 +294,31 @@ peci_cpu_probe(struct peci_device *device, const struct peci_device_id *id)
static const struct peci_device_id peci_cpu_device_ids[] = {
{ /* Haswell Xeon */
- .family = 6,
- .model = INTEL_FAM6_HASWELL_X,
+ .x86_vfm = INTEL_HASWELL_X,
.data = "hsx",
},
{ /* Broadwell Xeon */
- .family = 6,
- .model = INTEL_FAM6_BROADWELL_X,
+ .x86_vfm = INTEL_BROADWELL_X,
.data = "bdx",
},
{ /* Broadwell Xeon D */
- .family = 6,
- .model = INTEL_FAM6_BROADWELL_D,
+ .x86_vfm = INTEL_BROADWELL_D,
.data = "bdxd",
},
{ /* Skylake Xeon */
- .family = 6,
- .model = INTEL_FAM6_SKYLAKE_X,
+ .x86_vfm = INTEL_SKYLAKE_X,
.data = "skx",
},
{ /* Icelake Xeon */
- .family = 6,
- .model = INTEL_FAM6_ICELAKE_X,
+ .x86_vfm = INTEL_ICELAKE_X,
.data = "icx",
},
{ /* Icelake Xeon D */
- .family = 6,
- .model = INTEL_FAM6_ICELAKE_D,
+ .x86_vfm = INTEL_ICELAKE_D,
.data = "icxd",
},
{ /* Sapphire Rapids Xeon */
- .family = 6,
- .model = INTEL_FAM6_SAPPHIRERAPIDS_X,
+ .x86_vfm = INTEL_SAPPHIRERAPIDS_X,
.data = "spr",
},
{ }
diff --git a/drivers/peci/device.c b/drivers/peci/device.c
index ee01f03c29b7..37ca7dd61807 100644
--- a/drivers/peci/device.c
+++ b/drivers/peci/device.c
@@ -100,8 +100,7 @@ static int peci_device_info_init(struct peci_device *device)
if (ret)
return ret;
- device->info.family = peci_x86_cpu_family(cpu_id);
- device->info.model = peci_x86_cpu_model(cpu_id);
+ device->info.x86_vfm = IFM(peci_x86_cpu_family(cpu_id), peci_x86_cpu_model(cpu_id));
ret = peci_get_revision(device, &revision);
if (ret)
diff --git a/drivers/peci/internal.h b/drivers/peci/internal.h
index 506bafcccbbf..7a4f6eae2f90 100644
--- a/drivers/peci/internal.h
+++ b/drivers/peci/internal.h
@@ -66,13 +66,11 @@ struct peci_request *peci_xfer_ep_mmio64_readl(struct peci_device *device, u8 ba
/**
* struct peci_device_id - PECI device data to match
* @data: pointer to driver private data specific to device
- * @family: device family
- * @model: device model
+ * @x86_vfm: device vendor-family-model
*/
struct peci_device_id {
const void *data;
- u16 family;
- u8 model;
+ u32 x86_vfm;
};
extern const struct device_type peci_device_type;
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 7f999e8a433d..7b00945f7191 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -187,6 +187,31 @@ static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_TX_TRANSCEIVER_BIAS_EN,
};
+static const unsigned int qmp_v6_n4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V6_N4_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V6_N4_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V6_N4_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V6_N4_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V6_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V6_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+
+ [QPHY_COM_RESETSM_CNTRL] = QSERDES_V6_COM_RESETSM_CNTRL,
+ [QPHY_COM_C_READY_STATUS] = QSERDES_V6_COM_C_READY_STATUS,
+ [QPHY_COM_CMN_STATUS] = QSERDES_V6_COM_CMN_STATUS,
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V6_N4_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V6_N4_TX_TX_DRV_LVL,
+ [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V6_N4_TX_TX_EMP_POST1_LVL,
+ [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V6_N4_TX_HIGHZ_DRVR_EN,
+ [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN,
+};
+
static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
@@ -997,6 +1022,31 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f),
};
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SVS_MODE_CLK_SEL, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f),
+};
+
static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_TX_VMODE_CTRL1, 0x40),
QMP_PHY_INIT_CFG(QSERDES_V6_TX_PRE_STALL_LDO_BOOST_EN, 0x30),
@@ -1011,6 +1061,19 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_TX_TX_BAND, 0x4),
};
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_VMODE_CTRL1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_INTERFACE_SELECT, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_CLKBUF_ENABLE, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RESET_TSYNC_EN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TX_BAND, 0x1),
+};
+
static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_rbr[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
@@ -1059,6 +1122,74 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_hbr3[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
};
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_rbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr2[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x02),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr3[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+};
+
static const struct qmp_phy_init_tbl sc8280xp_usb43dp_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
@@ -1273,20 +1404,20 @@ static const struct qmp_phy_init_tbl x1e80100_usb43dp_rx_tbl[] = {
};
static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG2, 0x89),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG3, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG6, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_RX_SIGDET_LVL, 0x55),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG1, 0xd4),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG2, 0x30),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2, 0x89),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_SIGDET_LVL, 0x55),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_CONFIG, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1, 0xd4),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2, 0x30),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG5, 0x10),
};
static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_usb_tbl[] = {
@@ -1794,22 +1925,22 @@ static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = {
.pcs_usb_tbl = x1e80100_usb43dp_pcs_usb_tbl,
.pcs_usb_tbl_num = ARRAY_SIZE(x1e80100_usb43dp_pcs_usb_tbl),
- .dp_serdes_tbl = qmp_v6_dp_serdes_tbl,
- .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl),
- .dp_tx_tbl = qmp_v6_dp_tx_tbl,
- .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl),
+ .dp_serdes_tbl = qmp_v6_n4_dp_serdes_tbl,
+ .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl),
+ .dp_tx_tbl = qmp_v6_n4_dp_tx_tbl,
+ .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_n4_dp_tx_tbl),
- .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr,
- .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr),
- .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr,
- .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr),
- .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2,
- .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2),
- .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3,
- .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3),
+ .serdes_tbl_rbr = qmp_v6_n4_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v6_n4_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v6_n4_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v6_n4_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr3),
- .swing_hbr_rbr = &qmp_dp_v5_voltage_swing_hbr_rbr,
- .pre_emphasis_hbr_rbr = &qmp_dp_v5_pre_emphasis_hbr_rbr,
+ .swing_hbr_rbr = &qmp_dp_v6_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr,
.swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
.pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
@@ -1822,7 +1953,7 @@ static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = {
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v45_usb3phy_regs_layout,
+ .regs = qmp_v6_n4_usb3phy_regs_layout,
};
static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h
new file mode 100644
index 000000000000..b3024714dab4
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V6_N4_H_
+#define QCOM_PHY_QMP_PCS_V6_N4_H_
+
+/* Only for QMP V6 N4 PHY - USB/PCIe PCS registers */
+#define QPHY_V6_N4_PCS_SW_RESET 0x000
+#define QPHY_V6_N4_PCS_PCS_STATUS1 0x014
+#define QPHY_V6_N4_PCS_POWER_DOWN_CONTROL 0x040
+#define QPHY_V6_N4_PCS_START_CONTROL 0x044
+#define QPHY_V6_N4_PCS_POWER_STATE_CONFIG1 0x090
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1 0x0c4
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2 0x0c8
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3 0x0cc
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6 0x0d8
+#define QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V6_N4_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
+#define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
+#define QPHY_V6_N4_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V6_N4_PCS_RX_CONFIG 0x1b0
+#define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1 0x1c0
+#define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2 0x1c4
+#define QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG 0x1d0
+#define QPHY_V6_N4_PCS_EQ_CONFIG1 0x1dc
+#define QPHY_V6_N4_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V6_N4_PCS_EQ_CONFIG5 0x1ec
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
index a814ad11af07..d37cc0d4fd36 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
@@ -6,11 +6,24 @@
#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_
#define QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_
+#define QSERDES_V6_N4_TX_CLKBUF_ENABLE 0x08
+#define QSERDES_V6_N4_TX_TX_EMP_POST1_LVL 0x0c
+#define QSERDES_V6_N4_TX_TX_DRV_LVL 0x14
+#define QSERDES_V6_N4_TX_RESET_TSYNC_EN 0x1c
+#define QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN 0x20
#define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX 0x30
#define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN 0x48
+#define QSERDES_V6_N4_TX_HIGHZ_DRVR_EN 0x4c
+#define QSERDES_V6_N4_TX_TX_POL_INV 0x50
+#define QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN 0x54
#define QSERDES_V6_N4_TX_LANE_MODE_1 0x78
#define QSERDES_V6_N4_TX_LANE_MODE_2 0x7c
#define QSERDES_V6_N4_TX_LANE_MODE_3 0x80
+#define QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN 0xac
+#define QSERDES_V6_N4_TX_TX_BAND 0xd8
+#define QSERDES_V6_N4_TX_INTERFACE_SELECT 0xe4
+#define QSERDES_V6_N4_TX_VMODE_CTRL1 0xb0
#define QSERDES_V6_N4_RX_UCDR_FO_GAIN_RATE2 0x8
#define QSERDES_V6_N4_RX_UCDR_SO_GAIN_RATE2 0x18
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index d10b8f653c4b..d0f41e4aaa85 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -46,6 +46,8 @@
#include "phy-qcom-qmp-pcs-v6.h"
+#include "phy-qcom-qmp-pcs-v6-n4.h"
+
#include "phy-qcom-qmp-pcs-v6_20.h"
#include "phy-qcom-qmp-pcs-v7.h"
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 7178a38475cc..27fd54795791 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -245,7 +245,7 @@ static const char * const irq_type_names[] = {
};
static bool persist_gpio_outputs;
-module_param(persist_gpio_outputs, bool, 0644);
+module_param(persist_gpio_outputs, bool, 0444);
MODULE_PARM_DESC(persist_gpio_outputs, "Enable GPIO_OUT persistence when pin is freed");
static inline u32 bcm2835_gpio_rd(struct bcm2835_pinctrl *pc, unsigned reg)
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index cffeb869130d..f424a57f0013 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1106,8 +1106,8 @@ static struct pinctrl *create_pinctrl(struct device *dev,
* an -EPROBE_DEFER later, as that is the worst case.
*/
if (ret == -EPROBE_DEFER) {
- pinctrl_free(p, false);
mutex_unlock(&pinctrl_maps_mutex);
+ pinctrl_free(p, false);
return ERR_PTR(ret);
}
}
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 3bedf36a0019..3f56991f5b89 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -634,23 +634,68 @@ static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
static struct rockchip_mux_recalced_data rk3328_mux_recalced_data[] = {
{
- .num = 2,
- .pin = 12,
- .reg = 0x24,
- .bit = 8,
- .mask = 0x3
- }, {
+ /* gpio2_b7_sel */
.num = 2,
.pin = 15,
.reg = 0x28,
.bit = 0,
.mask = 0x7
}, {
+ /* gpio2_c7_sel */
.num = 2,
.pin = 23,
.reg = 0x30,
.bit = 14,
.mask = 0x3
+ }, {
+ /* gpio3_b1_sel */
+ .num = 3,
+ .pin = 9,
+ .reg = 0x44,
+ .bit = 2,
+ .mask = 0x3
+ }, {
+ /* gpio3_b2_sel */
+ .num = 3,
+ .pin = 10,
+ .reg = 0x44,
+ .bit = 4,
+ .mask = 0x3
+ }, {
+ /* gpio3_b3_sel */
+ .num = 3,
+ .pin = 11,
+ .reg = 0x44,
+ .bit = 6,
+ .mask = 0x3
+ }, {
+ /* gpio3_b4_sel */
+ .num = 3,
+ .pin = 12,
+ .reg = 0x44,
+ .bit = 8,
+ .mask = 0x3
+ }, {
+ /* gpio3_b5_sel */
+ .num = 3,
+ .pin = 13,
+ .reg = 0x44,
+ .bit = 10,
+ .mask = 0x3
+ }, {
+ /* gpio3_b6_sel */
+ .num = 3,
+ .pin = 14,
+ .reg = 0x44,
+ .bit = 12,
+ .mask = 0x3
+ }, {
+ /* gpio3_b7_sel */
+ .num = 3,
+ .pin = 15,
+ .reg = 0x44,
+ .bit = 14,
+ .mask = 0x3
},
};
@@ -2433,6 +2478,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RK3188:
case RK3288:
case RK3308:
+ case RK3328:
case RK3368:
case RK3399:
case RK3568:
@@ -2491,6 +2537,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RK3188:
case RK3288:
case RK3308:
+ case RK3328:
case RK3368:
case RK3399:
case RK3568:
@@ -2704,8 +2751,10 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
if (ret) {
/* revert the already done pin settings */
- for (cnt--; cnt >= 0; cnt--)
+ for (cnt--; cnt >= 0; cnt--) {
+ bank = pin_to_bank(info, pins[cnt]);
rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0);
+ }
return ret;
}
@@ -2753,6 +2802,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RK3188:
case RK3288:
case RK3308:
+ case RK3328:
case RK3368:
case RK3399:
case RK3568:
@@ -3763,7 +3813,7 @@ static struct rockchip_pin_bank rk3328_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0),
PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0),
PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0,
- IOMUX_WIDTH_3BIT,
+ 0,
IOMUX_WIDTH_3BIT,
0),
PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3",
@@ -3777,7 +3827,7 @@ static struct rockchip_pin_ctrl rk3328_pin_ctrl = {
.pin_banks = rk3328_pin_banks,
.nr_banks = ARRAY_SIZE(rk3328_pin_banks),
.label = "RK3328-GPIO",
- .type = RK3288,
+ .type = RK3328,
.grf_mux_offset = 0x0,
.iomux_recalced = rk3328_mux_recalced_data,
.niomux_recalced = ARRAY_SIZE(rk3328_mux_recalced_data),
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index 4759f336941e..849266f8b191 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -193,6 +193,7 @@ enum rockchip_pinctrl_type {
RK3188,
RK3288,
RK3308,
+ RK3328,
RK3368,
RK3399,
RK3568,
diff --git a/drivers/pinctrl/pinctrl-tps6594.c b/drivers/pinctrl/pinctrl-tps6594.c
index 085047320853..5e7c7cf93445 100644
--- a/drivers/pinctrl/pinctrl-tps6594.c
+++ b/drivers/pinctrl/pinctrl-tps6594.c
@@ -486,6 +486,7 @@ static int tps6594_pinctrl_probe(struct platform_device *pdev)
break;
case TPS6593:
case TPS6594:
+ case LP8764:
pctrl_desc->pins = tps6594_pins;
pctrl_desc->npins = ARRAY_SIZE(tps6594_pins);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 4e80c7204e5f..4abd6f18bbef 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1207,7 +1207,6 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm7325-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm7550ba-gpio", .data = (void *) 8},
{ .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
- { .compatible = "qcom,pm8008-gpio", .data = (void *) 2 },
{ .compatible = "qcom,pm8019-gpio", .data = (void *) 6 },
/* pm8150 has 10 GPIOs with holes on 2, 5, 7 and 8 */
{ .compatible = "qcom,pm8150-gpio", .data = (void *) 10 },
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index c3256bfde502..60be78da9f52 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -2071,11 +2071,11 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
* This has to be atomically executed to protect against a concurrent
* interrupt.
*/
- raw_spin_lock_irqsave(&pctrl->lock.rlock, flags);
+ spin_lock_irqsave(&pctrl->lock, flags);
ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data));
if (!ret && !irqd_irq_disabled(data))
rzg2l_gpio_irq_enable(data);
- raw_spin_unlock_irqrestore(&pctrl->lock.rlock, flags);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
if (ret)
dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq);
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 061aa9647c19..c2aab0cfab33 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -946,4 +946,5 @@ static struct platform_driver goldfish_pipe_driver = {
module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
+MODULE_DESCRIPTION("Goldfish virtual device for QEMU pipes");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c
index 3ef655591424..abe7be602f84 100644
--- a/drivers/platform/mellanox/nvsw-sn2201.c
+++ b/drivers/platform/mellanox/nvsw-sn2201.c
@@ -1198,6 +1198,7 @@ static int nvsw_sn2201_config_pre_init(struct nvsw_sn2201 *nvsw_sn2201)
static int nvsw_sn2201_probe(struct platform_device *pdev)
{
struct nvsw_sn2201 *nvsw_sn2201;
+ int ret;
nvsw_sn2201 = devm_kzalloc(&pdev->dev, sizeof(*nvsw_sn2201), GFP_KERNEL);
if (!nvsw_sn2201)
@@ -1205,8 +1206,10 @@ static int nvsw_sn2201_probe(struct platform_device *pdev)
nvsw_sn2201->dev = &pdev->dev;
platform_set_drvdata(pdev, nvsw_sn2201);
- platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
+ ret = platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
ARRAY_SIZE(nvsw_sn2201_lpc_io_resources));
+ if (ret)
+ return ret;
nvsw_sn2201->main_mux_deferred_nr = NVSW_SN2201_MAIN_MUX_DEFER_NR;
nvsw_sn2201->main_mux_devs = nvsw_sn2201_main_mux_brdinfo;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 0ec952b5d03e..665fa9524986 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -136,6 +136,7 @@ config YOGABOOK
config YT2_1380
tristate "Lenovo Yoga Tablet 2 1380 fast charge driver"
depends on SERIAL_DEV_BUS
+ depends on EXTCON
depends on ACPI
help
Say Y here to enable support for the custom fast charging protocol
@@ -515,6 +516,7 @@ config THINKPAD_ACPI
select NVRAM
select NEW_LEDS
select LEDS_CLASS
+ select INPUT_SPARSEKMAP
help
This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
support for Fn-Fx key combinations, Bluetooth control, video
diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c
index d84ea66eecc6..8fcf38eed7f0 100644
--- a/drivers/platform/x86/amd/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp.c
@@ -907,16 +907,44 @@ static int hsmp_plat_dev_register(void)
return ret;
}
+/*
+ * This check is only needed for backward compatibility with previous platforms.
+ * All new platforms are expected to support ACPI-based probing.
+ */
+static bool legacy_hsmp_support(void)
+{
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return false;
+
+ switch (boot_cpu_data.x86) {
+ case 0x19:
+ switch (boot_cpu_data.x86_model) {
+ case 0x00 ... 0x1F:
+ case 0x30 ... 0x3F:
+ case 0x90 ... 0x9F:
+ case 0xA0 ... 0xAF:
+ return true;
+ default:
+ return false;
+ }
+ case 0x1A:
+ switch (boot_cpu_data.x86_model) {
+ case 0x00 ... 0x1F:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+
+ return false;
+}
+
static int __init hsmp_plt_init(void)
{
int ret = -ENODEV;
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || boot_cpu_data.x86 < 0x19) {
- pr_err("HSMP is not supported on Family:%x model:%x\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
- return ret;
- }
-
/*
* amd_nb_num() returns the number of SMN/DF interfaces present in the system;
* if we have N SMN/DF interfaces, that ideally means N sockets
@@ -930,7 +958,15 @@ static int __init hsmp_plt_init(void)
return ret;
if (!plat_dev.is_acpi_device) {
- ret = hsmp_plat_dev_register();
+ if (legacy_hsmp_support()) {
+ /* Not an ACPI device, but supports HSMP; register a plat_dev */
+ ret = hsmp_plat_dev_register();
+ } else {
+ /* Not ACPI, does not support HSMP */
+ pr_info("HSMP is not supported on Family:%x model:%x\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ ret = -ENODEV;
+ }
if (ret)
platform_driver_unregister(&amd_hsmp_driver);
}
diff --git a/drivers/platform/x86/amilo-rfkill.c b/drivers/platform/x86/amilo-rfkill.c
index efcf909786a5..2423dc91debb 100644
--- a/drivers/platform/x86/amilo-rfkill.c
+++ b/drivers/platform/x86/amilo-rfkill.c
@@ -171,6 +171,7 @@ static void __exit amilo_rfkill_exit(void)
}
MODULE_AUTHOR("Ben Hutchings <ben@decadent.org.uk>");
+MODULE_DESCRIPTION("Fujitsu-Siemens Amilo rfkill support");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(dmi, amilo_rfkill_id_table);
diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
index e61bfaf8b5c4..b562ed99ec4e 100644
--- a/drivers/platform/x86/dell/dell-smbios-base.c
+++ b/drivers/platform/x86/dell/dell-smbios-base.c
@@ -11,6 +11,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/container_of.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/capability.h>
@@ -25,11 +26,16 @@ static u32 da_supported_commands;
static int da_num_tokens;
static struct platform_device *platform_device;
static struct calling_interface_token *da_tokens;
-static struct device_attribute *token_location_attrs;
-static struct device_attribute *token_value_attrs;
+static struct token_sysfs_data *token_entries;
static struct attribute **token_attrs;
static DEFINE_MUTEX(smbios_mutex);
+struct token_sysfs_data {
+ struct device_attribute location_attr;
+ struct device_attribute value_attr;
+ struct calling_interface_token *token;
+};
+
struct smbios_device {
struct list_head list;
struct device *device;
@@ -416,47 +422,26 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
}
}
-static int match_attribute(struct device *dev,
- struct device_attribute *attr)
-{
- int i;
-
- for (i = 0; i < da_num_tokens * 2; i++) {
- if (!token_attrs[i])
- continue;
- if (strcmp(token_attrs[i]->name, attr->attr.name) == 0)
- return i/2;
- }
- dev_dbg(dev, "couldn't match: %s\n", attr->attr.name);
- return -EINVAL;
-}
-
static ssize_t location_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int i;
+ struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, location_attr);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- i = match_attribute(dev, attr);
- if (i > 0)
- return sysfs_emit(buf, "%08x", da_tokens[i].location);
- return 0;
+ return sysfs_emit(buf, "%08x", data->token->location);
}
static ssize_t value_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int i;
+ struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, value_attr);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- i = match_attribute(dev, attr);
- if (i > 0)
- return sysfs_emit(buf, "%08x", da_tokens[i].value);
- return 0;
+ return sysfs_emit(buf, "%08x", data->token->value);
}
static struct attribute_group smbios_attribute_group = {
@@ -473,22 +458,15 @@ static int build_tokens_sysfs(struct platform_device *dev)
{
char *location_name;
char *value_name;
- size_t size;
int ret;
int i, j;
- /* (number of tokens + 1 for null terminated */
- size = sizeof(struct device_attribute) * (da_num_tokens + 1);
- token_location_attrs = kzalloc(size, GFP_KERNEL);
- if (!token_location_attrs)
+ token_entries = kcalloc(da_num_tokens, sizeof(*token_entries), GFP_KERNEL);
+ if (!token_entries)
return -ENOMEM;
- token_value_attrs = kzalloc(size, GFP_KERNEL);
- if (!token_value_attrs)
- goto out_allocate_value;
/* need to store both location and value + terminator */
- size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1);
- token_attrs = kzalloc(size, GFP_KERNEL);
+ token_attrs = kcalloc((2 * da_num_tokens) + 1, sizeof(*token_attrs), GFP_KERNEL);
if (!token_attrs)
goto out_allocate_attrs;
@@ -496,32 +474,34 @@ static int build_tokens_sysfs(struct platform_device *dev)
/* skip empty */
if (da_tokens[i].tokenID == 0)
continue;
+
+ token_entries[i].token = &da_tokens[i];
+
/* add location */
location_name = kasprintf(GFP_KERNEL, "%04x_location",
da_tokens[i].tokenID);
if (location_name == NULL)
goto out_unwind_strings;
- sysfs_attr_init(&token_location_attrs[i].attr);
- token_location_attrs[i].attr.name = location_name;
- token_location_attrs[i].attr.mode = 0444;
- token_location_attrs[i].show = location_show;
- token_attrs[j++] = &token_location_attrs[i].attr;
+
+ sysfs_attr_init(&token_entries[i].location_attr.attr);
+ token_entries[i].location_attr.attr.name = location_name;
+ token_entries[i].location_attr.attr.mode = 0444;
+ token_entries[i].location_attr.show = location_show;
+ token_attrs[j++] = &token_entries[i].location_attr.attr;
/* add value */
value_name = kasprintf(GFP_KERNEL, "%04x_value",
da_tokens[i].tokenID);
- if (value_name == NULL)
- goto loop_fail_create_value;
- sysfs_attr_init(&token_value_attrs[i].attr);
- token_value_attrs[i].attr.name = value_name;
- token_value_attrs[i].attr.mode = 0444;
- token_value_attrs[i].show = value_show;
- token_attrs[j++] = &token_value_attrs[i].attr;
- continue;
-
-loop_fail_create_value:
- kfree(location_name);
- goto out_unwind_strings;
+ if (!value_name) {
+ kfree(location_name);
+ goto out_unwind_strings;
+ }
+
+ sysfs_attr_init(&token_entries[i].value_attr.attr);
+ token_entries[i].value_attr.attr.name = value_name;
+ token_entries[i].value_attr.attr.mode = 0444;
+ token_entries[i].value_attr.show = value_show;
+ token_attrs[j++] = &token_entries[i].value_attr.attr;
}
smbios_attribute_group.attrs = token_attrs;
@@ -532,14 +512,12 @@ loop_fail_create_value:
out_unwind_strings:
while (i--) {
- kfree(token_location_attrs[i].attr.name);
- kfree(token_value_attrs[i].attr.name);
+ kfree(token_entries[i].location_attr.attr.name);
+ kfree(token_entries[i].value_attr.attr.name);
}
kfree(token_attrs);
out_allocate_attrs:
- kfree(token_value_attrs);
-out_allocate_value:
- kfree(token_location_attrs);
+ kfree(token_entries);
return -ENOMEM;
}
@@ -551,12 +529,11 @@ static void free_group(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj,
&smbios_attribute_group);
for (i = 0; i < da_num_tokens; i++) {
- kfree(token_location_attrs[i].attr.name);
- kfree(token_value_attrs[i].attr.name);
+ kfree(token_entries[i].location_attr.attr.name);
+ kfree(token_entries[i].value_attr.attr.name);
}
kfree(token_attrs);
- kfree(token_value_attrs);
- kfree(token_location_attrs);
+ kfree(token_entries);
}
static int __init dell_smbios_init(void)
diff --git a/drivers/platform/x86/firmware_attributes_class.c b/drivers/platform/x86/firmware_attributes_class.c
index dd8240009565..182a07d8ae3d 100644
--- a/drivers/platform/x86/firmware_attributes_class.c
+++ b/drivers/platform/x86/firmware_attributes_class.c
@@ -49,4 +49,5 @@ int fw_attributes_class_put(void)
EXPORT_SYMBOL_GPL(fw_attributes_class_put);
MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
+MODULE_DESCRIPTION("Firmware attributes class helper module");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index 1d4bbae115f1..231b37909801 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -29,6 +29,7 @@ static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Show debug output");
+MODULE_DESCRIPTION("IBM Premium Real Time Mode (PRTM) driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Keith Mannthey <kmmanth@us.ibm.com>");
MODULE_AUTHOR("Vernon Mauery <vernux@us.ibm.com>");
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index c7a827645864..10cd65497cc1 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -38,6 +38,7 @@ MODULE_PARM_DESC(enable_sw_tablet_mode,
/* When NOT in tablet mode, VGBS returns with the flag 0x40 */
#define TABLET_MODE_FLAG BIT(6)
+MODULE_DESCRIPTION("Intel HID Event hotkey driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
diff --git a/drivers/platform/x86/intel/pmc/pltdrv.c b/drivers/platform/x86/intel/pmc/pltdrv.c
index ddfba38c2104..f2cb87dc2d37 100644
--- a/drivers/platform/x86/intel/pmc/pltdrv.c
+++ b/drivers/platform/x86/intel/pmc/pltdrv.c
@@ -86,4 +86,5 @@ static void __exit pmc_core_platform_exit(void)
module_init(pmc_core_platform_init);
module_exit(pmc_core_platform_exit);
+MODULE_DESCRIPTION("Intel PMC Core platform driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/rst.c b/drivers/platform/x86/intel/rst.c
index 6bc9c4a603e0..f3a60e14d4c1 100644
--- a/drivers/platform/x86/intel/rst.c
+++ b/drivers/platform/x86/intel/rst.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/slab.h>
+MODULE_DESCRIPTION("Intel Rapid Start Technology Driver");
MODULE_LICENSE("GPL");
static ssize_t irst_show_wakeup_events(struct device *dev,
diff --git a/drivers/platform/x86/intel/smartconnect.c b/drivers/platform/x86/intel/smartconnect.c
index cd25d0585324..31019a1a6d5e 100644
--- a/drivers/platform/x86/intel/smartconnect.c
+++ b/drivers/platform/x86/intel/smartconnect.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
+MODULE_DESCRIPTION("Intel Smart Connect disabling driver");
MODULE_LICENSE("GPL");
static int smartconnect_acpi_init(struct acpi_device *acpi)
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
index 7bac7841ff0a..7fa360073f6e 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
@@ -1610,8 +1610,8 @@ void tpmi_sst_dev_remove(struct auxiliary_device *auxdev)
tpmi_sst->partition_mask_current &= ~BIT(plat_info->partition);
/* Free the package instance when all partitions are removed */
if (!tpmi_sst->partition_mask_current) {
- kfree(tpmi_sst);
isst_common.sst_inst[tpmi_sst->package_id] = NULL;
+ kfree(tpmi_sst);
}
mutex_unlock(&isst_tpmi_dev_lock);
}
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 84c1353eb12b..9b7ce03ba085 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -24,6 +24,7 @@
#define VGBS_TABLET_MODE_FLAGS (VGBS_TABLET_MODE_FLAG | VGBS_TABLET_MODE_FLAG_ALT)
+MODULE_DESCRIPTION("Intel Virtual Button driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("AceLan Kao");
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index d0fee5d375d7..9c7857842caf 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -39,8 +39,6 @@ MODULE_LICENSE("GPL");
#define WMI_METHOD_WMBB "2B4F501A-BD3C-4394-8DCF-00A7D2BC8210"
#define WMI_EVENT_GUID WMI_EVENT_GUID0
-#define WMAB_METHOD "\\XINI.WMAB"
-#define WMBB_METHOD "\\XINI.WMBB"
#define SB_GGOV_METHOD "\\_SB.GGOV"
#define GOV_TLED 0x2020008
#define WM_GET 1
@@ -74,7 +72,7 @@ static u32 inited;
static int battery_limit_use_wmbb;
static struct led_classdev kbd_backlight;
-static enum led_brightness get_kbd_backlight_level(void);
+static enum led_brightness get_kbd_backlight_level(struct device *dev);
static const struct key_entry wmi_keymap[] = {
{KE_KEY, 0x70, {KEY_F15} }, /* LG control panel (F1) */
@@ -84,7 +82,6 @@ static const struct key_entry wmi_keymap[] = {
* this key both sends an event and
* changes backlight level.
*/
- {KE_KEY, 0x80, {KEY_RFKILL} },
{KE_END, 0}
};
@@ -128,11 +125,10 @@ static int ggov(u32 arg0)
return res;
}
-static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
+static union acpi_object *lg_wmab(struct device *dev, u32 method, u32 arg1, u32 arg2)
{
union acpi_object args[3];
acpi_status status;
- acpi_handle handle;
struct acpi_object_list arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -143,29 +139,22 @@ static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
args[2].type = ACPI_TYPE_INTEGER;
args[2].integer.value = arg2;
- status = acpi_get_handle(NULL, (acpi_string) WMAB_METHOD, &handle);
- if (ACPI_FAILURE(status)) {
- pr_err("Cannot get handle");
- return NULL;
- }
-
arg.count = 3;
arg.pointer = args;
- status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
+ status = acpi_evaluate_object(ACPI_HANDLE(dev), "WMAB", &arg, &buffer);
if (ACPI_FAILURE(status)) {
- acpi_handle_err(handle, "WMAB: call failed.\n");
+ dev_err(dev, "WMAB: call failed.\n");
return NULL;
}
return buffer.pointer;
}
-static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
+static union acpi_object *lg_wmbb(struct device *dev, u32 method_id, u32 arg1, u32 arg2)
{
union acpi_object args[3];
acpi_status status;
- acpi_handle handle;
struct acpi_object_list arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
u8 buf[32];
@@ -181,18 +170,12 @@ static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
args[2].buffer.length = 32;
args[2].buffer.pointer = buf;
- status = acpi_get_handle(NULL, (acpi_string)WMBB_METHOD, &handle);
- if (ACPI_FAILURE(status)) {
- pr_err("Cannot get handle");
- return NULL;
- }
-
arg.count = 3;
arg.pointer = args;
- status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
+ status = acpi_evaluate_object(ACPI_HANDLE(dev), "WMBB", &arg, &buffer);
if (ACPI_FAILURE(status)) {
- acpi_handle_err(handle, "WMAB: call failed.\n");
+ dev_err(dev, "WMBB: call failed.\n");
return NULL;
}
@@ -223,7 +206,7 @@ static void wmi_notify(u32 value, void *context)
if (eventcode == 0x10000000) {
led_classdev_notify_brightness_hw_changed(
- &kbd_backlight, get_kbd_backlight_level());
+ &kbd_backlight, get_kbd_backlight_level(kbd_backlight.dev->parent));
} else {
key = sparse_keymap_entry_from_scancode(
wmi_input_dev, eventcode);
@@ -272,14 +255,7 @@ static void wmi_input_setup(void)
static void acpi_notify(struct acpi_device *device, u32 event)
{
- struct key_entry *key;
-
acpi_handle_debug(device->handle, "notify: %d\n", event);
- if (inited & INIT_SPARSE_KEYMAP) {
- key = sparse_keymap_entry_from_scancode(wmi_input_dev, 0x80);
- if (key && key->type == KE_KEY)
- sparse_keymap_report_entry(wmi_input_dev, key, 1, true);
- }
}
static ssize_t fan_mode_store(struct device *dev,
@@ -295,7 +271,7 @@ static ssize_t fan_mode_store(struct device *dev,
if (ret)
return ret;
- r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
+ r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
if (!r)
return -EIO;
@@ -306,9 +282,9 @@ static ssize_t fan_mode_store(struct device *dev,
m = r->integer.value;
kfree(r);
- r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
+ r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
kfree(r);
- r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
+ r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
kfree(r);
return count;
@@ -320,7 +296,7 @@ static ssize_t fan_mode_show(struct device *dev,
unsigned int status;
union acpi_object *r;
- r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
+ r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
if (!r)
return -EIO;
@@ -347,7 +323,7 @@ static ssize_t usb_charge_store(struct device *dev,
if (ret)
return ret;
- r = lg_wmbb(WMBB_USB_CHARGE, WM_SET, value);
+ r = lg_wmbb(dev, WMBB_USB_CHARGE, WM_SET, value);
if (!r)
return -EIO;
@@ -361,7 +337,7 @@ static ssize_t usb_charge_show(struct device *dev,
unsigned int status;
union acpi_object *r;
- r = lg_wmbb(WMBB_USB_CHARGE, WM_GET, 0);
+ r = lg_wmbb(dev, WMBB_USB_CHARGE, WM_GET, 0);
if (!r)
return -EIO;
@@ -389,7 +365,7 @@ static ssize_t reader_mode_store(struct device *dev,
if (ret)
return ret;
- r = lg_wmab(WM_READER_MODE, WM_SET, value);
+ r = lg_wmab(dev, WM_READER_MODE, WM_SET, value);
if (!r)
return -EIO;
@@ -403,7 +379,7 @@ static ssize_t reader_mode_show(struct device *dev,
unsigned int status;
union acpi_object *r;
- r = lg_wmab(WM_READER_MODE, WM_GET, 0);
+ r = lg_wmab(dev, WM_READER_MODE, WM_GET, 0);
if (!r)
return -EIO;
@@ -431,7 +407,7 @@ static ssize_t fn_lock_store(struct device *dev,
if (ret)
return ret;
- r = lg_wmab(WM_FN_LOCK, WM_SET, value);
+ r = lg_wmab(dev, WM_FN_LOCK, WM_SET, value);
if (!r)
return -EIO;
@@ -445,7 +421,7 @@ static ssize_t fn_lock_show(struct device *dev,
unsigned int status;
union acpi_object *r;
- r = lg_wmab(WM_FN_LOCK, WM_GET, 0);
+ r = lg_wmab(dev, WM_FN_LOCK, WM_GET, 0);
if (!r)
return -EIO;
@@ -475,9 +451,9 @@ static ssize_t charge_control_end_threshold_store(struct device *dev,
union acpi_object *r;
if (battery_limit_use_wmbb)
- r = lg_wmbb(WMBB_BATT_LIMIT, WM_SET, value);
+ r = lg_wmbb(&pf_device->dev, WMBB_BATT_LIMIT, WM_SET, value);
else
- r = lg_wmab(WM_BATT_LIMIT, WM_SET, value);
+ r = lg_wmab(&pf_device->dev, WM_BATT_LIMIT, WM_SET, value);
if (!r)
return -EIO;
@@ -496,7 +472,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
union acpi_object *r;
if (battery_limit_use_wmbb) {
- r = lg_wmbb(WMBB_BATT_LIMIT, WM_GET, 0);
+ r = lg_wmbb(&pf_device->dev, WMBB_BATT_LIMIT, WM_GET, 0);
if (!r)
return -EIO;
@@ -507,7 +483,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
status = r->buffer.pointer[0x10];
} else {
- r = lg_wmab(WM_BATT_LIMIT, WM_GET, 0);
+ r = lg_wmab(&pf_device->dev, WM_BATT_LIMIT, WM_GET, 0);
if (!r)
return -EIO;
@@ -586,7 +562,7 @@ static void tpad_led_set(struct led_classdev *cdev,
{
union acpi_object *r;
- r = lg_wmab(WM_TLED, WM_SET, brightness > LED_OFF);
+ r = lg_wmab(cdev->dev->parent, WM_TLED, WM_SET, brightness > LED_OFF);
kfree(r);
}
@@ -608,16 +584,16 @@ static void kbd_backlight_set(struct led_classdev *cdev,
val = 0;
if (brightness >= LED_FULL)
val = 0x24;
- r = lg_wmab(WM_KEY_LIGHT, WM_SET, val);
+ r = lg_wmab(cdev->dev->parent, WM_KEY_LIGHT, WM_SET, val);
kfree(r);
}
-static enum led_brightness get_kbd_backlight_level(void)
+static enum led_brightness get_kbd_backlight_level(struct device *dev)
{
union acpi_object *r;
int val;
- r = lg_wmab(WM_KEY_LIGHT, WM_GET, 0);
+ r = lg_wmab(dev, WM_KEY_LIGHT, WM_GET, 0);
if (!r)
return LED_OFF;
@@ -645,7 +621,7 @@ static enum led_brightness get_kbd_backlight_level(void)
static enum led_brightness kbd_backlight_get(struct led_classdev *cdev)
{
- return get_kbd_backlight_level();
+ return get_kbd_backlight_level(cdev->dev->parent);
}
static LED_DEVICE(kbd_backlight, 255, LED_BRIGHT_HW_CHANGED);
@@ -672,6 +648,11 @@ static struct platform_driver pf_driver = {
static int acpi_add(struct acpi_device *device)
{
+ struct platform_device_info pdev_info = {
+ .fwnode = acpi_fwnode_handle(device),
+ .name = PLATFORM_NAME,
+ .id = PLATFORM_DEVID_NONE,
+ };
int ret;
const char *product;
int year = 2017;
@@ -683,9 +664,7 @@ static int acpi_add(struct acpi_device *device)
if (ret)
return ret;
- pf_device = platform_device_register_simple(PLATFORM_NAME,
- PLATFORM_DEVID_NONE,
- NULL, 0);
+ pf_device = platform_device_register_full(&pdev_info);
if (IS_ERR(pf_device)) {
ret = PTR_ERR(pf_device);
pf_device = NULL;
@@ -776,7 +755,7 @@ static void acpi_remove(struct acpi_device *device)
}
static const struct acpi_device_id device_ids[] = {
- {"LGEX0815", 0},
+ {"LGEX0820", 0},
{"", 0}
};
MODULE_DEVICE_TABLE(acpi, device_ids);
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c b/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c
index 31a139d87d9a..5edc294de6e4 100644
--- a/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c
@@ -45,6 +45,7 @@ static struct platform_driver simatic_ipc_batt_driver = {
module_platform_driver(simatic_ipc_batt_driver);
+MODULE_DESCRIPTION("CMOS Battery monitoring for Simatic IPCs based on Apollo Lake GPIO");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_SOFTDEP("pre: simatic-ipc-batt platform:apollolake-pinctrl");
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c b/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c
index a7676f224075..e6a56d14b505 100644
--- a/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c
@@ -45,6 +45,7 @@ static struct platform_driver simatic_ipc_batt_driver = {
module_platform_driver(simatic_ipc_batt_driver);
+MODULE_DESCRIPTION("CMOS Battery monitoring for Simatic IPCs based on Elkhart Lake GPIO");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_SOFTDEP("pre: simatic-ipc-batt platform:elkhartlake-pinctrl");
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c b/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c
index 5e77e05fdb5d..f8849d0e48a8 100644
--- a/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c
@@ -81,6 +81,7 @@ static struct platform_driver simatic_ipc_batt_driver = {
module_platform_driver(simatic_ipc_batt_driver);
+MODULE_DESCRIPTION("CMOS Battery monitoring for Simatic IPCs based on Nuvoton GPIO");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_SOFTDEP("pre: simatic-ipc-batt gpio_f7188x platform:elkhartlake-pinctrl platform:alderlake-pinctrl");
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt.c b/drivers/platform/x86/siemens/simatic-ipc-batt.c
index c6dd263b4ee3..d9aff10608cf 100644
--- a/drivers/platform/x86/siemens/simatic-ipc-batt.c
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt.c
@@ -247,6 +247,7 @@ static struct platform_driver simatic_ipc_batt_driver = {
module_platform_driver(simatic_ipc_batt_driver);
+MODULE_DESCRIPTION("CMOS core battery driver for Siemens Simatic IPCs");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/platform/x86/siemens/simatic-ipc.c b/drivers/platform/x86/siemens/simatic-ipc.c
index 8ca6e277fa03..7039874d8f11 100644
--- a/drivers/platform/x86/siemens/simatic-ipc.c
+++ b/drivers/platform/x86/siemens/simatic-ipc.c
@@ -231,6 +231,7 @@ static void __exit simatic_ipc_exit_module(void)
module_init(simatic_ipc_init_module);
module_exit(simatic_ipc_exit_module);
+MODULE_DESCRIPTION("Siemens SIMATIC IPC platform driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Gerd Haeussler <gerd.haeussler.ext@siemens.com>");
MODULE_ALIAS("dmi:*:svnSIEMENSAG:*");
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index c6a10ec2c83f..f74af0a689f2 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -9,10 +9,13 @@
*/
#include <linux/acpi.h>
+#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/efi_embedded_fw.h>
#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kstrtox.h>
#include <linux/notifier.h>
#include <linux/property.h>
#include <linux/string.h>
@@ -31,7 +34,6 @@ static const struct property_entry archos_101_cesium_educ_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-archos-101-cesium-educ.fw"),
{ }
@@ -46,7 +48,6 @@ static const struct property_entry bush_bush_windows_tablet_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1850),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-bush-bush-windows-tablet.fw"),
{ }
@@ -76,7 +77,6 @@ static const struct property_entry chuwi_hi8_air_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-hi8-air.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -92,7 +92,6 @@ static const struct property_entry chuwi_hi8_pro_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -120,7 +119,6 @@ static const struct property_entry chuwi_hi10_air_props[] = {
PROPERTY_ENTRY_U32("touchscreen-fuzz-x", 5),
PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 4),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-air.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -136,7 +134,6 @@ static const struct property_entry chuwi_hi10_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1908),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_BOOL("silead,pen-supported"),
PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8),
@@ -168,7 +165,6 @@ static const struct property_entry chuwi_hi10_pro_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-pro.fw"),
PROPERTY_ENTRY_U32_ARRAY("silead,efi-fw-min-max", chuwi_hi10_pro_efi_min_max),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_BOOL("silead,pen-supported"),
PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8),
@@ -198,7 +194,6 @@ static const struct property_entry chuwi_hibook_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -224,7 +219,6 @@ static const struct property_entry chuwi_vi8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-vi8.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -252,7 +246,6 @@ static const struct property_entry chuwi_vi10_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1858),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-vi10.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -268,7 +261,6 @@ static const struct property_entry chuwi_surbook_mini_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2040),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1524),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-surbook-mini.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
{ }
};
@@ -286,7 +278,6 @@ static const struct property_entry connect_tablet9_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-connect-tablet9.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -303,7 +294,6 @@ static const struct property_entry csl_panther_tab_hd_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -319,7 +309,6 @@ static const struct property_entry cube_iwork8_air_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 896),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-cube-iwork8-air.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -343,7 +332,6 @@ static const struct property_entry cube_knote_i1101_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1961),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1513),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-cube-knote-i1101.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -357,7 +345,6 @@ static const struct property_entry dexp_ursus_7w_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 890),
PROPERTY_ENTRY_U32("touchscreen-size-y", 630),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-dexp-ursus-7w.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -373,7 +360,6 @@ static const struct property_entry dexp_ursus_kx210i_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -388,7 +374,6 @@ static const struct property_entry digma_citi_e200_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -447,7 +432,6 @@ static const struct property_entry irbis_tw90_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-irbis_tw90.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -463,7 +447,6 @@ static const struct property_entry irbis_tw118_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1510),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-irbis-tw118.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -480,7 +463,6 @@ static const struct property_entry itworks_tw891_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-itworks-tw891.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -493,7 +475,6 @@ static const struct property_entry jumper_ezpad_6_pro_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -508,7 +489,6 @@ static const struct property_entry jumper_ezpad_6_pro_b_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro-b.fw"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -524,7 +504,6 @@ static const struct property_entry jumper_ezpad_6_m4_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1950),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1525),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-m4.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -541,7 +520,6 @@ static const struct property_entry jumper_ezpad_7_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-jumper-ezpad-7.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,stuck-controller-bug"),
{ }
};
@@ -558,7 +536,6 @@ static const struct property_entry jumper_ezpad_mini3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1138),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-jumper-ezpad-mini3.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -575,7 +552,6 @@ static const struct property_entry mpman_converter9_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-mpman-converter9.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -591,7 +567,6 @@ static const struct property_entry mpman_mpwin895cl_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1150),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-mpman-mpwin895cl.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -608,7 +583,6 @@ static const struct property_entry myria_my8307_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-myria-my8307.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -625,7 +599,6 @@ static const struct property_entry onda_obook_20_plus_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-obook-20-plus.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -642,7 +615,6 @@ static const struct property_entry onda_v80_plus_v3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -666,7 +638,6 @@ static const struct property_entry onda_v820w_32g_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -684,7 +655,6 @@ static const struct property_entry onda_v891_v5_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name",
"gsl3676-onda-v891-v5.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -700,7 +670,6 @@ static const struct property_entry onda_v891w_v1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1676),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1130),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -717,7 +686,6 @@ static const struct property_entry onda_v891w_v3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1135),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -756,7 +724,6 @@ static const struct property_entry pipo_w11_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1532),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -772,7 +739,6 @@ static const struct property_entry positivo_c4128b_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1915),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1269),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-positivo-c4128b.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -788,7 +754,6 @@ static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1146),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -805,7 +770,6 @@ static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -822,7 +786,6 @@ static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1520),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -839,7 +802,6 @@ static const struct property_entry predia_basic_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1144),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-predia-basic.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -856,7 +818,6 @@ static const struct property_entry rca_cambio_w101_v2_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 874),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rca-cambio-w101-v2.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -871,7 +832,6 @@ static const struct property_entry rwc_nanote_p8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -887,7 +847,6 @@ static const struct property_entry schneider_sct101ctm_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -897,6 +856,21 @@ static const struct ts_dmi_data schneider_sct101ctm_data = {
.properties = schneider_sct101ctm_props,
};
+static const struct property_entry globalspace_solt_ivw116_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 7),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 22),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1723),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1077),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-globalspace-solt-ivw116.fw"),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data globalspace_solt_ivw116_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = globalspace_solt_ivw116_props,
+};
+
static const struct property_entry techbite_arc_11_6_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
PROPERTY_ENTRY_U32("touchscreen-min-y", 7),
@@ -904,7 +878,6 @@ static const struct property_entry techbite_arc_11_6_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-techbite-arc-11-6.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -920,7 +893,6 @@ static const struct property_entry teclast_tbook11_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-tbook11.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -946,7 +918,6 @@ static const struct property_entry teclast_x16_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -969,7 +940,6 @@ static const struct property_entry teclast_x3_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-teclast-x3-plus.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -985,7 +955,6 @@ static const struct property_entry teclast_x98plus2_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -999,7 +968,6 @@ static const struct property_entry trekstor_primebook_c11_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1530),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -1013,7 +981,6 @@ static const struct property_entry trekstor_primebook_c13_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2624),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -1027,7 +994,6 @@ static const struct property_entry trekstor_primetab_t13b_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2500),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1900),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
{ }
@@ -1055,7 +1021,6 @@ static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -1071,7 +1036,6 @@ static const struct property_entry trekstor_surftab_wintron70_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -1088,7 +1052,6 @@ static const struct property_entry viglen_connect_10_props[] = {
PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 6),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-viglen-connect-10.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -1102,7 +1065,6 @@ static const struct property_entry vinga_twizzle_j116_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1920),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-vinga-twizzle_j116.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -1386,6 +1348,17 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Jumper EZpad 6s Pro */
+ .driver_data = (void *)&jumper_ezpad_6_pro_b_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Ezpad"),
+ /* Above matches are too generic, add a BIOS match */
+ DMI_MATCH(DMI_BIOS_VERSION, "E.WSA116_8.E1.042.bin"),
+ DMI_MATCH(DMI_BIOS_DATE, "01/08/2020"),
+ },
+ },
+ {
/* Jumper EZpad 6 m4 */
.driver_data = (void *)&jumper_ezpad_6_m4_data,
.matches = {
@@ -1625,6 +1598,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* GlobalSpace SoLT IVW 11.6" */
+ .driver_data = (void *)&globalspace_solt_ivw116_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Globalspace Tech Pvt Ltd"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SolTIVW"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "PN20170413488"),
+ },
+ },
+ {
/* Techbite Arc 11.6 */
.driver_data = (void *)&techbite_arc_11_6_data,
.matches = {
@@ -1817,7 +1799,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
{ }
};
-static const struct ts_dmi_data *ts_data;
+static struct ts_dmi_data *ts_data;
static void ts_dmi_add_props(struct i2c_client *client)
{
@@ -1852,6 +1834,64 @@ static int ts_dmi_notifier_call(struct notifier_block *nb,
return 0;
}
+#define MAX_CMDLINE_PROPS 16
+
+static struct property_entry ts_cmdline_props[MAX_CMDLINE_PROPS + 1];
+
+static struct ts_dmi_data ts_cmdline_data = {
+ .properties = ts_cmdline_props,
+};
+
+static int __init ts_parse_props(char *str)
+{
+ /* Save the original str to show it on syntax errors */
+ char orig_str[256];
+ char *name, *value;
+ u32 u32val;
+ int i, ret;
+
+ strscpy(orig_str, str);
+
+ /*
+ * str is part of the static_command_line from init/main.c and poking
+ * holes in that by writing 0 to it is allowed, as is taking long
+ * lasting references to it.
+ */
+ ts_cmdline_data.acpi_name = strsep(&str, ":");
+
+ for (i = 0; i < MAX_CMDLINE_PROPS; i++) {
+ name = strsep(&str, ":");
+ if (!name || !name[0])
+ break;
+
+ /* Replace '=' with 0 and make value point past '=' or NULL */
+ value = name;
+ strsep(&value, "=");
+ if (!value) {
+ ts_cmdline_props[i] = PROPERTY_ENTRY_BOOL(name);
+ } else if (isdigit(value[0])) {
+ ret = kstrtou32(value, 0, &u32val);
+ if (ret)
+ goto syntax_error;
+
+ ts_cmdline_props[i] = PROPERTY_ENTRY_U32(name, u32val);
+ } else {
+ ts_cmdline_props[i] = PROPERTY_ENTRY_STRING(name, value);
+ }
+ }
+
+ if (!i || str)
+ goto syntax_error;
+
+ ts_data = &ts_cmdline_data;
+ return 1;
+
+syntax_error:
+ pr_err("Invalid '%s' value for 'i2c_touchscreen_props='\n", orig_str);
+ return 1; /* "i2c_touchscreen_props=" is still a known parameter */
+}
+__setup("i2c_touchscreen_props=", ts_parse_props);
+
static struct notifier_block ts_dmi_notifier = {
.notifier_call = ts_dmi_notifier_call,
};
@@ -1859,13 +1899,25 @@ static struct notifier_block ts_dmi_notifier = {
static int __init ts_dmi_init(void)
{
const struct dmi_system_id *dmi_id;
+ struct ts_dmi_data *ts_data_dmi;
int error;
dmi_id = dmi_first_match(touchscreen_dmi_table);
- if (!dmi_id)
+ ts_data_dmi = dmi_id ? dmi_id->driver_data : NULL;
+
+ if (ts_data) {
+ /*
+ * Kernel cmdline provided data takes precedence, copy over
+ * DMI efi_embedded_fw info if available.
+ */
+ if (ts_data_dmi)
+ ts_data->embedded_fw = ts_data_dmi->embedded_fw;
+ } else if (ts_data_dmi) {
+ ts_data = ts_data_dmi;
+ } else {
return 0; /* Not an error */
+ }
- ts_data = dmi_id->driver_data;
/* Some dmi table entries only provide an efi_embedded_fw_desc */
if (!ts_data->properties)
return 0;
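
A hypothetical illustration of the syntax ts_parse_props() above accepts (the ACPI name, sizes and firmware file below are made-up placeholders, not values taken from this patch). The first field selects the I2C client by ACPI name; a value starting with a digit becomes a u32 property, any other value a string property, and a bare name a boolean property, up to MAX_CMDLINE_PROPS entries:

    i2c_touchscreen_props=MSSL1680:touchscreen-size-x=1920:touchscreen-size-y=1280:firmware-name=gsl1680-example.fw:silead,home-button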
diff --git a/drivers/platform/x86/uv_sysfs.c b/drivers/platform/x86/uv_sysfs.c
index 37372d7cc54a..f6a0627f36db 100644
--- a/drivers/platform/x86/uv_sysfs.c
+++ b/drivers/platform/x86/uv_sysfs.c
@@ -929,4 +929,5 @@ module_init(uv_sysfs_init);
module_exit(uv_sysfs_exit);
MODULE_AUTHOR("Hewlett Packard Enterprise");
+MODULE_DESCRIPTION("Sysfs structure for HPE UV systems");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/wireless-hotkey.c b/drivers/platform/x86/wireless-hotkey.c
index e95cdbbfb708..a220fe4f9ef8 100644
--- a/drivers/platform/x86/wireless-hotkey.c
+++ b/drivers/platform/x86/wireless-hotkey.c
@@ -14,11 +14,13 @@
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
+MODULE_DESCRIPTION("Airplane mode button for AMD, HP & Xiaomi laptops");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
MODULE_ALIAS("acpi*:HPQ6001:*");
MODULE_ALIAS("acpi*:WSTADEF:*");
MODULE_ALIAS("acpi*:AMDI0051:*");
+MODULE_ALIAS("acpi*:LGEX0815:*");
struct wl_button {
struct input_dev *input_dev;
@@ -29,6 +31,7 @@ static const struct acpi_device_id wl_ids[] = {
{"HPQ6001", 0},
{"WSTADEF", 0},
{"AMDI0051", 0},
+ {"LGEX0815", 0},
{"", 0},
};
diff --git a/drivers/platform/x86/x86-android-tablets/Kconfig b/drivers/platform/x86/x86-android-tablets/Kconfig
index 6603461d4273..b591419de80c 100644
--- a/drivers/platform/x86/x86-android-tablets/Kconfig
+++ b/drivers/platform/x86/x86-android-tablets/Kconfig
@@ -6,6 +6,8 @@
config X86_ANDROID_TABLETS
tristate "X86 Android tablet support"
depends on I2C && SPI && SERIAL_DEV_BUS && ACPI && EFI && GPIOLIB && PMIC_OPREGION
+ select NEW_LEDS
+ select LEDS_CLASS
help
X86 tablets which ship with Android as (part of) the factory image
typically have various problems with their DSDTs. The factory kernels
diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c
index e64d5646b4c7..5fe68296501c 100644
--- a/drivers/platform/x86/xo1-rfkill.c
+++ b/drivers/platform/x86/xo1-rfkill.c
@@ -74,5 +74,6 @@ static struct platform_driver xo1_rfkill_driver = {
module_platform_driver(xo1_rfkill_driver);
MODULE_AUTHOR("Daniel Drake <dsd@laptop.org>");
+MODULE_DESCRIPTION("OLPC XO-1 software RF kill switch");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:xo1-rfkill");
diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index 4b828d74a606..856eaac0ec14 100644
--- a/drivers/pmdomain/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
@@ -393,6 +393,17 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
* automatically there. Just add a delay and suppose the handshake finish
* after that.
*/
+
+ /*
+	 * Some BLK-CTL modules (e.g. AudioMix on i.MX8MP) don't have a BUS
+	 * clk-en bit, so it is better to add the delay here, since the
+	 * BLK-CTL module should not need to care about how it is powered up.
+	 *
+	 * regmap_read_bypassed() makes sure the write IO transaction above
+	 * has reached its target before the udelay().
+ */
+ regmap_read_bypassed(domain->regmap, domain->regs->hsk, &reg_val);
+ udelay(5);
}
/* Disable reset clocks for all devices in the domain */
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index e74a0f6a3157..4e80273dfb1e 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -6,6 +6,7 @@
extern struct mutex pnp_lock;
extern const struct attribute_group *pnp_dev_groups[];
+extern const struct bus_type pnp_bus_type;
int pnp_register_protocol(struct pnp_protocol *protocol);
void pnp_unregister_protocol(struct pnp_protocol *protocol);
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 0a5d0d8befa8..3483e52e3a81 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -266,6 +266,12 @@ const struct bus_type pnp_bus_type = {
.dev_groups = pnp_dev_groups,
};
+bool dev_is_pnp(const struct device *dev)
+{
+ return dev->bus == &pnp_bus_type;
+}
+EXPORT_SYMBOL_GPL(dev_is_pnp);
+
int pnp_register_driver(struct pnp_driver *drv)
{
drv->driver.name = drv->name;
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index af972cdc04b5..63d03a0df5cc 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -216,7 +216,6 @@ static struct parport_driver pps_parport_driver = {
.name = KBUILD_MODNAME,
.match_port = parport_attach,
.detach = parport_detach,
- .devmodel = true,
};
module_parport_driver(pps_parport_driver);
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
index b3e084b75c23..d46eed159495 100644
--- a/drivers/pps/generators/pps_gen_parport.c
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -232,7 +232,6 @@ static struct parport_driver pps_gen_parport_driver = {
.name = KBUILD_MODNAME,
.match_port = parport_attach,
.detach = parport_detach,
- .devmodel = true,
};
module_parport_driver(pps_gen_parport_driver);
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 7513018c9f9a..2067b0120d08 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -85,7 +85,8 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
}
if (info->verify(info, pin, func, chan)) {
- pr_err("driver cannot use function %u on pin %u\n", func, chan);
+ pr_err("driver cannot use function %u and channel %u on pin %u\n",
+ func, chan, pin);
return -EOPNOTSUPP;
}
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index a15460aaa03b..6b1b8f57cd95 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -296,8 +296,7 @@ static ssize_t max_vclocks_store(struct device *dev,
if (max < ptp->n_vclocks)
goto out;
- size = sizeof(int) * max;
- vclock_index = kzalloc(size, GFP_KERNEL);
+ vclock_index = kcalloc(max, sizeof(int), GFP_KERNEL);
if (!vclock_index) {
err = -ENOMEM;
goto out;
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index a2f231d13a9f..8bae3fd2b330 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -321,22 +321,30 @@ static int stm32_pwm_config(struct stm32_pwm *priv, unsigned int ch,
* First we need to find the minimal value for prescaler such that
*
* period_ns * clkrate
- * ------------------------------
+ * ------------------------------ < max_arr + 1
* NSEC_PER_SEC * (prescaler + 1)
*
- * isn't bigger than max_arr.
+ * This equation is equivalent to
+ *
+ * period_ns * clkrate
+ * ---------------------------- < prescaler + 1
+ * NSEC_PER_SEC * (max_arr + 1)
+ *
+ * Using integer division and knowing that the right hand side is
+ * integer, this is further equivalent to
+ *
+ * (period_ns * clkrate) // (NSEC_PER_SEC * (max_arr + 1)) ≤ prescaler
*/
prescaler = mul_u64_u64_div_u64(period_ns, clk_get_rate(priv->clk),
- (u64)NSEC_PER_SEC * priv->max_arr);
- if (prescaler > 0)
- prescaler -= 1;
-
+ (u64)NSEC_PER_SEC * ((u64)priv->max_arr + 1));
if (prescaler > MAX_TIM_PSC)
return -EINVAL;
prd = mul_u64_u64_div_u64(period_ns, clk_get_rate(priv->clk),
(u64)NSEC_PER_SEC * (prescaler + 1));
+ if (!prd)
+ return -EINVAL;
/*
* All channels share the same prescaler and counter so when two
@@ -673,7 +681,8 @@ static int stm32_pwm_probe(struct platform_device *pdev)
* .apply() won't overflow.
*/
if (clk_get_rate(priv->clk) > 1000000000)
- return dev_err_probe(dev, -EINVAL, "Failed to lock clock\n");
+ return dev_err_probe(dev, -EINVAL, "Clock freq too high (%lu)\n",
+ clk_get_rate(priv->clk));
chip->ops = &stm32pwm_ops;
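
As a sanity check of the prescaler derivation in the comment above, a small standalone C sketch with hypothetical numbers (a 1 kHz period on a 208 MHz timer clock and a 16-bit ARR; the values are illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t period_ns = 1000000;   /* requested 1 kHz period */
	uint64_t clkrate   = 208000000; /* hypothetical timer clock */
	uint64_t max_arr   = 0xFFFF;    /* 16-bit auto-reload register */

	/* (period_ns * clkrate) / (NSEC_PER_SEC * (max_arr + 1)) <= prescaler */
	uint64_t prescaler = (period_ns * clkrate) / (NSEC_PER_SEC * (max_arr + 1));

	/* resulting auto-reload count; fits within max_arr and must be non-zero */
	uint64_t prd = (period_ns * clkrate) / (NSEC_PER_SEC * (prescaler + 1));

	printf("prescaler=%llu prd=%llu\n",
	       (unsigned long long)prescaler, (unsigned long long)prd);
	return 0;
}

With these numbers it prints prescaler=3 prd=52000, which indeed satisfies prd <= max_arr.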
diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
index 5de69e0bb0f9..196c1c8b578c 100644
--- a/drivers/ras/amd/atl/internal.h
+++ b/drivers/ras/amd/atl/internal.h
@@ -224,7 +224,7 @@ int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo);
int get_df_system_info(void);
int determine_node_id(struct addr_ctx *ctx, u8 socket_num, u8 die_num);
-int get_addr_hash_mi300(void);
+int get_umc_info_mi300(void);
int get_address_map(struct addr_ctx *ctx);
diff --git a/drivers/ras/amd/atl/system.c b/drivers/ras/amd/atl/system.c
index 701349e84942..6979fa3d4fe2 100644
--- a/drivers/ras/amd/atl/system.c
+++ b/drivers/ras/amd/atl/system.c
@@ -127,7 +127,7 @@ static int df4_determine_df_rev(u32 reg)
if (reg == DF_FUNC0_ID_MI300) {
df_cfg.flags.heterogeneous = 1;
- if (get_addr_hash_mi300())
+ if (get_umc_info_mi300())
return -EINVAL;
}
diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c
index 59b6169093f7..a1b4accf7b96 100644
--- a/drivers/ras/amd/atl/umc.c
+++ b/drivers/ras/amd/atl/umc.c
@@ -68,6 +68,8 @@ struct xor_bits {
};
#define NUM_BANK_BITS 4
+#define NUM_COL_BITS 5
+#define NUM_SID_BITS 2
static struct {
/* UMC::CH::AddrHashBank */
@@ -80,7 +82,22 @@ static struct {
u8 bank_xor;
} addr_hash;
+static struct {
+ u8 bank[NUM_BANK_BITS];
+ u8 col[NUM_COL_BITS];
+ u8 sid[NUM_SID_BITS];
+ u8 num_row_lo;
+ u8 num_row_hi;
+ u8 row_lo;
+ u8 row_hi;
+ u8 pc;
+} bit_shifts;
+
#define MI300_UMC_CH_BASE 0x90000
+#define MI300_ADDR_CFG (MI300_UMC_CH_BASE + 0x30)
+#define MI300_ADDR_SEL (MI300_UMC_CH_BASE + 0x40)
+#define MI300_COL_SEL_LO (MI300_UMC_CH_BASE + 0x50)
+#define MI300_ADDR_SEL_2 (MI300_UMC_CH_BASE + 0xA4)
#define MI300_ADDR_HASH_BANK0 (MI300_UMC_CH_BASE + 0xC8)
#define MI300_ADDR_HASH_PC (MI300_UMC_CH_BASE + 0xE0)
#define MI300_ADDR_HASH_PC2 (MI300_UMC_CH_BASE + 0xE4)
@@ -90,17 +107,42 @@ static struct {
#define ADDR_HASH_ROW_XOR GENMASK(31, 14)
#define ADDR_HASH_BANK_XOR GENMASK(5, 0)
+#define ADDR_CFG_NUM_ROW_LO GENMASK(11, 8)
+#define ADDR_CFG_NUM_ROW_HI GENMASK(15, 12)
+
+#define ADDR_SEL_BANK0 GENMASK(3, 0)
+#define ADDR_SEL_BANK1 GENMASK(7, 4)
+#define ADDR_SEL_BANK2 GENMASK(11, 8)
+#define ADDR_SEL_BANK3 GENMASK(15, 12)
+#define ADDR_SEL_BANK4 GENMASK(20, 16)
+#define ADDR_SEL_ROW_LO GENMASK(27, 24)
+#define ADDR_SEL_ROW_HI GENMASK(31, 28)
+
+#define COL_SEL_LO_COL0 GENMASK(3, 0)
+#define COL_SEL_LO_COL1 GENMASK(7, 4)
+#define COL_SEL_LO_COL2 GENMASK(11, 8)
+#define COL_SEL_LO_COL3 GENMASK(15, 12)
+#define COL_SEL_LO_COL4 GENMASK(19, 16)
+
+#define ADDR_SEL_2_BANK5 GENMASK(4, 0)
+#define ADDR_SEL_2_CHAN GENMASK(15, 12)
+
/*
* Read UMC::CH::AddrHash{Bank,PC,PC2} registers to get XOR bits used
- * for hashing. Do this during module init, since the values will not
- * change during run time.
+ * for hashing.
+ *
+ * Also, read UMC::CH::Addr{Cfg,Sel,Sel2} and UMC::CH::ColSelLo registers to
+ * get the values needed to reconstruct the normalized address. Apply additional
+ * offsets to the raw register values, as needed.
+ *
+ * Do this during module init, since the values will not change during run time.
*
* These registers are instantiated for each UMC across each AMD Node.
* However, they should be identically programmed due to the fixed hardware
* design of MI300 systems. So read the values from Node 0 UMC 0 and keep a
* single global structure for simplicity.
*/
-int get_addr_hash_mi300(void)
+int get_umc_info_mi300(void)
{
u32 temp;
int ret;
@@ -130,6 +172,44 @@ int get_addr_hash_mi300(void)
addr_hash.bank_xor = FIELD_GET(ADDR_HASH_BANK_XOR, temp);
+ ret = amd_smn_read(0, MI300_ADDR_CFG, &temp);
+ if (ret)
+ return ret;
+
+ bit_shifts.num_row_hi = FIELD_GET(ADDR_CFG_NUM_ROW_HI, temp);
+ bit_shifts.num_row_lo = 10 + FIELD_GET(ADDR_CFG_NUM_ROW_LO, temp);
+
+ ret = amd_smn_read(0, MI300_ADDR_SEL, &temp);
+ if (ret)
+ return ret;
+
+ bit_shifts.bank[0] = 5 + FIELD_GET(ADDR_SEL_BANK0, temp);
+ bit_shifts.bank[1] = 5 + FIELD_GET(ADDR_SEL_BANK1, temp);
+ bit_shifts.bank[2] = 5 + FIELD_GET(ADDR_SEL_BANK2, temp);
+ bit_shifts.bank[3] = 5 + FIELD_GET(ADDR_SEL_BANK3, temp);
+ /* Use BankBit4 for the SID0 position. */
+ bit_shifts.sid[0] = 5 + FIELD_GET(ADDR_SEL_BANK4, temp);
+ bit_shifts.row_lo = 12 + FIELD_GET(ADDR_SEL_ROW_LO, temp);
+ bit_shifts.row_hi = 24 + FIELD_GET(ADDR_SEL_ROW_HI, temp);
+
+ ret = amd_smn_read(0, MI300_COL_SEL_LO, &temp);
+ if (ret)
+ return ret;
+
+ bit_shifts.col[0] = 2 + FIELD_GET(COL_SEL_LO_COL0, temp);
+ bit_shifts.col[1] = 2 + FIELD_GET(COL_SEL_LO_COL1, temp);
+ bit_shifts.col[2] = 2 + FIELD_GET(COL_SEL_LO_COL2, temp);
+ bit_shifts.col[3] = 2 + FIELD_GET(COL_SEL_LO_COL3, temp);
+ bit_shifts.col[4] = 2 + FIELD_GET(COL_SEL_LO_COL4, temp);
+
+ ret = amd_smn_read(0, MI300_ADDR_SEL_2, &temp);
+ if (ret)
+ return ret;
+
+ /* Use BankBit5 for the SID1 position. */
+ bit_shifts.sid[1] = 5 + FIELD_GET(ADDR_SEL_2_BANK5, temp);
+ bit_shifts.pc = 5 + FIELD_GET(ADDR_SEL_2_CHAN, temp);
+
return 0;
}
@@ -146,9 +226,6 @@ int get_addr_hash_mi300(void)
* The MCA address format is as follows:
* MCA_ADDR[27:0] = {S[1:0], P[0], R[14:0], B[3:0], C[4:0], Z[0]}
*
- * The normalized address format is fixed in hardware and is as follows:
- * NA[30:0] = {S[1:0], R[13:0], C4, B[1:0], B[3:2], C[3:2], P, C[1:0], Z[4:0]}
- *
* Additionally, the PC and Bank bits may be hashed. This must be accounted for before
* reconstructing the normalized address.
*/
@@ -158,18 +235,10 @@ int get_addr_hash_mi300(void)
#define MI300_UMC_MCA_PC BIT(25)
#define MI300_UMC_MCA_SID GENMASK(27, 26)
-#define MI300_NA_COL_1_0 GENMASK(6, 5)
-#define MI300_NA_PC BIT(7)
-#define MI300_NA_COL_3_2 GENMASK(9, 8)
-#define MI300_NA_BANK_3_2 GENMASK(11, 10)
-#define MI300_NA_BANK_1_0 GENMASK(13, 12)
-#define MI300_NA_COL_4 BIT(14)
-#define MI300_NA_ROW GENMASK(28, 15)
-#define MI300_NA_SID GENMASK(30, 29)
-
static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr)
{
- u16 i, col, row, bank, pc, sid, temp;
+ u16 i, col, row, bank, pc, sid;
+ u32 temp;
col = FIELD_GET(MI300_UMC_MCA_COL, addr);
bank = FIELD_GET(MI300_UMC_MCA_BANK, addr);
@@ -189,49 +258,48 @@ static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr)
/* Calculate hash for PC bit. */
if (addr_hash.pc.xor_enable) {
- /* Bits SID[1:0] act as Bank[6:5] for PC hash, so apply them here. */
- bank |= sid << 5;
-
temp = bitwise_xor_bits(col & addr_hash.pc.col_xor);
temp ^= bitwise_xor_bits(row & addr_hash.pc.row_xor);
- temp ^= bitwise_xor_bits(bank & addr_hash.bank_xor);
+ /* Bits SID[1:0] act as Bank[5:4] for PC hash, so apply them here. */
+ temp ^= bitwise_xor_bits((bank | sid << NUM_BANK_BITS) & addr_hash.bank_xor);
pc ^= temp;
-
- /* Drop SID bits for the sake of debug printing later. */
- bank &= 0x1F;
}
/* Reconstruct the normalized address starting with NA[4:0] = 0 */
addr = 0;
- /* NA[6:5] = Column[1:0] */
- temp = col & 0x3;
- addr |= FIELD_PREP(MI300_NA_COL_1_0, temp);
-
- /* NA[7] = PC */
- addr |= FIELD_PREP(MI300_NA_PC, pc);
-
- /* NA[9:8] = Column[3:2] */
- temp = (col >> 2) & 0x3;
- addr |= FIELD_PREP(MI300_NA_COL_3_2, temp);
+ /* Column bits */
+ for (i = 0; i < NUM_COL_BITS; i++) {
+ temp = (col >> i) & 0x1;
+ addr |= temp << bit_shifts.col[i];
+ }
- /* NA[11:10] = Bank[3:2] */
- temp = (bank >> 2) & 0x3;
- addr |= FIELD_PREP(MI300_NA_BANK_3_2, temp);
+ /* Bank bits */
+ for (i = 0; i < NUM_BANK_BITS; i++) {
+ temp = (bank >> i) & 0x1;
+ addr |= temp << bit_shifts.bank[i];
+ }
- /* NA[13:12] = Bank[1:0] */
- temp = bank & 0x3;
- addr |= FIELD_PREP(MI300_NA_BANK_1_0, temp);
+ /* Row lo bits */
+ for (i = 0; i < bit_shifts.num_row_lo; i++) {
+ temp = (row >> i) & 0x1;
+ addr |= temp << (i + bit_shifts.row_lo);
+ }
- /* NA[14] = Column[4] */
- temp = (col >> 4) & 0x1;
- addr |= FIELD_PREP(MI300_NA_COL_4, temp);
+ /* Row hi bits */
+ for (i = 0; i < bit_shifts.num_row_hi; i++) {
+ temp = (row >> (i + bit_shifts.num_row_lo)) & 0x1;
+ addr |= temp << (i + bit_shifts.row_hi);
+ }
- /* NA[28:15] = Row[13:0] */
- addr |= FIELD_PREP(MI300_NA_ROW, row);
+ /* PC bit */
+ addr |= pc << bit_shifts.pc;
- /* NA[30:29] = SID[1:0] */
- addr |= FIELD_PREP(MI300_NA_SID, sid);
+ /* SID bits */
+ for (i = 0; i < NUM_SID_BITS; i++) {
+ temp = (sid >> i) & 0x1;
+ addr |= temp << bit_shifts.sid[i];
+ }
pr_debug("Addr=0x%016lx", addr);
pr_debug("Bank=%u Row=%u Column=%u PC=%u SID=%u", bank, row, col, pc, sid);
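
A minimal sketch of the bit-scatter loop introduced above, in plain C and with made-up shift positions rather than values decoded from the MI300 UMC registers:

#include <stdint.h>
#include <stdio.h>

/* hypothetical per-bit target positions; the driver reads these from HW */
static const uint8_t col_shift[5] = { 5, 6, 8, 9, 14 };

int main(void)
{
	uint16_t col = 0x13;	/* example column bits C[4:0] */
	uint64_t addr = 0;

	/* scatter each column bit to its normalized-address position */
	for (int i = 0; i < 5; i++)
		addr |= (uint64_t)((col >> i) & 0x1) << col_shift[i];

	printf("column contribution: 0x%llx\n", (unsigned long long)addr);
	return 0;
}

The driver repeats the same pattern for the bank, row, SID and PC bits, each with its own shift values read by get_umc_info_mi300().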
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 34fcdd82b2ea..f3c447ecdc3b 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -140,7 +140,7 @@
#define AXP717_DCDC1_NUM_VOLTAGES 88
#define AXP717_DCDC2_NUM_VOLTAGES 107
-#define AXP717_DCDC3_NUM_VOLTAGES 104
+#define AXP717_DCDC3_NUM_VOLTAGES 103
#define AXP717_DCDC_V_OUT_MASK GENMASK(6, 0)
#define AXP717_LDO_V_OUT_MASK GENMASK(4, 0)
@@ -763,10 +763,15 @@ static const struct linear_range axp717_dcdc1_ranges[] = {
REGULATOR_LINEAR_RANGE(1220000, 71, 87, 20000),
};
+/*
+ * The manual says that the last voltage is 3.4V, encoded as 0b1101011 (107),
+ * but every other method proves that this is wrong, so it's really 106 that
+ * programs the final 3.4V.
+ */
static const struct linear_range axp717_dcdc2_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 0, 70, 10000),
REGULATOR_LINEAR_RANGE(1220000, 71, 87, 20000),
- REGULATOR_LINEAR_RANGE(1600000, 88, 107, 100000),
+ REGULATOR_LINEAR_RANGE(1600000, 88, 106, 100000),
};
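
A quick check of the corrected upper bound: the third range starts at 1.6 V at selector 88 with 100 mV steps, so selector 106 programs 1600000 + (106 - 88) * 100000 = 3400000 µV, i.e. exactly the 3.4 V endpoint the comment refers to.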
static const struct linear_range axp717_dcdc3_ranges[] = {
@@ -790,40 +795,40 @@ static const struct regulator_desc axp717_regulators[] = {
AXP_DESC(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100,
AXP717_DCDC4_CONTROL, AXP717_DCDC_V_OUT_MASK,
AXP717_DCDC_OUTPUT_CONTROL, BIT(3)),
- AXP_DESC(AXP717, ALDO1, "aldo1", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, ALDO1, "aldo1", "aldoin", 500, 3500, 100,
AXP717_ALDO1_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(0)),
- AXP_DESC(AXP717, ALDO2, "aldo2", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, ALDO2, "aldo2", "aldoin", 500, 3500, 100,
AXP717_ALDO2_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(1)),
- AXP_DESC(AXP717, ALDO3, "aldo3", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, ALDO3, "aldo3", "aldoin", 500, 3500, 100,
AXP717_ALDO3_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(2)),
- AXP_DESC(AXP717, ALDO4, "aldo4", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, ALDO4, "aldo4", "aldoin", 500, 3500, 100,
AXP717_ALDO4_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(3)),
- AXP_DESC(AXP717, BLDO1, "bldo1", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, BLDO1, "bldo1", "bldoin", 500, 3500, 100,
AXP717_BLDO1_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(4)),
- AXP_DESC(AXP717, BLDO2, "bldo2", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, BLDO2, "bldo2", "bldoin", 500, 3500, 100,
AXP717_BLDO2_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(5)),
- AXP_DESC(AXP717, BLDO3, "bldo3", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, BLDO3, "bldo3", "bldoin", 500, 3500, 100,
AXP717_BLDO3_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(6)),
- AXP_DESC(AXP717, BLDO4, "bldo4", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, BLDO4, "bldo4", "bldoin", 500, 3500, 100,
AXP717_BLDO4_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(7)),
- AXP_DESC(AXP717, CLDO1, "cldo1", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, CLDO1, "cldo1", "cldoin", 500, 3500, 100,
AXP717_CLDO1_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO1_OUTPUT_CONTROL, BIT(0)),
- AXP_DESC(AXP717, CLDO2, "cldo2", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, CLDO2, "cldo2", "cldoin", 500, 3500, 100,
AXP717_CLDO2_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO1_OUTPUT_CONTROL, BIT(1)),
- AXP_DESC(AXP717, CLDO3, "cldo3", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, CLDO3, "cldo3", "cldoin", 500, 3500, 100,
AXP717_CLDO3_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO1_OUTPUT_CONTROL, BIT(2)),
- AXP_DESC(AXP717, CLDO4, "cldo4", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, CLDO4, "cldo4", "cldoin", 500, 3500, 100,
AXP717_CLDO4_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO1_OUTPUT_CONTROL, BIT(3)),
AXP_DESC(AXP717, CPUSLDO, "cpusldo", "vin1", 500, 1400, 50,
diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c
index 26192d55a685..79fbb45297f6 100644
--- a/drivers/regulator/bd71815-regulator.c
+++ b/drivers/regulator/bd71815-regulator.c
@@ -256,7 +256,7 @@ static int buck12_set_hw_dvs_levels(struct device_node *np,
* 10: 2.50mV/usec 10mV 4uS
* 11: 1.25mV/usec 10mV 8uS
*/
-static const unsigned int bd7181x_ramp_table[] = { 1250, 2500, 5000, 10000 };
+static const unsigned int bd7181x_ramp_table[] = { 10000, 5000, 2500, 1250 };
static int bd7181x_led_set_current_limit(struct regulator_dev *rdev,
int min_uA, int max_uA)
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 5794f4e9dd52..844e9587a880 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3347,6 +3347,7 @@ struct regmap *regulator_get_regmap(struct regulator *regulator)
return map ? map : ERR_PTR(-EOPNOTSUPP);
}
+EXPORT_SYMBOL_GPL(regulator_get_regmap);
/**
* regulator_get_hardware_vsel_register - get the HW voltage selector register
diff --git a/drivers/regulator/rtq2208-regulator.c b/drivers/regulator/rtq2208-regulator.c
index b90e53d922d6..c31b6dc3229c 100644
--- a/drivers/regulator/rtq2208-regulator.c
+++ b/drivers/regulator/rtq2208-regulator.c
@@ -228,6 +228,11 @@ static const struct regulator_ops rtq2208_regulator_ldo_ops = {
.set_suspend_disable = rtq2208_set_suspend_disable,
};
+static struct of_regulator_match rtq2208_ldo_match[] = {
+ {.name = "ldo2", },
+ {.name = "ldo1", },
+};
+
static unsigned int rtq2208_of_map_mode(unsigned int mode)
{
switch (mode) {
@@ -322,8 +327,7 @@ static irqreturn_t rtq2208_irq_handler(int irqno, void *devid)
return IRQ_HANDLED;
}
-static int rtq2208_of_get_fixed_voltage(struct device *dev,
- struct of_regulator_match *rtq2208_ldo_match, int n_fixed)
+static int rtq2208_of_get_ldo_dvs_ability(struct device *dev)
{
struct device_node *np;
struct of_regulator_match *match;
@@ -338,14 +342,14 @@ static int rtq2208_of_get_fixed_voltage(struct device *dev,
if (!np)
np = dev->of_node;
- ret = of_regulator_match(dev, np, rtq2208_ldo_match, n_fixed);
+ ret = of_regulator_match(dev, np, rtq2208_ldo_match, ARRAY_SIZE(rtq2208_ldo_match));
of_node_put(np);
if (ret < 0)
return ret;
- for (i = 0; i < n_fixed; i++) {
+ for (i = 0; i < ARRAY_SIZE(rtq2208_ldo_match); i++) {
match = rtq2208_ldo_match + i;
init_data = match->init_data;
rdesc = (struct rtq2208_regulator_desc *)match->driver_data;
@@ -388,8 +392,7 @@ static const struct linear_range rtq2208_vout_range[] = {
REGULATOR_LINEAR_RANGE(1310000, 181, 255, 10000),
};
-static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel,
- int idx, struct of_regulator_match *rtq2208_ldo_match, int *ldo_idx)
+static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel, int idx)
{
struct regulator_desc *desc;
static const struct {
@@ -461,8 +464,7 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *regulator_idx_table,
struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX], struct device *dev)
{
- struct of_regulator_match rtq2208_ldo_match[2];
- int mtp_sel, ret, i, idx, ldo_idx = 0;
+ int mtp_sel, i, idx, ret;
/* get mtp_sel0 or mtp_sel1 */
mtp_sel = device_property_read_bool(dev, "richtek,mtp-sel-high");
@@ -474,7 +476,7 @@ static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *
if (!rdesc[i])
return -ENOMEM;
- rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx, rtq2208_ldo_match, &ldo_idx);
+ rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx);
/* init ldo dvs ability */
if (idx >= RTQ2208_LDO2)
@@ -482,7 +484,7 @@ static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *
}
/* init ldo fixed_uV */
- ret = rtq2208_of_get_fixed_voltage(dev, rtq2208_ldo_match, ldo_idx);
+ ret = rtq2208_of_get_ldo_dvs_ability(dev);
if (ret)
return dev_err_probe(dev, ret, "Failed to get ldo fixed_uV\n");
diff --git a/drivers/regulator/tps6594-regulator.c b/drivers/regulator/tps6594-regulator.c
index 4a859f4c0f83..ac53792e3fed 100644
--- a/drivers/regulator/tps6594-regulator.c
+++ b/drivers/regulator/tps6594-regulator.c
@@ -653,18 +653,14 @@ static int tps6594_regulator_probe(struct platform_device *pdev)
}
}
- if (tps->chip_id == LP8764) {
- nr_buck = ARRAY_SIZE(buck_regs);
- nr_ldo = 0;
- nr_types = REGS_INT_NB;
- } else if (tps->chip_id == TPS65224) {
+ if (tps->chip_id == TPS65224) {
nr_buck = ARRAY_SIZE(tps65224_buck_regs);
nr_ldo = ARRAY_SIZE(tps65224_ldo_regs);
- nr_types = REGS_INT_NB;
+ nr_types = TPS65224_REGS_INT_NB;
} else {
nr_buck = ARRAY_SIZE(buck_regs);
- nr_ldo = ARRAY_SIZE(tps6594_ldo_regs);
- nr_types = TPS65224_REGS_INT_NB;
+ nr_ldo = (tps->chip_id == LP8764) ? 0 : ARRAY_SIZE(tps6594_ldo_regs);
+ nr_types = REGS_INT_NB;
}
reg_irq_nb = nr_types * (nr_buck + nr_ldo);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index d53ee34d398f..fbe29cabcbb8 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1293,6 +1293,7 @@ sclp_init(void)
fail_unregister_reboot_notifier:
unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
+ list_del(&sclp_state_change_event.list);
sclp_init_state = sclp_init_state_uninitialized;
free_page((unsigned long) sclp_read_sccb);
free_page((unsigned long) sclp_init_sccb);
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index d7569f395559..d6491fc84e8c 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -698,6 +698,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
dma64_t *indicatorp = NULL;
int ret, i, queue_idx = 0;
struct ccw1 *ccw;
+ dma32_t indicatorp_dma = 0;
ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
@@ -725,7 +726,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
*/
indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
sizeof(*indicatorp),
- &ccw->cda);
+ &indicatorp_dma);
if (!indicatorp)
goto out;
*indicatorp = indicators_dma(vcdev);
@@ -735,6 +736,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
/* no error, just fall back to legacy interrupts */
vcdev->is_thinint = false;
}
+ ccw->cda = indicatorp_dma;
if (!vcdev->is_thinint) {
/* Register queue indicators with host. */
*indicators(vcdev) = 0;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index a226dc1b65d7..4eb0837298d4 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -414,28 +414,40 @@ static char print_alua_state(unsigned char state)
}
}
-static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
- struct scsi_sense_hdr *sense_hdr)
+static void alua_handle_state_transition(struct scsi_device *sdev)
{
struct alua_dh_data *h = sdev->handler_data;
struct alua_port_group *pg;
+ rcu_read_lock();
+ pg = rcu_dereference(h->pg);
+ if (pg)
+ pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+ rcu_read_unlock();
+ alua_check(sdev, false);
+}
+
+static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
+{
switch (sense_hdr->sense_key) {
case NOT_READY:
if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
/*
* LUN Not Accessible - ALUA state transition
*/
- rcu_read_lock();
- pg = rcu_dereference(h->pg);
- if (pg)
- pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
- rcu_read_unlock();
- alua_check(sdev, false);
+ alua_handle_state_transition(sdev);
return NEEDS_RETRY;
}
break;
case UNIT_ATTENTION:
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
+ /*
+ * LUN Not Accessible - ALUA state transition
+ */
+ alua_handle_state_transition(sdev);
+ return NEEDS_RETRY;
+ }
if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
/*
* Power On, Reset, or Bus Device Reset.
@@ -502,7 +514,8 @@ static int alua_tur(struct scsi_device *sdev)
retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
ALUA_FAILOVER_RETRIES, &sense_hdr);
- if (sense_hdr.sense_key == NOT_READY &&
+ if ((sense_hdr.sense_key == NOT_READY ||
+ sense_hdr.sense_key == UNIT_ATTENTION) &&
sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
return SCSI_DH_RETRY;
else if (retval)
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 21339da505f1..60e4f101c2bf 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1275,7 +1275,6 @@ static struct parport_driver imm_driver = {
.name = "imm",
.match_port = imm_attach,
.detach = imm_detach,
- .devmodel = true,
};
module_parport_driver(imm_driver);
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 4c69fc63c119..cbbe43d8ef87 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -610,15 +610,15 @@ int sas_ata_init(struct domain_device *found_dev)
rc = ata_sas_tport_add(ata_host->dev, ap);
if (rc)
- goto destroy_port;
+ goto free_port;
found_dev->sata_dev.ata_host = ata_host;
found_dev->sata_dev.ap = ap;
return 0;
-destroy_port:
- kfree(ap);
+free_port:
+ ata_port_free(ap);
free_host:
ata_host_put(ata_host);
return rc;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 8fb7c41c0962..48d975c6dbf2 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -301,7 +301,7 @@ void sas_free_device(struct kref *kref)
if (dev_is_sata(dev) && dev->sata_dev.ap) {
ata_sas_tport_delete(dev->sata_dev.ap);
- kfree(dev->sata_dev.ap);
+ ata_port_free(dev->sata_dev.ap);
ata_host_put(dev->sata_dev.ata_host);
dev->sata_dev.ata_host = NULL;
dev->sata_dev.ap = NULL;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 1638109a68a0..cd261b48eb46 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -2163,10 +2163,72 @@ persistent_id_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(persistent_id);
+/**
+ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_supported attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' sdev attribute, only works with SATA devices
+ */
+static ssize_t
+sas_ncq_prio_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
+}
+static DEVICE_ATTR_RO(sas_ncq_prio_supported);
+
+ * sas_ncq_prio_enable_show - Show whether prioritized I/O (NCQ priority) is enabled
+ * sas_ncq_prio_enable_show - send prioritized io commands to device
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_enable attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' sdev attribute, only works with SATA devices
+ */
+static ssize_t
+sas_ncq_prio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+
+ if (!sdev_priv_data)
+ return 0;
+
+ return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable);
+}
+
+static ssize_t
+sas_ncq_prio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+ bool ncq_prio_enable = 0;
+
+ if (kstrtobool(buf, &ncq_prio_enable))
+ return -EINVAL;
+
+ if (!sas_ata_ncq_prio_supported(sdev))
+ return -EINVAL;
+
+ sdev_priv_data->ncq_prio_enable = ncq_prio_enable;
+
+ return strlen(buf);
+}
+static DEVICE_ATTR_RW(sas_ncq_prio_enable);
+
static struct attribute *mpi3mr_dev_attrs[] = {
&dev_attr_sas_address.attr,
&dev_attr_device_handle.attr,
&dev_attr_persistent_id.attr,
+ &dev_attr_sas_ncq_prio_supported.attr,
+ &dev_attr_sas_ncq_prio_enable.attr,
NULL,
};
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 329cc6ec3b58..82aa4e418c5a 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -1364,7 +1364,7 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
continue;
if (i > sizeof(mr_sas_port->phy_mask) * 8) {
- ioc_warn(mrioc, "skipping port %u, max allowed value is %lu\n",
+ ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n",
i, sizeof(mr_sas_port->phy_mask) * 8);
goto out_fail;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 258647fc6bdd..b2bcf4a27ddc 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -4774,7 +4774,7 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
char desc[17] = {0};
u32 iounit_pg1_flags;
- strscpy(desc, ioc->manu_pg0.ChipName, sizeof(desc));
+ memtostr(desc, ioc->manu_pg0.ChipName);
ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n",
desc,
(ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
@@ -8512,6 +8512,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
if (ioc->facts.MaxDevHandle % 8)
ioc->pd_handles_sz++;
+ /*
+	 * pd_handles_sz should have at least the minimal room needed by
+	 * set_bit()/test_bit(), otherwise an out-of-bounds access may occur.
+ */
+ ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long));
+
ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
GFP_KERNEL);
if (!ioc->pd_handles) {
@@ -8529,6 +8535,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
if (ioc->facts.MaxDevHandle % 8)
ioc->pend_os_device_add_sz++;
+
+ /*
+	 * pend_os_device_add_sz should have at least the minimal room needed
+	 * by set_bit()/test_bit(), otherwise an out-of-bounds access may occur.
+ */
+ ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz,
+ sizeof(unsigned long));
ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
GFP_KERNEL);
if (!ioc->pend_os_device_add) {
@@ -8820,6 +8833,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
if (ioc->facts.MaxDevHandle % 8)
pd_handles_sz++;
+ /*
+	 * pd_handles should have at least the minimal room needed by
+	 * set_bit()/test_bit(), otherwise an out-of-bounds access may
+	 * occur.
+ */
+ pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long));
pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
GFP_KERNEL);
if (!pd_handles) {
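
For a hypothetical MaxDevHandle of 1000, pd_handles_sz is 1000 / 8 = 125 bytes; ALIGN(125, sizeof(unsigned long)) rounds that up to 128 on a 64-bit build, so the last word touched by set_bit()/test_bit() stays inside the allocation.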
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index bf100a4ebfc3..fe1e96fda284 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -2048,9 +2048,6 @@ void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
-/* NCQ Prio Handling Check */
-bool scsih_ncq_prio_supp(struct scsi_device *sdev);
-
void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_init_debugfs(void);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 1c9fd26195b8..87784c96249a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -4088,7 +4088,7 @@ sas_ncq_prio_supported_show(struct device *dev,
{
struct scsi_device *sdev = to_scsi_device(dev);
- return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
+ return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
}
static DEVICE_ATTR_RO(sas_ncq_prio_supported);
@@ -4123,7 +4123,7 @@ sas_ncq_prio_enable_store(struct device *dev,
if (kstrtobool(buf, &ncq_prio_enable))
return -EINVAL;
- if (!scsih_ncq_prio_supp(sdev))
+ if (!sas_ata_ncq_prio_supported(sdev))
return -EINVAL;
sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 89ef43a5ef86..870ec2cb4af4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -302,8 +302,8 @@ struct _scsi_io_transfer {
/**
* _scsih_set_debug_level - global setting of ioc->logging_level.
- * @val: ?
- * @kp: ?
+ * @val: value of the parameter to be set
+ * @kp: pointer to kernel_param structure
*
* Note: The logging levels are defined in mpt3sas_debug.h.
*/
@@ -12571,29 +12571,6 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-/**
- * scsih_ncq_prio_supp - Check for NCQ command priority support
- * @sdev: scsi device struct
- *
- * This is called when a user indicates they would like to enable
- * ncq command priorities. This works only on SATA devices.
- */
-bool scsih_ncq_prio_supp(struct scsi_device *sdev)
-{
- struct scsi_vpd *vpd;
- bool ncq_prio_supp = false;
-
- rcu_read_lock();
- vpd = rcu_dereference(sdev->vpd_pg89);
- if (!vpd || vpd->len < 214)
- goto out;
-
- ncq_prio_supp = (vpd->data[213] >> 4) & 1;
-out:
- rcu_read_unlock();
-
- return ncq_prio_supp;
-}
/*
* The pci device ids are defined in mpi/mpi2_cnfg.h.
*/
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 76f9a9177198..d84413b77d84 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -458,17 +458,13 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
goto out;
manufacture_reply = data_out + sizeof(struct rep_manu_request);
- strscpy(edev->vendor_id, manufacture_reply->vendor_id,
- sizeof(edev->vendor_id));
- strscpy(edev->product_id, manufacture_reply->product_id,
- sizeof(edev->product_id));
- strscpy(edev->product_rev, manufacture_reply->product_rev,
- sizeof(edev->product_rev));
+ memtostr(edev->vendor_id, manufacture_reply->vendor_id);
+ memtostr(edev->product_id, manufacture_reply->product_id);
+ memtostr(edev->product_rev, manufacture_reply->product_rev);
edev->level = manufacture_reply->sas_format & 1;
if (edev->level) {
- strscpy(edev->component_vendor_id,
- manufacture_reply->component_vendor_id,
- sizeof(edev->component_vendor_id));
+ memtostr(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id);
tmp = (u8 *)&manufacture_reply->component_id;
edev->component_id = tmp[0] << 8 | tmp[1];
edev->component_revision_id =
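
memtostr() (from linux/string.h) copies from a fixed-size source field that need not be NUL-terminated and always NUL-terminates the destination, which is presumably the motivation here: strscpy() expects a NUL-terminated source string, which these fixed-width SAS manufacturing-page fields are not guaranteed to be.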
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 8300f0bdddb3..3741972bc768 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -1151,7 +1151,6 @@ static struct parport_driver ppa_driver = {
.name = "ppa",
.match_port = ppa_attach,
.detach = ppa_detach,
- .devmodel = true,
};
module_parport_driver(ppa_driver);
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 5058e01b65a2..98afdfe63600 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -363,6 +363,7 @@ struct qedf_ctx {
#define QEDF_IN_RECOVERY 5
#define QEDF_DBG_STOP_IO 6
#define QEDF_PROBING 8
+#define QEDF_STAG_IN_PROGRESS 9
unsigned long flags; /* Miscellaneous state flags */
int fipvlan_retries;
u8 num_queues;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index fd12439cbaab..49adddf978cc 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -318,11 +318,18 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
*/
if (resp == fc_lport_flogi_resp) {
qedf->flogi_cnt++;
+ qedf->flogi_pending++;
+
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
+ qedf->flogi_pending = 0;
+ }
+
if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
schedule_delayed_work(&qedf->stag_work, 2);
return NULL;
}
- qedf->flogi_pending++;
+
return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
arg, timeout);
}
@@ -912,13 +919,14 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
struct qedf_ctx *qedf;
struct qed_link_output if_link;
+ qedf = lport_priv(lport);
+
if (lport->vport) {
+ clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
printk_ratelimited("Cannot issue host reset on NPIV port.\n");
return;
}
- qedf = lport_priv(lport);
-
qedf->flogi_pending = 0;
/* For host reset, essentially do a soft link up/down */
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
@@ -938,6 +946,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
if (!if_link.link_up) {
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
"Physical link is not up.\n");
+ clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
return;
}
/* Flush and wait to make sure link down is processed */
@@ -950,6 +959,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
"Queue link up work.\n");
queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
0);
+ clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
}
/* Reset the host by gracefully logging out and then logging back in */
@@ -3463,6 +3473,7 @@ retry_probe:
}
/* Start the Slowpath-process */
+ memset(&slowpath_params, 0, sizeof(struct qed_slowpath_params));
slowpath_params.int_mode = QED_INT_MODE_MSIX;
slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
@@ -3721,6 +3732,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
{
struct qedf_ctx *qedf;
int rc;
+ int cnt = 0;
if (!pdev) {
QEDF_ERR(NULL, "pdev is NULL.\n");
@@ -3738,6 +3750,17 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
return;
}
+stag_in_prog:
+ if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt);
+ cnt++;
+
+ if (cnt < 5) {
+ msleep(500);
+ goto stag_in_prog;
+ }
+ }
+
if (mode != QEDF_MODE_RECOVERY)
set_bit(QEDF_UNLOADING, &qedf->flags);
@@ -3997,6 +4020,24 @@ void qedf_stag_change_work(struct work_struct *work)
struct qedf_ctx *qedf =
container_of(work, struct qedf_ctx, stag_work.work);
+ if (!qedf) {
+ QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL");
+ return;
+ }
+
+ if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Already is in recovery, hence not calling software context reset.\n");
+ return;
+ }
+
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
+ return;
+ }
+
+ set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+
printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
dev_name(&qedf->pdev->dev), __func__, __LINE__,
qedf->dbg_ctx.host_no);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3e0c0381277a..ee69bd35889d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -350,6 +350,13 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
if (result < SCSI_VPD_HEADER_SIZE)
return 0;
+ if (result > sizeof(vpd)) {
+ dev_warn_once(&sdev->sdev_gendev,
+ "%s: long VPD page 0 length: %d bytes\n",
+ __func__, result);
+ result = sizeof(vpd);
+ }
+
result -= SCSI_VPD_HEADER_SIZE;
if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
return 0;
@@ -666,6 +673,13 @@ void scsi_cdl_check(struct scsi_device *sdev)
sdev->use_10_for_rw = 0;
sdev->cdl_supported = 1;
+
+ /*
+ * If the device supports CDL, make sure that the current drive
+ * feature status is consistent with the user controlled
+ * cdl_enable state.
+ */
+ scsi_cdl_enable(sdev, sdev->cdl_enable);
} else {
sdev->cdl_supported = 0;
}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 424a89513814..4e33f1661e4c 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -416,6 +416,29 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *sdev)
}
EXPORT_SYMBOL_GPL(sas_is_tlr_enabled);
+/**
+ * sas_ata_ncq_prio_supported - Check for ATA NCQ command priority support
+ * @sdev: SCSI device
+ *
+ * Check if an ATA device supports NCQ priority using VPD page 89h (ATA
+ * Information). Since this VPD page is implemented only for ATA devices,
+ * this function always returns false for SCSI devices.
+ */
+bool sas_ata_ncq_prio_supported(struct scsi_device *sdev)
+{
+ struct scsi_vpd *vpd;
+ bool ncq_prio_supported = false;
+
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg89);
+ if (vpd && vpd->len >= 214)
+ ncq_prio_supported = (vpd->data[213] >> 4) & 1;
+ rcu_read_unlock();
+
+ return ncq_prio_supported;
+}
+EXPORT_SYMBOL_GPL(sas_ata_ncq_prio_supported);
+
/*
* SAS Phy attributes
*/
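
For context (taken from the SAT/ATA layout rather than from this patch): the ATA Information VPD page embeds the IDENTIFY DEVICE data starting at byte 60, so byte 60 + 2 * 76 + 1 = 213 is the high byte of IDENTIFY word 76, and bit 4 of that byte is word 76 bit 12, the NCQ priority information bit that this helper tests.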
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 332eb9dac22d..6b64af7d4927 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -63,6 +63,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
@@ -3118,6 +3119,9 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
struct scsi_mode_data data;
int res;
+ if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS)
+ return;
+
res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a,
/*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT,
sdkp->max_retries, &data, &sshdr);
@@ -3565,16 +3569,23 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
static void sd_read_block_zero(struct scsi_disk *sdkp)
{
- unsigned int buf_len = sdkp->device->sector_size;
- char *buffer, cmd[10] = { };
+ struct scsi_device *sdev = sdkp->device;
+ unsigned int buf_len = sdev->sector_size;
+ u8 *buffer, cmd[16] = { };
buffer = kmalloc(buf_len, GFP_KERNEL);
if (!buffer)
return;
- cmd[0] = READ_10;
- put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
- put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+ if (sdev->use_16_for_rw) {
+ cmd[0] = READ_16;
+ put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */
+ } else {
+ cmd[0] = READ_10;
+ put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+ }
scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
SD_TIMEOUT, sdkp->max_retries, NULL);
@@ -3700,8 +3711,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
*/
if (sdkp->first_scan ||
q->limits.max_sectors > q->limits.max_dev_sectors ||
- q->limits.max_sectors > q->limits.max_hw_sectors)
+ q->limits.max_sectors > q->limits.max_hw_sectors) {
q->limits.max_sectors = rw_max;
+ q->limits.max_user_sectors = rw_max;
+ }
sdkp->first_scan = 0;
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 1175f2e213b5..dc899277b3a4 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -65,7 +65,7 @@ int sr_disk_status(struct cdrom_device_info *);
int sr_get_last_session(struct cdrom_device_info *, struct cdrom_multisession *);
int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *);
int sr_reset(struct cdrom_device_info *);
-int sr_select_speed(struct cdrom_device_info *cdi, int speed);
+int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed);
int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
int sr_is_xa(Scsi_CD *);
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 5b0b35e60e61..a0d2556a27bb 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -425,11 +425,14 @@ int sr_reset(struct cdrom_device_info *cdi)
return 0;
}
-int sr_select_speed(struct cdrom_device_info *cdi, int speed)
+int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed)
{
Scsi_CD *cd = cdi->handle;
struct packet_command cgc;
+ /* avoid exceeding the max speed or overflowing integer bounds */
+	speed = clamp(speed, 0, 0xffff / 177);
+
if (speed == 0)
speed = 0xffff; /* set to max */
else
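
The clamp keeps the later Nx-to-kbyte/s conversion inside the command's 16-bit speed field: 0xffff / 177 = 370 and 370 * 177 = 65490, so requests above roughly 370x are capped before the multiplication (177 kB/s being approximately the 1x CD data rate).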
diff --git a/drivers/siox/siox-bus-gpio.c b/drivers/siox/siox-bus-gpio.c
index 9e01642e72de..d6f936464063 100644
--- a/drivers/siox/siox-bus-gpio.c
+++ b/drivers/siox/siox-bus-gpio.c
@@ -148,5 +148,6 @@ static struct platform_driver siox_gpio_driver = {
module_platform_driver(siox_gpio_driver);
MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_DESCRIPTION("SIOX GPIO bus driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/slimbus/stream.c b/drivers/slimbus/stream.c
index 1d6b38657917..863ab3075d7e 100644
--- a/drivers/slimbus/stream.c
+++ b/drivers/slimbus/stream.c
@@ -18,15 +18,17 @@
* and the first slot of the next consecutive Segment.
* @segdist_code: Segment Distribution Code SD[11:0]
* @seg_offset_mask: Segment offset mask in SD[11:0]
- * @segdist_codes: List of all possible Segmet Distribution codes.
*/
-static const struct segdist_code {
+struct segdist_code {
int ratem;
int seg_interval;
int segdist_code;
u32 seg_offset_mask;
-} segdist_codes[] = {
+};
+
+/* segdist_codes - List of all possible Segment Distribution codes. */
+static const struct segdist_code segdist_codes[] = {
{1, 1536, 0x200, 0xdff},
{2, 768, 0x100, 0xcff},
{4, 384, 0x080, 0xc7f},
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index b6bfd6729df3..d27667283846 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -127,8 +127,8 @@ static void tegra_fuse_print_sku_info(struct tegra_sku_info *tegra_sku_info)
static int tegra_fuse_add_lookups(struct tegra_fuse *fuse)
{
- fuse->lookups = kmemdup_array(fuse->soc->lookups, sizeof(*fuse->lookups),
- fuse->soc->num_lookups, GFP_KERNEL);
+ fuse->lookups = kmemdup_array(fuse->soc->lookups, fuse->soc->num_lookups,
+ sizeof(*fuse->lookups), GFP_KERNEL);
if (!fuse->lookups)
return -ENOMEM;
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 20d94bcfc9b4..795e223f7e5c 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -571,6 +571,9 @@ static int sdw_master_read_amd_prop(struct sdw_bus *bus)
amd_manager->wake_en_mask = wake_en_mask;
fwnode_property_read_u32(link, "amd-sdw-power-mode", &power_mode_mask);
amd_manager->power_mode_mask = power_mode_mask;
+
+ fwnode_handle_put(link);
+
return 0;
}
diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
index 17cf27e6ea73..18517121cc89 100644
--- a/drivers/soundwire/intel_auxdevice.c
+++ b/drivers/soundwire/intel_auxdevice.c
@@ -155,8 +155,10 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus)
SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;
intel_prop = devm_kzalloc(bus->dev, sizeof(*intel_prop), GFP_KERNEL);
- if (!intel_prop)
+ if (!intel_prop) {
+ fwnode_handle_put(link);
return -ENOMEM;
+ }
/* initialize with hardware defaults, in case the properties are not found */
intel_prop->doaise = 0x1;
@@ -184,6 +186,8 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus)
intel_prop->dodse,
intel_prop->dods);
+ fwnode_handle_put(link);
+
return 0;
}
diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c
index 55a9c51c84c1..e5d9df26d4dc 100644
--- a/drivers/soundwire/mipi_disco.c
+++ b/drivers/soundwire/mipi_disco.c
@@ -66,8 +66,10 @@ int sdw_master_read_prop(struct sdw_bus *bus)
prop->clk_freq = devm_kcalloc(bus->dev, prop->num_clk_freq,
sizeof(*prop->clk_freq),
GFP_KERNEL);
- if (!prop->clk_freq)
+ if (!prop->clk_freq) {
+ fwnode_handle_put(link);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(link,
"mipi-sdw-clock-frequencies-supported",
@@ -92,8 +94,10 @@ int sdw_master_read_prop(struct sdw_bus *bus)
prop->clk_gears = devm_kcalloc(bus->dev, prop->num_clk_gears,
sizeof(*prop->clk_gears),
GFP_KERNEL);
- if (!prop->clk_gears)
+ if (!prop->clk_gears) {
+ fwnode_handle_put(link);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(link,
"mipi-sdw-supported-clock-gears",
@@ -116,6 +120,8 @@ int sdw_master_read_prop(struct sdw_bus *bus)
fwnode_property_read_u32(link, "mipi-sdw-command-error-threshold",
&prop->err_threshold);
+ fwnode_handle_put(link);
+
return 0;
}
EXPORT_SYMBOL(sdw_master_read_prop);
@@ -197,8 +203,10 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
dpn[i].num_words,
sizeof(*dpn[i].words),
GFP_KERNEL);
- if (!dpn[i].words)
+ if (!dpn[i].words) {
+ fwnode_handle_put(node);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(node,
"mipi-sdw-port-wordlength-configs",
@@ -236,8 +244,10 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
dpn[i].num_channels,
sizeof(*dpn[i].channels),
GFP_KERNEL);
- if (!dpn[i].channels)
+ if (!dpn[i].channels) {
+ fwnode_handle_put(node);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(node,
"mipi-sdw-channel-number-list",
@@ -251,8 +261,10 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
dpn[i].num_ch_combinations,
sizeof(*dpn[i].ch_combinations),
GFP_KERNEL);
- if (!dpn[i].ch_combinations)
+ if (!dpn[i].ch_combinations) {
+ fwnode_handle_put(node);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(node,
"mipi-sdw-channel-combination-list",
@@ -274,6 +286,8 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
/* TODO: Read audio mode */
+ fwnode_handle_put(node);
+
i++;
}
@@ -348,10 +362,14 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
prop->dp0_prop = devm_kzalloc(&slave->dev,
sizeof(*prop->dp0_prop),
GFP_KERNEL);
- if (!prop->dp0_prop)
+ if (!prop->dp0_prop) {
+ fwnode_handle_put(port);
return -ENOMEM;
+ }
sdw_slave_read_dp0(slave, port, prop->dp0_prop);
+
+ fwnode_handle_put(port);
}
/*
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index 1d267e6c22a4..84eb454ed443 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -315,7 +315,6 @@ static struct parport_driver butterfly_driver = {
.name = "spi_butterfly",
.match_port = butterfly_attach,
.detach = butterfly_detach,
- .devmodel = true,
};
module_parport_driver(butterfly_driver);
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
index 2209e9fc378f..2e3eacd46b72 100644
--- a/drivers/spi/spi-cadence-xspi.c
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -145,6 +145,9 @@
#define CDNS_XSPI_STIG_DONE_FLAG BIT(0)
#define CDNS_XSPI_TRD_STATUS 0x0104
+#define MODE_NO_OF_BYTES GENMASK(25, 24)
+#define MODEBYTES_COUNT 1
+
/* Helper macros for filling command registers */
#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \
FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? \
@@ -157,9 +160,10 @@
FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF))
-#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, modebytes) ( \
FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \
+ FIELD_PREP(MODE_NO_OF_BYTES, modebytes) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes))
#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \
@@ -173,12 +177,12 @@
#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF)
-#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes) ( \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
((op)->data.nbytes >> 16) & 0xffff) | \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
(op)->dummy.buswidth != 0 ? \
- (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
+ (((dummybytes) * 8) / (op)->dummy.buswidth) : \
0))
#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
@@ -351,6 +355,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
u32 cmd_regs[6];
u32 cmd_status;
int ret;
+ int dummybytes = op->dummy.nbytes;
ret = cdns_xspi_wait_for_controller_idle(cdns_xspi);
if (ret < 0)
@@ -365,7 +370,12 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
memset(cmd_regs, 0, sizeof(cmd_regs));
cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
- cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
+ if (dummybytes != 0) {
+ cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 1);
+ dummybytes--;
+ } else {
+ cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 0);
+ }
cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op,
cdns_xspi->cur_cs);
@@ -375,7 +385,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG;
cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op);
cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op);
- cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op);
+ cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes);
cmd_regs[4] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op,
cdns_xspi->cur_cs);
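The new MODE_NO_OF_BYTES field is packed with the usual GENMASK()/FIELD_PREP() idiom, and one dummy byte is consumed as the mode byte when present. A reduced sketch of the field packing only, with invented register fields (the EXAMPLE_* names are not from the driver):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_MODE_BYTES	GENMASK(25, 24)	/* number of mode bytes, bits 25:24 */
#define EXAMPLE_ADDR_BYTES	GENMASK(30, 28)	/* number of address bytes, bits 30:28 */

static u32 pack_cmd_word(unsigned int mode_bytes, unsigned int addr_bytes)
{
	/*
	 * FIELD_PREP() shifts each value into its mask position; the mask
	 * must be a compile-time constant such as a GENMASK() expression.
	 */
	return FIELD_PREP(EXAMPLE_MODE_BYTES, mode_bytes) |
	       FIELD_PREP(EXAMPLE_ADDR_BYTES, addr_bytes);
}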
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index 9d747ea69926..8b618ef0f711 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -26,7 +26,7 @@
#include <linux/units.h>
#define CS42L43_FIFO_SIZE 16
-#define CS42L43_SPI_ROOT_HZ (40 * HZ_PER_MHZ)
+#define CS42L43_SPI_ROOT_HZ 49152000
#define CS42L43_SPI_MAX_LENGTH 65532
enum cs42l43_spi_cmd {
@@ -54,7 +54,7 @@ static const struct software_node ampr = {
static struct spi_board_info ampl_info = {
.modalias = "cs35l56",
- .max_speed_hz = 20 * HZ_PER_MHZ,
+ .max_speed_hz = 11 * HZ_PER_MHZ,
.chip_select = 0,
.mode = SPI_MODE_0,
.swnode = &ampl,
@@ -62,7 +62,7 @@ static struct spi_board_info ampl_info = {
static struct spi_board_info ampr_info = {
.modalias = "cs35l56",
- .max_speed_hz = 20 * HZ_PER_MHZ,
+ .max_speed_hz = 11 * HZ_PER_MHZ,
.chip_select = 1,
.mode = SPI_MODE_0,
.swnode = &ampr,
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index f4006c82f867..33164ebdb583 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -660,18 +660,8 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
ctrl |= (spi_imx->target_burst * 8 - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
else {
- if (spi_imx->usedma) {
- ctrl |= (spi_imx->bits_per_word - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
- } else {
- if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST)
- ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
- else
- ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
- BITS_PER_BYTE) * spi_imx->bits_per_word - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
- }
+ ctrl |= (spi_imx->bits_per_word - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
}
/* set clock speed */
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index 3c0c24ed1f3d..e61e89b4119f 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -318,7 +318,6 @@ static struct parport_driver spi_lm70llp_drv = {
.name = DRVNAME,
.match_port = spi_lm70llp_attach,
.detach = spi_lm70llp_detach,
- .devmodel = true,
};
module_parport_driver(spi_lm70llp_drv);
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index f1e922fd362a..955c920c4b63 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -349,7 +349,7 @@ static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi)
static int stm32_qspi_get_mode(u8 buswidth)
{
- if (buswidth == 4)
+ if (buswidth >= 4)
return CCR_BUSWIDTH_4;
return buswidth;
@@ -653,9 +653,7 @@ static int stm32_qspi_setup(struct spi_device *spi)
return -EINVAL;
mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL);
- if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) ||
- ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) &&
- gpiod_count(qspi->dev, "cs") == -ENOENT)) {
+ if (mode && gpiod_count(qspi->dev, "cs") == -ENOENT) {
dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n");
dev_err(qspi->dev, "configuration not supported\n");
@@ -676,10 +674,10 @@ static int stm32_qspi_setup(struct spi_device *spi)
qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
/*
- * Dual flash mode is only enable in case SPI_TX_OCTAL and SPI_TX_OCTAL
- * are both set in spi->mode and "cs-gpios" properties is found in DT
+ * Dual flash mode is only enabled in case SPI_TX_OCTAL or SPI_RX_OCTAL
+ * is set in spi->mode and the "cs-gpios" property is found in DT
*/
- if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) {
+ if (mode) {
qspi->cr_reg |= CR_DFM;
dev_dbg(qspi->dev, "Dual flash mode enable");
}
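The simplified setup check treats gpiod_count() returning -ENOENT as "no cs-gpios property present". In isolation that test looks like the sketch below (generic names, not the stm32 driver's):

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static bool has_cs_gpios(struct device *dev)
{
	int n = gpiod_count(dev, "cs");

	/*
	 * -ENOENT means the "cs-gpios" property is absent; any other
	 * negative value is a lookup error and n >= 0 is the GPIO count.
	 */
	return n >= 0;
}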
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 4a68abcdcc35..4c4ff074e3f6 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -1016,8 +1016,10 @@ end_irq:
static irqreturn_t stm32fx_spi_irq_thread(int irq, void *dev_id)
{
struct spi_controller *ctrl = dev_id;
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
spi_finalize_current_transfer(ctrl);
+ stm32fx_spi_disable(spi);
return IRQ_HANDLED;
}
@@ -1055,7 +1057,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
if (!(sr & mask)) {
- dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
+ dev_vdbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
sr, ier);
spin_unlock_irqrestore(&spi->lock, flags);
return IRQ_NONE;
@@ -1185,8 +1187,6 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
~clrb) | setb,
spi->base + spi->cfg->regs->cpol.reg);
- stm32_spi_enable(spi);
-
spin_unlock_irqrestore(&spi->lock, flags);
return 0;
@@ -1204,6 +1204,7 @@ static void stm32fx_spi_dma_tx_cb(void *data)
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
spi_finalize_current_transfer(spi->ctrl);
+ stm32fx_spi_disable(spi);
}
}
@@ -1218,6 +1219,7 @@ static void stm32_spi_dma_rx_cb(void *data)
struct stm32_spi *spi = data;
spi_finalize_current_transfer(spi->ctrl);
+ spi->cfg->disable(spi);
}
/**
@@ -1305,6 +1307,8 @@ static int stm32fx_spi_transfer_one_irq(struct stm32_spi *spi)
stm32_spi_set_bits(spi, STM32FX_SPI_CR2, cr2);
+ stm32_spi_enable(spi);
+
/* starting data transfer when buffer is loaded */
if (spi->tx_buf)
spi->cfg->write_tx(spi);
@@ -1341,6 +1345,8 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
spin_lock_irqsave(&spi->lock, flags);
+ stm32_spi_enable(spi);
+
/* Be sure to have data in fifo before starting data transfer */
if (spi->tx_buf)
stm32h7_spi_write_txfifo(spi);
@@ -1372,6 +1378,8 @@ static void stm32fx_spi_transfer_one_dma_start(struct stm32_spi *spi)
*/
stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_ERRIE);
}
+
+ stm32_spi_enable(spi);
}
/**
@@ -1405,6 +1413,8 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier);
+ stm32_spi_enable(spi);
+
if (STM32_SPI_HOST_MODE(spi))
stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
}
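Taken together, the stm32 hunks above move the controller enable from message preparation to the point where a transfer actually starts, and pair it with a disable once the transfer is finalized. A condensed sketch of that ordering; everything except spi_finalize_current_transfer() and spi_controller_get_devdata() is a placeholder, not a real stm32 driver symbol.

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spi/spi.h>

struct example_spi { void __iomem *base; };

static void hw_enable(struct example_spi *spi)    { /* set the enable bit */ }
static void hw_disable(struct example_spi *spi)   { /* clear the enable bit */ }
static void load_tx_fifo(struct example_spi *spi) { /* write the first words */ }

static int example_transfer_one_irq(struct example_spi *spi)
{
	hw_enable(spi);		/* enable only when a transfer really starts */
	load_tx_fifo(spi);
	return 1;		/* completion is signalled from the IRQ thread */
}

static irqreturn_t example_irq_thread(int irq, void *dev_id)
{
	struct spi_controller *ctrl = dev_id;
	struct example_spi *spi = spi_controller_get_devdata(ctrl);

	spi_finalize_current_transfer(ctrl);
	hw_disable(spi);	/* quiesce the IP between transfers */
	return IRQ_HANDLED;
}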
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 289feccca376..c957a7710777 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -689,10 +689,12 @@ static int __spi_add_device(struct spi_device *spi)
* Make sure that multiple logical CS doesn't map to the same physical CS.
* For example, spi->chip_select[0] != spi->chip_select[1] and so on.
*/
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
- status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
- if (status)
- return status;
+ if (!spi_controller_is_target(ctlr)) {
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
+ if (status)
+ return status;
+ }
}
/* Set the bus ID string */
@@ -1220,6 +1222,11 @@ void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}
+/* Dummy SG for unidirectional transfers */
+static struct scatterlist dummy_sg = {
+ .page_link = SG_END,
+};
+
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
struct device *tx_dev, *rx_dev;
@@ -1243,6 +1250,7 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
else
rx_dev = ctlr->dev.parent;
+ ret = -ENOMSG;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
/* The sync is done before each transfer. */
unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
@@ -1257,6 +1265,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
attrs);
if (ret != 0)
return ret;
+ } else {
+ xfer->tx_sg.sgl = &dummy_sg;
}
if (xfer->rx_buf != NULL) {
@@ -1270,8 +1280,13 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
return ret;
}
+ } else {
+ xfer->rx_sg.sgl = &dummy_sg;
}
}
+ /* No transfer has been mapped, bail out with success */
+ if (ret)
+ return 0;
ctlr->cur_rx_dma_dev = rx_dev;
ctlr->cur_tx_dma_dev = tx_dev;
@@ -1307,7 +1322,7 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
return 0;
}
-static void spi_dma_sync_for_device(struct spi_controller *ctlr,
+static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_message *msg,
struct spi_transfer *xfer)
{
struct device *rx_dev = ctlr->cur_rx_dma_dev;
@@ -1316,11 +1331,14 @@ static void spi_dma_sync_for_device(struct spi_controller *ctlr,
if (!ctlr->cur_msg_mapped)
return;
+ if (!ctlr->can_dma(ctlr, msg->spi, xfer))
+ return;
+
dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}
-static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_message *msg,
struct spi_transfer *xfer)
{
struct device *rx_dev = ctlr->cur_rx_dma_dev;
@@ -1329,6 +1347,9 @@ static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
if (!ctlr->cur_msg_mapped)
return;
+ if (!ctlr->can_dma(ctlr, msg->spi, xfer))
+ return;
+
dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
@@ -1346,11 +1367,13 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
}
static void spi_dma_sync_for_device(struct spi_controller *ctrl,
+ struct spi_message *msg,
struct spi_transfer *xfer)
{
}
static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
+ struct spi_message *msg,
struct spi_transfer *xfer)
{
}
@@ -1622,10 +1645,10 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
reinit_completion(&ctlr->xfer_completion);
fallback_pio:
- spi_dma_sync_for_device(ctlr, xfer);
+ spi_dma_sync_for_device(ctlr, msg, xfer);
ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
if (ret < 0) {
- spi_dma_sync_for_cpu(ctlr, xfer);
+ spi_dma_sync_for_cpu(ctlr, msg, xfer);
if (ctlr->cur_msg_mapped &&
(xfer->error & SPI_TRANS_FAIL_NO_START)) {
@@ -1650,7 +1673,7 @@ fallback_pio:
msg->status = ret;
}
- spi_dma_sync_for_cpu(ctlr, xfer);
+ spi_dma_sync_for_cpu(ctlr, msg, xfer);
} else {
if (xfer->len)
dev_err(&msg->spi->dev,
@@ -4135,7 +4158,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
return -EINVAL;
if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
xfer->tx_nbits != SPI_NBITS_DUAL &&
- xfer->tx_nbits != SPI_NBITS_QUAD)
+ xfer->tx_nbits != SPI_NBITS_QUAD &&
+ xfer->tx_nbits != SPI_NBITS_OCTAL)
return -EINVAL;
if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
@@ -4150,7 +4174,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
return -EINVAL;
if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
xfer->rx_nbits != SPI_NBITS_DUAL &&
- xfer->rx_nbits != SPI_NBITS_QUAD)
+ xfer->rx_nbits != SPI_NBITS_QUAD &&
+ xfer->rx_nbits != SPI_NBITS_OCTAL)
return -EINVAL;
if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
@@ -4357,6 +4382,34 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
return ctlr->transfer(spi, message);
}
+static void devm_spi_unoptimize_message(void *msg)
+{
+ spi_unoptimize_message(msg);
+}
+
+/**
+ * devm_spi_optimize_message - managed version of spi_optimize_message()
+ * @dev: the device that manages @msg (usually @spi->dev)
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ * Return: zero on success, else a negative error code
+ *
+ * spi_unoptimize_message() will automatically be called when the device is
+ * removed.
+ */
+int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
+ struct spi_message *msg)
+{
+ int ret;
+
+ ret = spi_optimize_message(spi, msg);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
+}
+EXPORT_SYMBOL_GPL(devm_spi_optimize_message);
+
/**
* spi_async - asynchronous SPI transfer
* @spi: device with which data will be exchanged
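For client drivers, the managed helper added above removes the need to call spi_unoptimize_message() in the remove path. A hedged usage sketch with a schematic message setup; struct example_drv and example_probe() are invented for illustration.

#include <linux/device.h>
#include <linux/spi/spi.h>

struct example_drv {
	struct spi_message msg;
	struct spi_transfer xfer;
	u8 buf[4];
};

static int example_probe(struct spi_device *spi)
{
	struct example_drv *drv;

	drv = devm_kzalloc(&spi->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->xfer.tx_buf = drv->buf;
	drv->xfer.len = sizeof(drv->buf);
	spi_message_init_with_transfers(&drv->msg, &drv->xfer, 1);

	/*
	 * Pre-compute the message once; it is unoptimized automatically
	 * when the managing device is removed.
	 */
	return devm_spi_optimize_message(&spi->dev, spi, &drv->msg);
}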
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 6c1f91c859ca..e6ad088636f6 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -109,12 +109,12 @@ static int adt7316_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id adt7316_i2c_id[] = {
- { "adt7316", 0 },
- { "adt7317", 0 },
- { "adt7318", 0 },
- { "adt7516", 0 },
- { "adt7517", 0 },
- { "adt7519", 0 },
+ { "adt7316" },
+ { "adt7317" },
+ { "adt7318" },
+ { "adt7516" },
+ { "adt7517" },
+ { "adt7519" },
{ }
};
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 79467f056a05..f4260786d50a 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -209,15 +209,6 @@ struct adt7316_chip_info {
#define ADT7316_TEMP_AIN_INT_MASK \
(ADT7316_TEMP_INT_MASK)
-/*
- * struct adt7316_chip_info - chip specific information
- */
-
-struct adt7316_limit_regs {
- u16 data_high;
- u16 data_low;
-};
-
static ssize_t adt7316_show_enabled(struct device *dev,
struct device_attribute *attr,
char *buf)
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index b7af5fe63e09..cd00d9607565 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -721,8 +721,8 @@ static int ad5933_probe(struct i2c_client *client)
}
static const struct i2c_device_id ad5933_id[] = {
- { "ad5933", 0 },
- { "ad5934", 0 },
+ { "ad5933" },
+ { "ad5934" },
{}
};
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 297af1d80b12..5f518e5a9273 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -707,7 +707,7 @@ int vchiq_initialise(struct vchiq_state *state, struct vchiq_instance **instance
* block forever.
*/
for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
- if (state)
+ if (vchiq_remote_initialised(state))
break;
usleep_range(500, 600);
}
@@ -1202,7 +1202,7 @@ void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f
{
int i;
- if (!state)
+ if (!vchiq_remote_initialised(state))
return;
/*
@@ -1759,7 +1759,7 @@ static int vchiq_probe(struct platform_device *pdev)
if (err)
goto failed_platform_init;
- vchiq_debugfs_init();
+ vchiq_debugfs_init(&mgmt->state);
dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
VCHIQ_VERSION, VCHIQ_VERSION_MIN);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 8af209e34fb2..382ec08f6a14 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -413,6 +413,11 @@ struct vchiq_state {
struct opaque_platform_state *platform_state;
};
+static inline bool vchiq_remote_initialised(const struct vchiq_state *state)
+{
+ return state->remote && state->remote->initialised;
+}
+
struct bulk_waiter {
struct vchiq_bulk *bulk;
struct completion event;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index 54e7bf029d9a..d5f7f61c5626 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -42,9 +42,9 @@ static int debugfs_trace_show(struct seq_file *f, void *offset)
static int vchiq_dump_show(struct seq_file *f, void *offset)
{
- struct vchiq_instance *instance = f->private;
+ struct vchiq_state *state = f->private;
- vchiq_dump_state(f, instance->state);
+ vchiq_dump_state(f, state);
return 0;
}
@@ -121,12 +121,12 @@ void vchiq_debugfs_remove_instance(struct vchiq_instance *instance)
debugfs_remove_recursive(node->dentry);
}
-void vchiq_debugfs_init(void)
+void vchiq_debugfs_init(struct vchiq_state *state)
{
vchiq_dbg_dir = debugfs_create_dir("vchiq", NULL);
vchiq_dbg_clients = debugfs_create_dir("clients", vchiq_dbg_dir);
- debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, NULL,
+ debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, state,
&vchiq_dump_fops);
}
@@ -138,7 +138,7 @@ void vchiq_debugfs_deinit(void)
#else /* CONFIG_DEBUG_FS */
-void vchiq_debugfs_init(void)
+void vchiq_debugfs_init(struct vchiq_state *state)
{
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
index e9bf055a4ca9..fabffd81b1ec 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
@@ -10,7 +10,7 @@ struct vchiq_debugfs_node {
struct dentry *dentry;
};
-void vchiq_debugfs_init(void);
+void vchiq_debugfs_init(struct vchiq_state *state);
void vchiq_debugfs_deinit(void);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
index 3c63347d2d08..430f2ed2ccd3 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
@@ -1170,6 +1170,11 @@ static int vchiq_open(struct inode *inode, struct file *file)
dev_dbg(state->dev, "arm: vchiq open\n");
+ if (!vchiq_remote_initialised(state)) {
+ dev_dbg(state->dev, "arm: vchiq has no connection to VideoCore\n");
+ return -ENOTCONN;
+ }
+
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
if (!instance)
return -ENOMEM;
@@ -1200,7 +1205,7 @@ static int vchiq_release(struct inode *inode, struct file *file)
dev_dbg(state->dev, "arm: instance=%p\n", instance);
- if (!state) {
+ if (!vchiq_remote_initialised(state)) {
ret = -EPERM;
goto out;
}
diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c
index e0fdc497bfcc..fd5527188cf9 100644
--- a/drivers/thermal/gov_step_wise.c
+++ b/drivers/thermal/gov_step_wise.c
@@ -55,7 +55,11 @@ static unsigned long get_target_state(struct thermal_instance *instance,
if (cur_state <= instance->lower)
return THERMAL_NO_TARGET;
- return clamp(cur_state - 1, instance->lower, instance->upper);
+ /*
+ * If 'throttle' is false, no mitigation is necessary, so
+ * request the lower state for this instance.
+ */
+ return instance->lower;
}
return instance->target;
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index 14e34eabc419..4a1bfebb1b8e 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -150,7 +150,7 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
{
struct proc_thermal_pci *pci_info = devid;
struct proc_thermal_device *proc_priv;
- int ret = IRQ_HANDLED;
+ int ret = IRQ_NONE;
u32 status;
proc_priv = pci_info->proc_priv;
@@ -175,6 +175,7 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
/* Disable enable interrupt flag */
proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
pkg_thermal_schedule_work(&pci_info->work);
+ ret = IRQ_HANDLED;
}
pci_write_config_byte(pci_info->pdev, 0xdc, 0x01);
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index 0bb3a495b56e..819ed0110f3e 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -769,7 +769,11 @@ static int lvts_golden_temp_init(struct device *dev, u8 *calib,
*/
gt = (((u32 *)calib)[0] >> lvts_data->gt_calib_bit_offset) & 0xff;
- if (gt && gt < LVTS_GOLDEN_TEMP_MAX)
+ /* A zero value for gt means that device has invalid efuse data */
+ if (!gt)
+ return -ENODATA;
+
+ if (gt < LVTS_GOLDEN_TEMP_MAX)
golden_temp = gt;
golden_temp_offset = golden_temp * 500 + lvts_data->temp_offset;
@@ -1458,7 +1462,6 @@ static const struct lvts_ctrl_data mt8188_lvts_mcu_data_ctrl[] = {
},
VALID_SENSOR_MAP(1, 1, 1, 1),
.offset = 0x0,
- .mode = LVTS_MSR_FILTERED_MODE,
},
{
.lvts_sensor = {
@@ -1469,7 +1472,6 @@ static const struct lvts_ctrl_data mt8188_lvts_mcu_data_ctrl[] = {
},
VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x100,
- .mode = LVTS_MSR_FILTERED_MODE,
}
};
@@ -1483,7 +1485,6 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = {
},
VALID_SENSOR_MAP(0, 1, 0, 0),
.offset = 0x0,
- .mode = LVTS_MSR_FILTERED_MODE,
},
{
.lvts_sensor = {
@@ -1496,7 +1497,6 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = {
},
VALID_SENSOR_MAP(1, 1, 1, 0),
.offset = 0x100,
- .mode = LVTS_MSR_FILTERED_MODE,
},
{
.lvts_sensor = {
@@ -1507,7 +1507,6 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = {
},
VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x200,
- .mode = LVTS_MSR_FILTERED_MODE,
},
{
.lvts_sensor = {
@@ -1518,7 +1517,6 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = {
},
VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x300,
- .mode = LVTS_MSR_FILTERED_MODE,
}
};
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 54cce4e523bc..1b0ab2790860 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -467,6 +467,21 @@ static void thermal_governor_trip_crossed(struct thermal_governor *governor,
governor->trip_crossed(tz, trip, crossed_up);
}
+static void thermal_trip_crossed(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ struct thermal_governor *governor,
+ bool crossed_up)
+{
+ if (crossed_up) {
+ thermal_notify_tz_trip_up(tz, trip);
+ thermal_debug_tz_trip_up(tz, trip);
+ } else {
+ thermal_notify_tz_trip_down(tz, trip);
+ thermal_debug_tz_trip_down(tz, trip);
+ }
+ thermal_governor_trip_crossed(governor, tz, trip, crossed_up);
+}
+
static int thermal_trip_notify_cmp(void *ascending, const struct list_head *a,
const struct list_head *b)
{
@@ -506,18 +521,12 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
handle_thermal_trip(tz, td, &way_up_list, &way_down_list);
list_sort(&way_up_list, &way_up_list, thermal_trip_notify_cmp);
- list_for_each_entry(td, &way_up_list, notify_list_node) {
- thermal_notify_tz_trip_up(tz, &td->trip);
- thermal_debug_tz_trip_up(tz, &td->trip);
- thermal_governor_trip_crossed(governor, tz, &td->trip, true);
- }
+ list_for_each_entry(td, &way_up_list, notify_list_node)
+ thermal_trip_crossed(tz, &td->trip, governor, true);
list_sort(NULL, &way_down_list, thermal_trip_notify_cmp);
- list_for_each_entry(td, &way_down_list, notify_list_node) {
- thermal_notify_tz_trip_down(tz, &td->trip);
- thermal_debug_tz_trip_down(tz, &td->trip);
- thermal_governor_trip_crossed(governor, tz, &td->trip, false);
- }
+ list_for_each_entry(td, &way_down_list, notify_list_node)
+ thermal_trip_crossed(tz, &td->trip, governor, false);
if (governor->manage)
governor->manage(tz);
@@ -593,6 +602,12 @@ void thermal_zone_device_update(struct thermal_zone_device *tz,
}
EXPORT_SYMBOL_GPL(thermal_zone_device_update);
+void thermal_zone_trip_down(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip)
+{
+ thermal_trip_crossed(tz, trip, thermal_get_tz_governor(tz), false);
+}
+
int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
void *data)
{
@@ -984,9 +999,17 @@ __thermal_cooling_device_register(struct device_node *np,
if (ret)
goto out_cdev_type;
+ /*
+ * The cooling device's current state is only needed for debug
+ * initialization below, so a failure to get it does not cause
+ * the entire cooling device initialization to fail. However,
+ * the debug will not work for the device if its initial state
+ * cannot be determined and drivers are responsible for ensuring
+ * that this will not happen.
+ */
ret = cdev->ops->get_cur_state(cdev, &current_state);
if (ret)
- goto out_cdev_type;
+ current_state = ULONG_MAX;
thermal_cooling_device_setup_sysfs(cdev);
@@ -1001,7 +1024,8 @@ __thermal_cooling_device_register(struct device_node *np,
return ERR_PTR(ret);
}
- thermal_debug_cdev_add(cdev, current_state);
+ if (current_state <= cdev->max_state)
+ thermal_debug_cdev_add(cdev, current_state);
/* Add 'this' new cdev to the global cdev list */
mutex_lock(&thermal_list_lock);
@@ -1382,6 +1406,7 @@ thermal_zone_device_register_with_trips(const char *type,
ida_init(&tz->ida);
mutex_init(&tz->lock);
init_completion(&tz->removal);
+ init_completion(&tz->resume);
id = ida_alloc(&thermal_tz_ida, GFP_KERNEL);
if (id < 0) {
result = id;
@@ -1627,6 +1652,9 @@ static void thermal_zone_device_resume(struct work_struct *work)
thermal_zone_device_init(tz);
__thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+ complete(&tz->resume);
+ tz->resuming = false;
+
mutex_unlock(&tz->lock);
}
@@ -1644,6 +1672,20 @@ static int thermal_pm_notify(struct notifier_block *nb,
list_for_each_entry(tz, &thermal_tz_list, node) {
mutex_lock(&tz->lock);
+ if (tz->resuming) {
+ /*
+ * thermal_zone_device_resume() queued up for
+ * this zone has not acquired the lock yet, so
+ * release it to let the function run and wait
+ * until it has done the work.
+ */
+ mutex_unlock(&tz->lock);
+
+ wait_for_completion(&tz->resume);
+
+ mutex_lock(&tz->lock);
+ }
+
tz->suspended = true;
mutex_unlock(&tz->lock);
@@ -1661,6 +1703,9 @@ static int thermal_pm_notify(struct notifier_block *nb,
cancel_delayed_work(&tz->poll_queue);
+ reinit_completion(&tz->resume);
+ tz->resuming = true;
+
/*
* Replace the work function with the resume one, which
* will restore the original work function and schedule
@@ -1685,6 +1730,12 @@ static int thermal_pm_notify(struct notifier_block *nb,
static struct notifier_block thermal_pm_nb = {
.notifier_call = thermal_pm_notify,
+ /*
+ * Run at the lowest priority to avoid interference between the thermal
+ * zone resume work items spawned by thermal_pm_notify() and the other
+ * PM notifiers.
+ */
+ .priority = INT_MIN,
};
static int __init thermal_init(void)
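The suspend path above has to wait for a resume work item that may not have taken the zone lock yet; the completion-plus-flag pattern it uses is shown in isolation below. The names are generic stand-ins, not the thermal core's own structures.

#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct example_zone {
	struct mutex lock;
	struct completion resume;
	bool resuming;
	bool suspended;
};

static void example_resume_work(struct example_zone *zone)
{
	mutex_lock(&zone->lock);
	/* ... re-initialize and update the zone ... */
	complete(&zone->resume);
	zone->resuming = false;
	mutex_unlock(&zone->lock);
}

static void example_suspend(struct example_zone *zone)
{
	mutex_lock(&zone->lock);
	if (zone->resuming) {
		/*
		 * Drop the lock so the queued resume work can finish,
		 * then wait for it before marking the zone suspended.
		 */
		mutex_unlock(&zone->lock);
		wait_for_completion(&zone->resume);
		mutex_lock(&zone->lock);
	}
	zone->suspended = true;
	mutex_unlock(&zone->lock);
}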
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index d9785e5bbb08..66f67e54e0c8 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -55,6 +55,7 @@ struct thermal_governor {
* @type: the thermal zone device type
* @device: &struct device for this thermal zone
* @removal: removal completion
+ * @resume: resume completion
* @trip_temp_attrs: attributes for trip points for sysfs: trip temperature
* @trip_type_attrs: attributes for trip points for sysfs: trip type
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
@@ -89,6 +90,7 @@ struct thermal_governor {
* @poll_queue: delayed work for polling
* @notify_event: Last notification event
* @suspended: thermal zone suspend indicator
+ * @resuming: indicates whether or not thermal zone resume is in progress
* @trips: array of struct thermal_trip objects
*/
struct thermal_zone_device {
@@ -96,6 +98,7 @@ struct thermal_zone_device {
char type[THERMAL_NAME_LENGTH];
struct device device;
struct completion removal;
+ struct completion resume;
struct attribute_group trips_attribute_group;
struct thermal_attr *trip_temp_attrs;
struct thermal_attr *trip_type_attrs;
@@ -123,6 +126,7 @@ struct thermal_zone_device {
struct delayed_work poll_queue;
enum thermal_notify_event notify_event;
bool suspended;
+ bool resuming;
#ifdef CONFIG_THERMAL_DEBUGFS
struct thermal_debugfs *debugfs;
#endif
@@ -246,6 +250,8 @@ int thermal_zone_trip_id(const struct thermal_zone_device *tz,
void thermal_zone_trip_updated(struct thermal_zone_device *tz,
const struct thermal_trip *trip);
int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
+void thermal_zone_trip_down(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip);
/* sysfs I/F */
int thermal_zone_create_device_groups(struct thermal_zone_device *tz);
diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c
index 91f9c21235a8..942447229157 100644
--- a/drivers/thermal/thermal_debugfs.c
+++ b/drivers/thermal/thermal_debugfs.c
@@ -91,6 +91,8 @@ struct cdev_record {
*
* @timestamp: the trip crossing timestamp
* @duration: total time when the zone temperature was above the trip point
+ * @trip_temp: trip temperature at mitigation start
+ * @trip_hyst: trip hysteresis at mitigation start
* @count: the number of times the zone temperature was above the trip point
* @max: maximum recorded temperature above the trip point
* @min: minimum recorded temperature above the trip point
@@ -99,6 +101,8 @@ struct cdev_record {
struct trip_stats {
ktime_t timestamp;
ktime_t duration;
+ int trip_temp;
+ int trip_hyst;
int count;
int max;
int min;
@@ -574,6 +578,7 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
struct thermal_debugfs *thermal_dbg = tz->debugfs;
int trip_id = thermal_zone_trip_id(tz, trip);
ktime_t now = ktime_get();
+ struct trip_stats *trip_stats;
if (!thermal_dbg)
return;
@@ -639,7 +644,10 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
tz_dbg->trips_crossed[tz_dbg->nr_trips++] = trip_id;
tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
- tze->trip_stats[trip_id].timestamp = now;
+ trip_stats = &tze->trip_stats[trip_id];
+ trip_stats->trip_temp = trip->temperature;
+ trip_stats->trip_hyst = trip->hysteresis;
+ trip_stats->timestamp = now;
unlock:
mutex_unlock(&thermal_dbg->lock);
@@ -794,10 +802,6 @@ static int tze_seq_show(struct seq_file *s, void *v)
const struct thermal_trip *trip = &td->trip;
struct trip_stats *trip_stats;
- /* Skip invalid trips. */
- if (trip->temperature == THERMAL_TEMP_INVALID)
- continue;
-
/*
* There is no possible mitigation happening at the
* critical trip point, so the stats will be always
@@ -836,8 +840,8 @@ static int tze_seq_show(struct seq_file *s, void *v)
seq_printf(s, "| %*d | %*s | %*d | %*d | %c%*lld | %*d | %*d | %*d |\n",
4 , trip_id,
8, type,
- 9, trip->temperature,
- 9, trip->hysteresis,
+ 9, trip_stats->trip_temp,
+ 9, trip_stats->trip_hyst,
c, 10, duration_ms,
9, trip_stats->avg,
9, trip_stats->min,
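The debugfs change snapshots the trip temperature and hysteresis when mitigation starts, so a later trip-point update does not distort the reported episode. The snapshot itself is just a copy taken at crossing time, roughly as below; struct trip_snapshot is an invented stand-in for the driver's trip_stats.

#include <linux/ktime.h>
#include <linux/thermal.h>

struct trip_snapshot {
	ktime_t timestamp;
	int trip_temp;	/* millidegrees Celsius at mitigation start */
	int trip_hyst;
};

static void snapshot_trip(struct trip_snapshot *s, const struct thermal_trip *trip)
{
	s->timestamp = ktime_get();
	s->trip_temp = trip->temperature;
	s->trip_hyst = trip->hysteresis;
}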
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index d6a6acc78ddb..49e63db68517 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -152,17 +152,23 @@ void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
if (trip->temperature == temp)
return;
+ trip->temperature = temp;
+ thermal_notify_tz_trip_change(tz, trip);
+
if (temp == THERMAL_TEMP_INVALID) {
struct thermal_trip_desc *td = trip_to_trip_desc(trip);
- if (trip->type == THERMAL_TRIP_PASSIVE &&
- tz->temperature >= td->threshold) {
+ if (tz->temperature >= td->threshold) {
/*
- * The trip has been crossed, so the thermal zone's
- * passive count needs to be adjusted.
+ * The trip has been crossed on the way up, so some
+ * adjustments are needed to compensate for the lack
+ * of it going forward.
*/
- tz->passive--;
- WARN_ON_ONCE(tz->passive < 0);
+ if (trip->type == THERMAL_TRIP_PASSIVE) {
+ tz->passive--;
+ WARN_ON_ONCE(tz->passive < 0);
+ }
+ thermal_zone_trip_down(tz, trip);
}
/*
* Invalidate the threshold to avoid triggering a spurious
@@ -170,7 +176,5 @@ void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
*/
td->threshold = INT_MAX;
}
- trip->temperature = temp;
- thermal_notify_tz_trip_change(tz, trip);
}
EXPORT_SYMBOL_GPL(thermal_zone_set_trip_temp);
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index 193e9dfc983b..70b52aac3d97 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -943,8 +943,9 @@ static void margining_port_init(struct tb_port *port)
debugfs_create_file("run", 0600, dir, port, &margining_run_fops);
debugfs_create_file("results", 0600, dir, port, &margining_results_fops);
debugfs_create_file("test", 0600, dir, port, &margining_test_fops);
- if (independent_voltage_margins(usb4) ||
- (supports_time(usb4) && independent_time_margins(usb4)))
+ if (independent_voltage_margins(usb4) == USB4_MARGIN_CAP_0_VOLTAGE_HL ||
+ (supports_time(usb4) &&
+ independent_time_margins(usb4) == USB4_MARGIN_CAP_1_TIME_LR))
debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops);
}
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 458bb1280ebf..5b97e420a95f 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -288,7 +288,7 @@ struct mxser_board {
enum mxser_must_hwid must_hwid;
speed_t max_baud;
- struct mxser_port ports[] __counted_by(nports);
+ struct mxser_port ports[] /* __counted_by(nports) */;
};
static DECLARE_BITMAP(mxser_boards, MXSER_BOARDS);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index f252d0b5a434..5e9ca4376d68 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1619,15 +1619,25 @@ static void __receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp,
else if (ldata->raw || (L_EXTPROC(tty) && !preops))
n_tty_receive_buf_raw(tty, cp, fp, count);
else if (tty->closing && !L_EXTPROC(tty)) {
- if (la_count > 0)
+ if (la_count > 0) {
n_tty_receive_buf_closing(tty, cp, fp, la_count, true);
- if (count > la_count)
- n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false);
+ cp += la_count;
+ if (fp)
+ fp += la_count;
+ count -= la_count;
+ }
+ if (count > 0)
+ n_tty_receive_buf_closing(tty, cp, fp, count, false);
} else {
- if (la_count > 0)
+ if (la_count > 0) {
n_tty_receive_buf_standard(tty, cp, fp, la_count, true);
- if (count > la_count)
- n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false);
+ cp += la_count;
+ if (fp)
+ fp += la_count;
+ count -= la_count;
+ }
+ if (count > 0)
+ n_tty_receive_buf_standard(tty, cp, fp, count, false);
flush_echoes(tty);
if (tty->ops->flush_chars)
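The n_tty fix above stops the second pass from re-reading the bytes already consumed as lookahead: the buffer pointers are advanced by la_count before the remainder is handled. The same bookkeeping in a reduced form, where process_bytes() is a placeholder for n_tty_receive_buf_standard()/_closing():

#include <linux/types.h>

static void process_bytes(const u8 *cp, const u8 *fp, size_t n, bool lookahead)
{
	/* placeholder for the line-discipline receive helpers */
}

static void receive_buf(const u8 *cp, const u8 *fp, size_t count, size_t la_count)
{
	if (la_count > 0) {
		process_bytes(cp, fp, la_count, true);
		/* Skip past the lookahead bytes so they are not handled twice. */
		cp += la_count;
		if (fp)
			fp += la_count;
		count -= la_count;
	}
	if (count > 0)
		process_bytes(cp, fp, count, false);
}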
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index ff15022369e4..b0adafc44747 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -15,7 +15,6 @@
*/
#include <linux/acpi.h>
-#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
@@ -42,8 +41,6 @@
#include <asm/irq.h>
-#include "../serial_base.h" /* For serial_base_add_isa_preferred_console() */
-
#include "8250.h"
/*
@@ -563,8 +560,6 @@ static void __init serial8250_isa_init_ports(void)
port->irqflags |= irqflag;
if (serial8250_isa_config != NULL)
serial8250_isa_config(i, &up->port, &up->capabilities);
-
- serial_base_add_isa_preferred_console(serial8250_reg.dev_name, i);
}
}
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index ba9f4dc4e71d..fb809e32c6ae 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -55,6 +55,34 @@
#define DW_UART_QUIRK_SKIP_SET_RATE BIT(2)
#define DW_UART_QUIRK_IS_DMA_FC BIT(3)
#define DW_UART_QUIRK_APMC0D08 BIT(4)
+#define DW_UART_QUIRK_CPR_VALUE BIT(5)
+
+struct dw8250_platform_data {
+ u8 usr_reg;
+ u32 cpr_value;
+ unsigned int quirks;
+};
+
+struct dw8250_data {
+ struct dw8250_port_data data;
+ const struct dw8250_platform_data *pdata;
+
+ int msr_mask_on;
+ int msr_mask_off;
+ struct clk *clk;
+ struct clk *pclk;
+ struct notifier_block clk_notifier;
+ struct work_struct clk_work;
+ struct reset_control *rst;
+
+ unsigned int skip_autocfg:1;
+ unsigned int uart_16550_compatible:1;
+};
+
+static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
+{
+ return container_of(data, struct dw8250_data, data);
+}
static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb)
{
@@ -432,6 +460,10 @@ static void dw8250_prepare_rx_dma(struct uart_8250_port *p)
static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
{
unsigned int quirks = data->pdata ? data->pdata->quirks : 0;
+ u32 cpr_value = data->pdata ? data->pdata->cpr_value : 0;
+
+ if (quirks & DW_UART_QUIRK_CPR_VALUE)
+ data->data.cpr_value = cpr_value;
#ifdef CONFIG_64BIT
if (quirks & DW_UART_QUIRK_OCTEON) {
@@ -714,8 +746,8 @@ static const struct dw8250_platform_data dw8250_armada_38x_data = {
static const struct dw8250_platform_data dw8250_renesas_rzn1_data = {
.usr_reg = DW_UART_USR,
- .cpr_val = 0x00012f32,
- .quirks = DW_UART_QUIRK_IS_DMA_FC,
+ .cpr_value = 0x00012f32,
+ .quirks = DW_UART_QUIRK_CPR_VALUE | DW_UART_QUIRK_IS_DMA_FC,
};
static const struct dw8250_platform_data dw8250_starfive_jh7100_data = {
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index 3e33ddf7bc80..5a2520943dfd 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -242,7 +242,6 @@ static const struct serial_rs485 dw8250_rs485_supported = {
void dw8250_setup_port(struct uart_port *p)
{
struct dw8250_port_data *pd = p->private_data;
- struct dw8250_data *data = to_dw8250_data(pd);
struct uart_8250_port *up = up_to_u8250p(p);
u32 reg, old_dlf;
@@ -278,7 +277,7 @@ void dw8250_setup_port(struct uart_port *p)
reg = dw8250_readl_ext(p, DW_UART_CPR);
if (!reg) {
- reg = data->pdata->cpr_val;
+ reg = pd->cpr_value;
dev_dbg(p->dev, "CPR is not available, using 0x%08x instead\n", reg);
}
if (!reg)
diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h
index f13e91f2cace..7dd2a8e7b780 100644
--- a/drivers/tty/serial/8250/8250_dwlib.h
+++ b/drivers/tty/serial/8250/8250_dwlib.h
@@ -2,15 +2,10 @@
/* Synopsys DesignWare 8250 library header file. */
#include <linux/io.h>
-#include <linux/notifier.h>
#include <linux/types.h>
-#include <linux/workqueue.h>
#include "8250.h"
-struct clk;
-struct reset_control;
-
struct dw8250_port_data {
/* Port properties */
int line;
@@ -19,42 +14,16 @@ struct dw8250_port_data {
struct uart_8250_dma dma;
/* Hardware configuration */
+ u32 cpr_value;
u8 dlf_size;
/* RS485 variables */
bool hw_rs485_support;
};
-struct dw8250_platform_data {
- u8 usr_reg;
- u32 cpr_val;
- unsigned int quirks;
-};
-
-struct dw8250_data {
- struct dw8250_port_data data;
- const struct dw8250_platform_data *pdata;
-
- int msr_mask_on;
- int msr_mask_off;
- struct clk *clk;
- struct clk *pclk;
- struct notifier_block clk_notifier;
- struct work_struct clk_work;
- struct reset_control *rst;
-
- unsigned int skip_autocfg:1;
- unsigned int uart_16550_compatible:1;
-};
-
void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, const struct ktermios *old);
void dw8250_setup_port(struct uart_port *p);
-static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
-{
- return container_of(data, struct dw8250_data, data);
-}
-
static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
{
if (p->iotype == UPIO_MEM32BE)
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 170639d12b2a..ddac0a13cf84 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -115,6 +115,10 @@
/* RX FIFO occupancy indicator */
#define UART_OMAP_RX_LVL 0x19
+/* Timeout Low and High registers */
+#define UART_OMAP_TO_L 0x26
+#define UART_OMAP_TO_H 0x27
+
/*
* Copy of the genpd flags for the console.
* Only used if console suspend is disabled
@@ -663,13 +667,24 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
/*
* On K3 SoCs, it is observed that RX TIMEOUT is signalled after
- * FIFO has been drained, in which case a dummy read of RX FIFO
- * is required to clear RX TIMEOUT condition.
+ * FIFO has been drained, or is signalled erroneously.
+ * So apply the workaround for Errata i2310 as described in
+ * https://www.ti.com/lit/pdf/sprz536
*/
if (priv->habit & UART_RX_TIMEOUT_QUIRK &&
- (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT &&
- serial_port_in(port, UART_OMAP_RX_LVL) == 0) {
- serial_port_in(port, UART_RX);
+ (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT) {
+ unsigned char efr2, timeout_h, timeout_l;
+
+ efr2 = serial_in(up, UART_OMAP_EFR2);
+ timeout_h = serial_in(up, UART_OMAP_TO_H);
+ timeout_l = serial_in(up, UART_OMAP_TO_L);
+ serial_out(up, UART_OMAP_TO_H, 0xFF);
+ serial_out(up, UART_OMAP_TO_L, 0xFF);
+ serial_out(up, UART_OMAP_EFR2, UART_OMAP_EFR2_TIMEOUT_BEHAVE);
+ serial_in(up, UART_IIR);
+ serial_out(up, UART_OMAP_EFR2, efr2);
+ serial_out(up, UART_OMAP_TO_H, timeout_h);
+ serial_out(up, UART_OMAP_TO_L, timeout_l);
}
/* Stop processing interrupts on input overrun */
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 40af74b55933..e1d7aa2fa347 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1985,6 +1985,17 @@ enum {
MOXA_SUPP_RS485 = BIT(2),
};
+static unsigned short moxa_get_nports(unsigned short device)
+{
+ switch (device) {
+ case PCI_DEVICE_ID_MOXA_CP116E_A_A:
+ case PCI_DEVICE_ID_MOXA_CP116E_A_B:
+ return 8;
+ }
+
+ return FIELD_GET(0x00F0, device);
+}
+
static bool pci_moxa_is_mini_pcie(unsigned short device)
{
if (device == PCI_DEVICE_ID_MOXA_CP102N ||
@@ -2038,7 +2049,7 @@ static int pci_moxa_init(struct pci_dev *dev)
{
unsigned short device = dev->device;
resource_size_t iobar_addr = pci_resource_start(dev, 2);
- unsigned int num_ports = (device & 0x00F0) >> 4, i;
+ unsigned int i, num_ports = moxa_get_nports(device);
u8 val, init_mode = MOXA_RS232;
if (!(pci_moxa_supported_rs(dev) & MOXA_SUPP_RS232)) {
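moxa_get_nports() keeps the old encoding, in which bits 7:4 of the PCI device ID hold the port count, and special-cases the CP116E variants that do not follow it. A small illustration of the FIELD_GET() extraction, using an invented device ID value:

#include <linux/bitfield.h>

static unsigned int example_nports(unsigned short device)
{
	/* e.g. a hypothetical device ID of 0x1041 encodes 4 ports in bits 7:4 */
	return FIELD_GET(0x00F0, device);
}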
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index f1a51b00b1b9..ba96fa913e7f 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -125,6 +125,7 @@ static int serial_pxa_probe(struct platform_device *pdev)
uart.port.iotype = UPIO_MEM32;
uart.port.regshift = 2;
uart.port.fifosize = 64;
+ uart.tx_loadsz = 32;
uart.dl_write = serial_pxa_dl_write;
ret = serial8250_register_8250_port(&uart);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 4fdd7857ef4d..28e4beeabf8f 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1023,8 +1023,9 @@ config SERIAL_SCCNXP_CONSOLE
help
Support for console on SCCNXP serial ports.
-config SERIAL_SC16IS7XX_CORE
+config SERIAL_SC16IS7XX
tristate "NXP SC16IS7xx UART support"
+ depends on SPI_MASTER || I2C
select SERIAL_CORE
select SERIAL_SC16IS7XX_SPI if SPI_MASTER
select SERIAL_SC16IS7XX_I2C if I2C
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index faa45f2b8bb0..6ff74f0a9530 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -75,7 +75,7 @@ obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
obj-$(CONFIG_SERIAL_SAMSUNG) += samsung_tty.o
obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o
-obj-$(CONFIG_SERIAL_SC16IS7XX_CORE) += sc16is7xx.o
+obj-$(CONFIG_SERIAL_SC16IS7XX) += sc16is7xx.o
obj-$(CONFIG_SERIAL_SC16IS7XX_SPI) += sc16is7xx_spi.o
obj-$(CONFIG_SERIAL_SC16IS7XX_I2C) += sc16is7xx_i2c.o
obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 34801a6f300b..b88cc28c94e3 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -308,8 +308,8 @@ static void bcm_uart_do_tx(struct uart_port *port)
val = bcm_uart_readl(port, UART_MCTL_REG);
val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT;
-
- pending = uart_port_tx_limited(port, ch, port->fifosize - val,
+ pending = uart_port_tx_limited_flags(port, ch, UART_TX_NOSTOP,
+ port->fifosize - val,
true,
bcm_uart_writel(port, ch, UART_FIFO_REG),
({}));
@@ -320,6 +320,9 @@ static void bcm_uart_do_tx(struct uart_port *port)
val = bcm_uart_readl(port, UART_IR_REG);
val &= ~UART_TX_INT_MASK;
bcm_uart_writel(port, val, UART_IR_REG);
+
+ if (uart_tx_stopped(port))
+ bcm_uart_stop_tx(port);
}
/*
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 2eb22594960f..f4f40c9373c2 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1952,8 +1952,10 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio
/* Make sure Rx is enabled in case Tx is active with Rx disabled */
if (!(rs485conf->flags & SER_RS485_ENABLED) ||
- rs485conf->flags & SER_RS485_RX_DURING_TX)
+ rs485conf->flags & SER_RS485_RX_DURING_TX) {
+ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
imx_uart_start_rx(port);
+ }
return 0;
}
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index b0604d6da025..58858dd352c5 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -462,7 +462,7 @@ static const struct uart_ops mcf_uart_ops = {
.verify_port = mcf_verify_port,
};
-static struct mcf_uart mcf_ports[4];
+static struct mcf_uart mcf_ports[10];
#define MCF_MAXPORTS ARRAY_SIZE(mcf_ports)
diff --git a/drivers/tty/serial/serial_base.h b/drivers/tty/serial/serial_base.h
index 743a72ac34f3..b6c38d2edfd4 100644
--- a/drivers/tty/serial/serial_base.h
+++ b/drivers/tty/serial/serial_base.h
@@ -49,33 +49,3 @@ void serial_ctrl_unregister_port(struct uart_driver *drv, struct uart_port *port
int serial_core_register_port(struct uart_driver *drv, struct uart_port *port);
void serial_core_unregister_port(struct uart_driver *drv, struct uart_port *port);
-
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
-
-int serial_base_add_preferred_console(struct uart_driver *drv,
- struct uart_port *port);
-
-#else
-
-static inline
-int serial_base_add_preferred_console(struct uart_driver *drv,
- struct uart_port *port)
-{
- return 0;
-}
-
-#endif
-
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-
-int serial_base_add_isa_preferred_console(const char *name, int idx);
-
-#else
-
-static inline
-int serial_base_add_isa_preferred_console(const char *name, int idx)
-{
- return 0;
-}
-
-#endif
diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c
index 73c6ee540c83..4df2a4b10445 100644
--- a/drivers/tty/serial/serial_base_bus.c
+++ b/drivers/tty/serial/serial_base_bus.c
@@ -8,7 +8,6 @@
* The serial core bus manages the serial core controller instances.
*/
-#include <linux/cleanup.h>
#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/idr.h>
@@ -205,134 +204,6 @@ void serial_base_port_device_remove(struct serial_port_device *port_dev)
put_device(&port_dev->dev);
}
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
-
-static int serial_base_add_one_prefcon(const char *match, const char *dev_name,
- int port_id)
-{
- int ret;
-
- ret = add_preferred_console_match(match, dev_name, port_id);
- if (ret == -ENOENT)
- return 0;
-
- return ret;
-}
-
-#ifdef __sparc__
-
-/* Handle Sparc ttya and ttyb options as done in console_setup() */
-static int serial_base_add_sparc_console(const char *dev_name, int idx)
-{
- const char *name;
-
- switch (idx) {
- case 0:
- name = "ttya";
- break;
- case 1:
- name = "ttyb";
- break;
- default:
- return 0;
- }
-
- return serial_base_add_one_prefcon(name, dev_name, idx);
-}
-
-#else
-
-static inline int serial_base_add_sparc_console(const char *dev_name, int idx)
-{
- return 0;
-}
-
-#endif
-
-static int serial_base_add_prefcon(const char *name, int idx)
-{
- const char *char_match __free(kfree) = NULL;
- const char *nmbr_match __free(kfree) = NULL;
- int ret;
-
- /* Handle ttyS specific options */
- if (strstarts(name, "ttyS")) {
- /* No name, just a number */
- nmbr_match = kasprintf(GFP_KERNEL, "%i", idx);
- if (!nmbr_match)
- return -ENODEV;
-
- ret = serial_base_add_one_prefcon(nmbr_match, name, idx);
- if (ret)
- return ret;
-
- /* Sparc ttya and ttyb */
- ret = serial_base_add_sparc_console(name, idx);
- if (ret)
- return ret;
- }
-
- /* Handle the traditional character device name style console=ttyS0 */
- char_match = kasprintf(GFP_KERNEL, "%s%i", name, idx);
- if (!char_match)
- return -ENOMEM;
-
- return serial_base_add_one_prefcon(char_match, name, idx);
-}
-
-/**
- * serial_base_add_preferred_console - Adds a preferred console
- * @drv: Serial port device driver
- * @port: Serial port instance
- *
- * Tries to add a preferred console for a serial port if specified in the
- * kernel command line. Supports both the traditional character device such
- * as console=ttyS0, and a hardware addressing based console=DEVNAME:0.0
- * style name.
- *
- * Translates the kernel command line option using a hardware based addressing
- * console=DEVNAME:0.0 to the serial port character device such as ttyS0.
- * Cannot be called early for ISA ports, depends on struct device.
- *
- * Note that duplicates are ignored by add_preferred_console().
- *
- * Return: 0 on success, negative error code on failure.
- */
-int serial_base_add_preferred_console(struct uart_driver *drv,
- struct uart_port *port)
-{
- const char *port_match __free(kfree) = NULL;
- int ret;
-
- ret = serial_base_add_prefcon(drv->dev_name, port->line);
- if (ret)
- return ret;
-
- port_match = kasprintf(GFP_KERNEL, "%s:%i.%i", dev_name(port->dev),
- port->ctrl_id, port->port_id);
- if (!port_match)
- return -ENOMEM;
-
- /* Translate a hardware addressing style console=DEVNAME:0.0 */
- return serial_base_add_one_prefcon(port_match, drv->dev_name, port->line);
-}
-
-#endif
-
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-
-/*
- * Early ISA ports initialize the console before there is no struct device.
- * This should be only called from serial8250_isa_init_preferred_console(),
- * other callers are likely wrong and should rely on earlycon instead.
- */
-int serial_base_add_isa_preferred_console(const char *name, int idx)
-{
- return serial_base_add_prefcon(name, idx);
-}
-
-#endif
-
static int serial_base_init(void)
{
int ret;
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 2c1a0254d3f4..2a8006e3d687 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -622,7 +622,7 @@ static ssize_t uart_write(struct tty_struct *tty, const u8 *buf, size_t count)
return -EL3HLT;
port = uart_port_lock(state, flags);
- if (WARN_ON_ONCE(!state->port.xmit_buf)) {
+ if (!state->port.xmit_buf) {
uart_port_unlock(port, flags);
return 0;
}
@@ -3422,10 +3422,6 @@ int serial_core_register_port(struct uart_driver *drv, struct uart_port *port)
if (ret)
goto err_unregister_ctrl_dev;
- ret = serial_base_add_preferred_console(drv, port);
- if (ret)
- goto err_unregister_port_dev;
-
ret = serial_core_add_one_port(drv, port);
if (ret)
goto err_unregister_port_dev;
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index 91a338d3cb34..d35f1d24156c 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -64,6 +64,13 @@ static int serial_port_runtime_suspend(struct device *dev)
if (port->flags & UPF_DEAD)
return 0;
+ /*
+ * Nothing to do on pm_runtime_force_suspend(), see
+ * DEFINE_RUNTIME_DEV_PM_OPS.
+ */
+ if (!pm_runtime_enabled(dev))
+ return 0;
+
uart_port_lock_irqsave(port, &flags);
if (!port_dev->tx_enabled) {
uart_port_unlock_irqrestore(port, flags);
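The early return added to the serial port runtime-suspend callback follows the DEFINE_RUNTIME_DEV_PM_OPS convention: when pm_runtime_force_suspend() invokes the callback with runtime PM disabled, there is nothing to do. A generic sketch of that guard, with the device-specific body elided:

#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
	/*
	 * pm_runtime_force_suspend() calls this with runtime PM disabled;
	 * in that case skip the hardware handling entirely.
	 */
	if (!pm_runtime_enabled(dev))
		return 0;

	/* ... stop the hardware ... */
	return 0;
}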
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 005d63ab1f44..8944548c30fa 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -634,20 +634,20 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct ufs_hw_queue *hwq;
unsigned long flags;
- int err = FAILED;
+ int err;
if (!ufshcd_cmd_inflight(lrbp->cmd)) {
dev_err(hba->dev,
"%s: skip abort. cmd at tag %d already completed.\n",
__func__, tag);
- goto out;
+ return FAILED;
}
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
__func__, tag);
- goto out;
+ return FAILED;
}
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
@@ -659,7 +659,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
*/
dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
__func__, hwq->id, tag);
- goto out;
+ return FAILED;
}
/*
@@ -667,18 +667,17 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
* in the completion queue either. Query the device to see if
* the command is being processed in the device.
*/
- if (ufshcd_try_to_abort_task(hba, tag)) {
+ err = ufshcd_try_to_abort_task(hba, tag);
+ if (err) {
dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
lrbp->req_abort_skip = true;
- goto out;
+ return FAILED;
}
- err = SUCCESS;
spin_lock_irqsave(&hwq->cq_lock, flags);
if (ufshcd_cmd_inflight(lrbp->cmd))
ufshcd_release_scsi_cmd(hba, lrbp);
spin_unlock_irqrestore(&hwq->cq_lock, flags);
-out:
- return err;
+ return SUCCESS;
}
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 0cf07194bbe8..1b65e6ae4137 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1366,7 +1366,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
* make sure that there are no outstanding requests when
* clock scaling is in progress
*/
- ufshcd_scsi_block_requests(hba);
+ blk_mq_quiesce_tagset(&hba->host->tag_set);
mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
@@ -1375,7 +1375,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
- ufshcd_scsi_unblock_requests(hba);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
goto out;
}
@@ -1396,7 +1396,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
mutex_unlock(&hba->wb_mutex);
- ufshcd_scsi_unblock_requests(hba);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
ufshcd_release(hba);
}
@@ -8787,6 +8787,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
(hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
/* Reset the device and controller before doing reinit */
ufshcd_device_reset(hba);
+ ufs_put_device_desc(hba);
ufshcd_hba_stop(hba);
ufshcd_vops_reinit_notify(hba);
ret = ufshcd_hba_enable(hba);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 5ce429286ab0..20d2a55cb40b 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -1145,4 +1145,5 @@ static void __exit uio_exit(void)
module_init(uio_init)
module_exit(uio_exit)
+MODULE_DESCRIPTION("Userspace IO core module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/uio/uio_aec.c b/drivers/uio/uio_aec.c
index 64eafd59e6e7..8c164e51ff9e 100644
--- a/drivers/uio/uio_aec.c
+++ b/drivers/uio/uio_aec.c
@@ -144,4 +144,5 @@ static struct pci_driver pci_driver = {
};
module_pci_driver(pci_driver);
+MODULE_DESCRIPTION("Adrienne Electronics Corp time code PCI device");
MODULE_LICENSE("GPL");
diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c
index 653f842a1491..1cc3b8b5a345 100644
--- a/drivers/uio/uio_cif.c
+++ b/drivers/uio/uio_cif.c
@@ -130,5 +130,6 @@ static struct pci_driver hilscher_pci_driver = {
module_pci_driver(hilscher_pci_driver);
MODULE_DEVICE_TABLE(pci, hilscher_pci_ids);
+MODULE_DESCRIPTION("UIO Hilscher CIF card driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Hans J. Koch, Benedikt Spranger");
diff --git a/drivers/uio/uio_mf624.c b/drivers/uio/uio_mf624.c
index 5065c6a073a8..790412f8dfd5 100644
--- a/drivers/uio/uio_mf624.c
+++ b/drivers/uio/uio_mf624.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * UIO driver fo Humusoft MF624 DAQ card.
+ * UIO driver for Humusoft MF624 DAQ card.
* Copyright (C) 2011 Rostislav Lisovy <lisovy@gmail.com>,
* Czech Technical University in Prague
*/
@@ -221,5 +221,6 @@ static struct pci_driver mf624_pci_driver = {
MODULE_DEVICE_TABLE(pci, mf624_pci_id);
module_pci_driver(mf624_pci_driver);
+MODULE_DESCRIPTION("UIO driver for Humusoft MF624 DAQ card");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rostislav Lisovy <lisovy@gmail.com>");
diff --git a/drivers/uio/uio_netx.c b/drivers/uio/uio_netx.c
index 2319d6de8d04..a1a58802c793 100644
--- a/drivers/uio/uio_netx.c
+++ b/drivers/uio/uio_netx.c
@@ -170,5 +170,6 @@ static struct pci_driver netx_pci_driver = {
module_pci_driver(netx_pci_driver);
MODULE_DEVICE_TABLE(pci, netx_pci_ids);
+MODULE_DESCRIPTION("UIO driver for Hilscher NetX based fieldbus cards");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Hans J. Koch, Manuel Traut");
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 3a9a0dd4be70..949eca0adebe 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_USB_R8A66597_HCD) += host/
obj-$(CONFIG_USB_FSL_USB2) += host/
obj-$(CONFIG_USB_FOTG210_HCD) += host/
obj-$(CONFIG_USB_MAX3421_HCD) += host/
+obj-$(CONFIG_USB_XEN_HCD) += host/
obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 4ce7cba2b48a..8f3b9a0a38e1 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1131,6 +1131,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
struct cxacru_data *instance;
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
+ struct usb_endpoint_descriptor *in, *out;
int ret;
/* instance init */
@@ -1177,6 +1178,19 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
goto fail;
}
+ if (usb_endpoint_xfer_int(&cmd_ep->desc))
+ ret = usb_find_common_endpoints(intf->cur_altsetting,
+ NULL, NULL, &in, &out);
+ else
+ ret = usb_find_common_endpoints(intf->cur_altsetting,
+ &in, &out, NULL, NULL);
+
+ if (ret) {
+ usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
== USB_ENDPOINT_XFER_INT) {
usb_fill_int_urb(instance->rcv_urb,
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index bada13f704b6..835bf2428dc6 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1084,6 +1084,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -ENODEV;
}
+ ret = ci_ulpi_init(ci);
+ if (ret)
+ return ret;
+
if (ci->platdata->phy) {
ci->phy = ci->platdata->phy;
} else if (ci->platdata->usb_phy) {
@@ -1138,10 +1142,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
goto ulpi_exit;
}
- ret = ci_ulpi_init(ci);
- if (ret)
- return ret;
-
ci->hw_bank.phys = res->start;
ci->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/usb/chipidea/ulpi.c b/drivers/usb/chipidea/ulpi.c
index 89fb51e2c3de..dfec07e8ae1d 100644
--- a/drivers/usb/chipidea/ulpi.c
+++ b/drivers/usb/chipidea/ulpi.c
@@ -68,6 +68,11 @@ int ci_ulpi_init(struct ci_hdrc *ci)
if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
return 0;
+ /*
+ * Set PORTSC correctly so we can read/write ULPI registers for
+ * identification purposes
+ */
+ hw_phymode_configure(ci);
ci->ulpi_ops.read = ci_ulpi_read;
ci->ulpi_ops.write = ci_ulpi_write;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index c553decb5461..6830be4419e2 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -266,14 +266,14 @@ static void wdm_int_callback(struct urb *urb)
dev_err(&desc->intf->dev, "Stall on int endpoint\n");
goto sw; /* halt is cleared in work */
default:
- dev_err(&desc->intf->dev,
+ dev_err_ratelimited(&desc->intf->dev,
"nonzero urb status received: %d\n", status);
break;
}
}
if (urb->actual_length < sizeof(struct usb_cdc_notification)) {
- dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
+ dev_err_ratelimited(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
urb->actual_length);
goto exit;
}
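Editor's note: the cdc-wdm change above switches two dev_err() calls to dev_err_ratelimited() so a misbehaving device cannot flood the log. As a hedged sketch of the general idea (not the kernel's ratelimit implementation), the standalone C below suppresses messages once a burst limit is exceeded within a time window; the window length, burst count and rl_allow() name are made up for illustration.

/* Minimal time-window log ratelimiter (illustrative sketch). */
#include <stdio.h>
#include <time.h>

#define RL_INTERVAL_SEC 5   /* window length */
#define RL_BURST        3   /* messages allowed per window */

static int rl_allow(void)
{
	static time_t window_start;
	static int count, suppressed;
	time_t now = time(NULL);

	if (now - window_start >= RL_INTERVAL_SEC) {
		if (suppressed)
			fprintf(stderr, "ratelimit: %d messages suppressed\n",
				suppressed);
		window_start = now;
		count = 0;
		suppressed = 0;
	}
	if (count < RL_BURST) {
		count++;
		return 1;
	}
	suppressed++;	/* reported when the next window opens */
	return 0;
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		if (rl_allow())
			fprintf(stderr, "nonzero urb status received: %d\n", -71);
	return 0;
}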
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index e3366f4d82b9..1ff7d901fede 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1623,6 +1623,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
struct usb_anchor *anchor = urb->anchor;
int status = urb->unlinked;
+ unsigned long flags;
urb->hcpriv = NULL;
if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
@@ -1640,13 +1641,14 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
/* pass ownership to the completion handler */
urb->status = status;
/*
- * This function can be called in task context inside another remote
- * coverage collection section, but kcov doesn't support that kind of
- * recursion yet. Only collect coverage in softirq context for now.
+ * Only collect coverage in the softirq context and disable interrupts
+ * to avoid scenarios with nested remote coverage collection sections
+ * that KCOV does not support.
+ * See the comment next to kcov_remote_start_usb_softirq() for details.
*/
- kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
+ flags = kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
urb->complete(urb);
- kcov_remote_stop_softirq();
+ kcov_remote_stop_softirq(flags);
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 7ee61a89520b..cb82557678dd 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -957,12 +957,16 @@ static bool dwc3_core_is_valid(struct dwc3 *dwc)
static void dwc3_core_setup_global_control(struct dwc3 *dwc)
{
+ unsigned int power_opt;
+ unsigned int hw_mode;
u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+ power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
- switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
+ switch (power_opt) {
case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
/**
* WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
@@ -995,6 +999,20 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
break;
}
+ /*
+ * This is a workaround for STAR#4846132, which only affects
+ * DWC_usb31 version 2.00a operating in host mode.
+ *
+ * There is a problem in DWC_usb31 version 2.00a operating
+ * in host mode that causes a CSR read timeout when a CSR
+ * read coincides with RAM clock gating entry. Work around
+ * it by disabling clock gating, sacrificing power
+ * consumption for normal operation.
+ */
+ if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO &&
+ hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A))
+ reg |= DWC3_GCTL_DSBLCLKGTNG;
+
/* check if current dwc3 is on simulation board */
if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
dev_info(dwc->dev, "Running with FPGA optimizations\n");
@@ -2250,7 +2268,6 @@ assert_reset:
static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
{
- unsigned long flags;
u32 reg;
int i;
@@ -2293,9 +2310,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
break;
if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
- spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_suspend(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
synchronize_irq(dwc->irq_gadget);
}
@@ -2312,7 +2327,6 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
{
- unsigned long flags;
int ret;
u32 reg;
int i;
@@ -2366,9 +2380,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
dwc3_otg_host_init(dwc);
} else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
- spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_resume(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
}
break;
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index ba7d180cc9e6..44e20c6c36d3 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -213,6 +213,7 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
struct usb_endpoint_descriptor *ss)
{
switch (gadget->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
return ss;
case USB_SPEED_HIGH:
@@ -449,11 +450,8 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
- if (dev->interface < 0) {
- spin_unlock_irqrestore(&dev->lock, flags);
- mutex_unlock(&dev->lock_printer_io);
- return -ENODEV;
- }
+ if (dev->interface < 0)
+ goto out_disabled;
/* We will use this flag later to check if a printer reset happened
* after we turn interrupts back on.
@@ -461,6 +459,9 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
dev->reset_printer = 0;
setup_rx_reqs(dev);
+ /* this dropped the lock - need to retest */
+ if (dev->interface < 0)
+ goto out_disabled;
bytes_copied = 0;
current_rx_req = dev->current_rx_req;
@@ -494,6 +495,8 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
wait_event_interruptible(dev->rx_wait,
(likely(!list_empty(&dev->rx_buffers))));
spin_lock_irqsave(&dev->lock, flags);
+ if (dev->interface < 0)
+ goto out_disabled;
}
/* We have data to return then copy it to the caller's buffer.*/
@@ -537,6 +540,9 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
return -EAGAIN;
}
+ if (dev->interface < 0)
+ goto out_disabled;
+
/* If we are not returning all the data left in this RX request
* buffer then adjust the amount of data left in the buffer.
* Otherwise, if we are done with this RX request buffer then
@@ -566,6 +572,11 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
return bytes_copied;
else
return -EAGAIN;
+
+out_disabled:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->lock_printer_io);
+ return -ENODEV;
}
static ssize_t
@@ -586,11 +597,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
- if (dev->interface < 0) {
- spin_unlock_irqrestore(&dev->lock, flags);
- mutex_unlock(&dev->lock_printer_io);
- return -ENODEV;
- }
+ if (dev->interface < 0)
+ goto out_disabled;
/* Check if a printer reset happens while we have interrupts on */
dev->reset_printer = 0;
@@ -613,6 +621,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
wait_event_interruptible(dev->tx_wait,
(likely(!list_empty(&dev->tx_reqs))));
spin_lock_irqsave(&dev->lock, flags);
+ if (dev->interface < 0)
+ goto out_disabled;
}
while (likely(!list_empty(&dev->tx_reqs)) && len) {
@@ -662,6 +672,9 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
return -EAGAIN;
}
+ if (dev->interface < 0)
+ goto out_disabled;
+
list_add(&req->list, &dev->tx_reqs_active);
/* here, we unlock, and only unlock, to avoid deadlock. */
@@ -674,6 +687,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
}
+ if (dev->interface < 0)
+ goto out_disabled;
}
spin_unlock_irqrestore(&dev->lock, flags);
@@ -685,6 +700,11 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
return bytes_copied;
else
return -EAGAIN;
+
+out_disabled:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->lock_printer_io);
+ return -ENODEV;
}
static int
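Editor's note: the f_printer hunks above convert the repeated inline unlock-and-return sequences into a single out_disabled label, and re-check dev->interface every time the spinlock has been dropped and retaken. A hedged standalone sketch of that pattern follows; the mutex, the interface flag and the function names are invented, and pthreads stand in for kernel locks.

/* Re-test a condition after reacquiring a lock; one unlock-and-exit label. */
#include <pthread.h>
#include <errno.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int interface = -1;	/* < 0 means the function is disabled */

static void do_work_that_drops_lock(void)
{
	/* Emulates a helper that temporarily releases the lock. */
	pthread_mutex_unlock(&dev_lock);
	/* ... the device could be disabled here ... */
	pthread_mutex_lock(&dev_lock);
}

static int device_read(void)
{
	int ret;

	pthread_mutex_lock(&dev_lock);
	if (interface < 0)
		goto out_disabled;

	do_work_that_drops_lock();
	/* The lock was dropped above - the state must be re-checked. */
	if (interface < 0)
		goto out_disabled;

	ret = 0;			/* normal completion */
	pthread_mutex_unlock(&dev_lock);
	return ret;

out_disabled:
	pthread_mutex_unlock(&dev_lock);
	return -ENODEV;
}

int main(void)
{
	printf("read -> %d\n", device_read());	/* -ENODEV: still disabled */
	interface = 0;
	printf("read -> %d\n", device_read());	/* 0 */
	return 0;
}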
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 11dd0b9e847f..95191083b455 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -1163,8 +1163,6 @@ struct net_device *gether_connect(struct gether *link)
if (netif_running(dev->net))
eth_start(dev, GFP_ATOMIC);
- netif_device_attach(dev->net);
-
/* on error, disable any endpoints */
} else {
(void) usb_ep_disable(link->out_ep);
@@ -1202,7 +1200,7 @@ void gether_disconnect(struct gether *link)
DBG(dev, "%s\n", __func__);
- netif_device_detach(dev->net);
+ netif_stop_queue(dev->net);
netif_carrier_off(dev->net);
/* disable endpoints, forcing (synchronous) completion
diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c
index 3916c8e2ba01..821a6ab5da56 100644
--- a/drivers/usb/gadget/udc/aspeed_udc.c
+++ b/drivers/usb/gadget/udc/aspeed_udc.c
@@ -66,8 +66,8 @@
#define USB_UPSTREAM_EN BIT(0)
/* Main config reg */
-#define UDC_CFG_SET_ADDR(x) ((x) & 0x3f)
-#define UDC_CFG_ADDR_MASK (0x3f)
+#define UDC_CFG_SET_ADDR(x) ((x) & UDC_CFG_ADDR_MASK)
+#define UDC_CFG_ADDR_MASK GENMASK(6, 0)
/* Interrupt ctrl & status reg */
#define UDC_IRQ_EP_POOL_NAK BIT(17)
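Editor's note: the aspeed_udc hunk widens the device-address mask from 0x3f to GENMASK(6, 0), i.e. 0x7f, the full 7-bit USB address range, and derives UDC_CFG_SET_ADDR() from the mask so the two cannot drift apart. Below is a hedged userspace approximation of GENMASK() for readers without the kernel headers; the kernel's macro (in include/linux/bits.h) additionally carries compile-time argument checks.

/* Simplified userspace GENMASK(h, l): bits l..h set, everything else clear. */
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define GENMASK(h, l) \
	(((~0UL) >> (BITS_PER_LONG - 1 - (h))) & ((~0UL) << (l)))

#define UDC_CFG_ADDR_MASK	GENMASK(6, 0)		/* 0x7f */
#define UDC_CFG_SET_ADDR(x)	((x) & UDC_CFG_ADDR_MASK)

int main(void)
{
	printf("UDC_CFG_ADDR_MASK = 0x%lx\n", UDC_CFG_ADDR_MASK);        /* 0x7f */
	printf("SET_ADDR(0x85)    = 0x%lx\n", UDC_CFG_SET_ADDR(0x85UL)); /* 0x05 */
	return 0;
}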
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c040d816e626..05881153883e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -36,6 +36,7 @@
#define PCI_VENDOR_ID_ETRON 0x1b6f
#define PCI_DEVICE_ID_EJ168 0x7023
+#define PCI_DEVICE_ID_EJ188 0x7052
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
@@ -395,6 +396,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_RESET_ON_RESUME;
xhci->quirks |= XHCI_BROKEN_STREAMS;
}
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_EJ188) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
+ }
+
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0014) {
xhci->quirks |= XHCI_ZERO_64B_REGS;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9e90d2952760..fd0cde3d1569 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1031,13 +1031,27 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
break;
case TD_DIRTY: /* TD is cached, clear it */
case TD_HALTED:
+ case TD_CLEARING_CACHE_DEFERRED:
+ if (cached_td) {
+ if (cached_td->urb->stream_id != td->urb->stream_id) {
+ /* Multiple streams case, defer move dq */
+ xhci_dbg(xhci,
+ "Move dq deferred: stream %u URB %p\n",
+ td->urb->stream_id, td->urb);
+ td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
+ break;
+ }
+
+ /* Should never happen, but clear the TD if it does */
+ xhci_warn(xhci,
+ "Found multiple active URBs %p and %p in stream %u?\n",
+ td->urb, cached_td->urb,
+ td->urb->stream_id);
+ td_to_noop(xhci, ring, cached_td, false);
+ cached_td->cancel_status = TD_CLEARED;
+ }
+
td->cancel_status = TD_CLEARING_CACHE;
- if (cached_td)
- /* FIXME stream case, several stopped rings */
- xhci_dbg(xhci,
- "Move dq past stream %u URB %p instead of stream %u URB %p\n",
- td->urb->stream_id, td->urb,
- cached_td->urb->stream_id, cached_td->urb);
cached_td = td;
break;
}
@@ -1057,10 +1071,16 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
if (err) {
/* Failed to move past cached td, just set cached TDs to no-op */
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
- if (td->cancel_status != TD_CLEARING_CACHE)
+ /*
+ * Deferred TDs need to have the deq pointer set after the above command
+ * completes, so if that failed we just give up on all of them (and
+ * complain loudly since this could cause issues due to caching).
+ */
+ if (td->cancel_status != TD_CLEARING_CACHE &&
+ td->cancel_status != TD_CLEARING_CACHE_DEFERRED)
continue;
- xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
- td->urb);
+ xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
+ td->urb);
td_to_noop(xhci, ring, td, false);
td->cancel_status = TD_CLEARED;
}
@@ -1346,6 +1366,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_td *td, *tmp_td;
+ bool deferred = false;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
@@ -1432,6 +1453,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
__func__, td->urb);
xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
+ } else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) {
+ deferred = true;
} else {
xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
__func__, td->urb, td->cancel_status);
@@ -1441,8 +1464,17 @@ cleanup:
ep->ep_state &= ~SET_DEQ_PENDING;
ep->queued_deq_seg = NULL;
ep->queued_deq_ptr = NULL;
- /* Restart any rings with pending URBs */
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+
+ if (deferred) {
+ /* We have more streams to clear */
+ xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
+ __func__);
+ xhci_invalidate_cancelled_tds(ep);
+ } else {
+ /* Restart any rings with pending URBs */
+ xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ }
}
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
@@ -2524,9 +2556,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
goto finish_td;
case COMP_STOPPED_LENGTH_INVALID:
/* stopped on ep trb with invalid length, exclude it */
- ep_trb_len = 0;
- remaining = 0;
- break;
+ td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb);
+ goto finish_td;
case COMP_USB_TRANSACTION_ERROR:
if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
(ep->err_count++ > MAX_SOFT_RETRY) ||
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 30415158ed3c..78d014c4d884 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1276,6 +1276,7 @@ enum xhci_cancelled_td_status {
TD_DIRTY = 0,
TD_HALTED,
TD_CLEARING_CACHE,
+ TD_CLEARING_CACHE_DEFERRED,
TD_CLEARED,
};
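Editor's note: the xhci changes above add a TD_CLEARING_CACHE_DEFERRED state. When cancelled TDs belong to several streams, only one stream's ring can have its dequeue pointer moved per Set TR Dequeue command, so TDs on other streams are deferred and the invalidation routine is re-run from the command completion handler until no deferred TDs remain. The standalone sketch below models just that control flow with invented types and names; it is not the xHCI algorithm itself.

/* Sketch: clear one "stream" per pass, defer the rest, loop until done. */
#include <stdio.h>
#include <stdbool.h>

enum td_state { TD_DIRTY, TD_CLEARING, TD_CLEARING_DEFERRED, TD_CLEARED };

struct td {
	unsigned int stream_id;
	enum td_state state;
};

/* One invalidation pass: pick the first uncleared stream, defer the others. */
static bool invalidate_pass(struct td *tds, int n)
{
	int chosen = -1;
	bool deferred = false;

	for (int i = 0; i < n; i++) {
		if (tds[i].state == TD_CLEARED)
			continue;
		if (chosen == -1 || tds[i].stream_id == tds[chosen].stream_id) {
			tds[i].state = TD_CLEARING;
			chosen = i;
		} else {
			tds[i].state = TD_CLEARING_DEFERRED;
			deferred = true;
		}
	}

	/* "Command completion": everything being cleared is now cleared. */
	for (int i = 0; i < n; i++)
		if (tds[i].state == TD_CLEARING)
			tds[i].state = TD_CLEARED;

	return deferred;	/* caller re-runs the pass if true */
}

int main(void)
{
	struct td tds[] = {
		{ .stream_id = 1, .state = TD_DIRTY },
		{ .stream_id = 2, .state = TD_DIRTY },
		{ .stream_id = 1, .state = TD_DIRTY },
	};
	int passes = 0;

	while (invalidate_pass(tds, 3))
		passes++;
	printf("cleared all TDs in %d extra pass(es)\n", passes);
	return 0;
}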
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 8abf3a567e30..108d9a593a80 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -556,7 +556,7 @@ static int da8xx_probe(struct platform_device *pdev)
ret = of_platform_populate(pdev->dev.of_node, NULL,
da8xx_auxdata_lookup, &pdev->dev);
if (ret)
- return ret;
+ goto err_unregister_phy;
pinfo = da8xx_dev_info;
pinfo.parent = &pdev->dev;
@@ -571,9 +571,13 @@ static int da8xx_probe(struct platform_device *pdev)
ret = PTR_ERR_OR_ZERO(glue->musb);
if (ret) {
dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
- usb_phy_generic_unregister(glue->usb_phy);
+ goto err_unregister_phy;
}
+ return 0;
+
+err_unregister_phy:
+ usb_phy_generic_unregister(glue->usb_phy);
return ret;
}
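Editor's note: the da8xx hunk above routes every failure after phy registration through a single err_unregister_phy label so the phy is always unregistered on error, instead of being leaked on the of_platform_populate() failure path. A hedged generic sketch of that reverse-order unwind is below; the resource names and acquire/release helpers are invented.

/* Probe-style init: acquire in order, unwind in reverse on failure. */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

static int register_phy(void)     { puts("phy registered");   return 0; }
static void unregister_phy(void)  { puts("phy unregistered"); }
static int populate_children(bool fail) { return fail ? -ENODEV : 0; }
static int register_musb(bool fail)     { return fail ? -ENOMEM : 0; }

static int probe(bool fail_populate, bool fail_musb)
{
	int ret;

	ret = register_phy();
	if (ret)
		return ret;

	ret = populate_children(fail_populate);
	if (ret)
		goto err_unregister_phy;	/* was "return ret;", leaking the phy */

	ret = register_musb(fail_musb);
	if (ret)
		goto err_unregister_phy;

	return 0;

err_unregister_phy:
	unregister_phy();
	return ret;
}

int main(void)
{
	printf("probe -> %d\n", probe(true, false));	/* unwinds the phy */
	printf("probe -> %d\n", probe(false, false));	/* succeeds */
	return 0;
}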
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 115f05a6201a..40d34cc28344 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -105,6 +105,8 @@ struct alauda_info {
unsigned char sense_key;
unsigned long sense_asc; /* additional sense code */
unsigned long sense_ascq; /* additional sense code qualifier */
+
+ bool media_initialized;
};
#define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) )
@@ -476,11 +478,12 @@ static int alauda_check_media(struct us_data *us)
}
/* Check for media change */
- if (status[0] & 0x08) {
+ if (status[0] & 0x08 || !info->media_initialized) {
usb_stor_dbg(us, "Media change detected\n");
alauda_free_maps(&MEDIA_INFO(us));
- alauda_init_media(us);
-
+ rc = alauda_init_media(us);
+ if (rc == USB_STOR_TRANSPORT_GOOD)
+ info->media_initialized = true;
info->sense_key = UNIT_ATTENTION;
info->sense_asc = 0x28;
info->sense_ascq = 0x00;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index b31464740f6c..8c8b5e6041cc 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -79,6 +79,12 @@ static int slave_alloc (struct scsi_device *sdev)
if (us->protocol == USB_PR_BULK && us->max_lun > 0)
sdev->sdev_bflags |= BLIST_FORCELUN;
+ /*
+ * Some USB storage devices reset if the IO advice hints grouping mode
+ * page is queried. Hence skip that mode page.
+ */
+ sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS;
+
return 0;
}
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index a48870a87a29..b610a2de4ae5 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -21,6 +21,7 @@
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
@@ -820,6 +821,12 @@ static int uas_slave_alloc(struct scsi_device *sdev)
struct uas_dev_info *devinfo =
(struct uas_dev_info *)sdev->host->hostdata;
+ /*
+ * Some USB storage devices reset if the IO advice hints grouping mode
+ * page is queried. Hence skip that mode page.
+ */
+ sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS;
+
sdev->hostdata = devinfo;
return 0;
}
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 8a1af08f71b6..5d4da962acc8 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -3014,8 +3014,10 @@ static int tcpm_register_source_caps(struct tcpm_port *port)
memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
caps.role = TYPEC_SOURCE;
- if (cap)
+ if (cap) {
usb_power_delivery_unregister_capabilities(cap);
+ port->partner_source_caps = NULL;
+ }
cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
if (IS_ERR(cap))
@@ -6172,6 +6174,7 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
port->tcpc->set_bist_data(port->tcpc, false);
switch (port->state) {
+ case TOGGLING:
case ERROR_RECOVERY:
case PORT_RESET:
case PORT_RESET_WAIT_OFF:
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index cb52e7b0a2c5..2cc7aedd490f 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -153,8 +153,13 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
}
if (cci & UCSI_CCI_ERROR) {
- if (cmd == UCSI_GET_ERROR_STATUS)
+ if (cmd == UCSI_GET_ERROR_STATUS) {
+ ret = ucsi_acknowledge(ucsi, false);
+ if (ret)
+ return ret;
+
return -EIO;
+ }
return ucsi_read_error(ucsi);
}
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 8d112c3edae5..adf32ca0f761 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -25,6 +25,7 @@ struct ucsi_acpi {
unsigned long flags;
#define UCSI_ACPI_COMMAND_PENDING 1
#define UCSI_ACPI_ACK_PENDING 2
+#define UCSI_ACPI_CHECK_BOGUS_EVENT 3
guid_t guid;
u64 cmd;
};
@@ -128,6 +129,58 @@ static const struct ucsi_operations ucsi_zenbook_ops = {
.async_write = ucsi_acpi_async_write
};
+static int ucsi_gram_read(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len)
+{
+ u16 bogus_change = UCSI_CONSTAT_POWER_LEVEL_CHANGE |
+ UCSI_CONSTAT_PDOS_CHANGE;
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ struct ucsi_connector_status *status;
+ int ret;
+
+ ret = ucsi_acpi_read(ucsi, offset, val, val_len);
+ if (ret < 0)
+ return ret;
+
+ if (UCSI_COMMAND(ua->cmd) == UCSI_GET_CONNECTOR_STATUS &&
+ test_bit(UCSI_ACPI_CHECK_BOGUS_EVENT, &ua->flags) &&
+ offset == UCSI_MESSAGE_IN) {
+ status = (struct ucsi_connector_status *)val;
+
+ /* Clear the bogus change */
+ if (status->change == bogus_change)
+ status->change = 0;
+
+ clear_bit(UCSI_ACPI_CHECK_BOGUS_EVENT, &ua->flags);
+ }
+
+ return ret;
+}
+
+static int ucsi_gram_sync_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
+{
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len);
+ if (ret < 0)
+ return ret;
+
+ if (UCSI_COMMAND(ua->cmd) == UCSI_GET_PDOS &&
+ ua->cmd & UCSI_GET_PDOS_PARTNER_PDO(1) &&
+ ua->cmd & UCSI_GET_PDOS_SRC_PDOS)
+ set_bit(UCSI_ACPI_CHECK_BOGUS_EVENT, &ua->flags);
+
+ return ret;
+}
+
+static const struct ucsi_operations ucsi_gram_ops = {
+ .read = ucsi_gram_read,
+ .sync_write = ucsi_gram_sync_write,
+ .async_write = ucsi_acpi_async_write
+};
+
static const struct dmi_system_id ucsi_acpi_quirks[] = {
{
.matches = {
@@ -136,6 +189,14 @@ static const struct dmi_system_id ucsi_acpi_quirks[] = {
},
.driver_data = (void *)&ucsi_zenbook_ops,
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "LG gram PC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "90Q"),
+ },
+ .driver_data = (void *)&ucsi_gram_ops,
+ },
{ }
};
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index f7546bb488c3..2fa973afe4e6 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -14,7 +14,7 @@
#include <linux/soc/qcom/pmic_glink.h>
#include "ucsi.h"
-#define PMIC_GLINK_MAX_PORTS 2
+#define PMIC_GLINK_MAX_PORTS 3
#define UCSI_BUF_SIZE 48
@@ -372,6 +372,7 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
ret = fwnode_property_read_u32(fwnode, "reg", &port);
if (ret < 0) {
dev_err(dev, "missing reg property of %pOFn\n", fwnode);
+ fwnode_handle_put(fwnode);
return ret;
}
@@ -386,9 +387,11 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
if (!desc)
continue;
- if (IS_ERR(desc))
+ if (IS_ERR(desc)) {
+ fwnode_handle_put(fwnode);
return dev_err_probe(dev, PTR_ERR(desc),
"unable to acquire orientation gpio\n");
+ }
ucsi->port_orientation[port] = desc;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
index ac48b7763114..ac69288e8bb0 100644
--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
@@ -65,6 +65,7 @@ struct ucsi_stm32g0 {
struct device *dev;
unsigned long flags;
#define COMMAND_PENDING 1
+#define ACK_PENDING 2
const char *fw_name;
struct ucsi *ucsi;
bool suspended;
@@ -396,9 +397,13 @@ static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const
size_t len)
{
struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
+ bool ack = UCSI_COMMAND(*(u64 *)val) == UCSI_ACK_CC_CI;
int ret;
- set_bit(COMMAND_PENDING, &g0->flags);
+ if (ack)
+ set_bit(ACK_PENDING, &g0->flags);
+ else
+ set_bit(COMMAND_PENDING, &g0->flags);
ret = ucsi_stm32g0_async_write(ucsi, offset, val, len);
if (ret)
@@ -406,9 +411,14 @@ static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const
if (!wait_for_completion_timeout(&g0->complete, msecs_to_jiffies(5000)))
ret = -ETIMEDOUT;
+ else
+ return 0;
out_clear_bit:
- clear_bit(COMMAND_PENDING, &g0->flags);
+ if (ack)
+ clear_bit(ACK_PENDING, &g0->flags);
+ else
+ clear_bit(COMMAND_PENDING, &g0->flags);
return ret;
}
@@ -429,8 +439,9 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data)
if (UCSI_CCI_CONNECTOR(cci))
ucsi_connector_change(g0->ucsi, UCSI_CCI_CONNECTOR(cci));
- if (test_bit(COMMAND_PENDING, &g0->flags) &&
- cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ if (cci & UCSI_CCI_ACK_COMPLETE && test_and_clear_bit(ACK_PENDING, &g0->flags))
+ complete(&g0->complete);
+ if (cci & UCSI_CCI_COMMAND_COMPLETE && test_and_clear_bit(COMMAND_PENDING, &g0->flags))
complete(&g0->complete);
return IRQ_HANDLED;
diff --git a/drivers/vfio/device_cdev.c b/drivers/vfio/device_cdev.c
index e75da0a70d1f..bb1817bd4ff3 100644
--- a/drivers/vfio/device_cdev.c
+++ b/drivers/vfio/device_cdev.c
@@ -39,6 +39,13 @@ int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
filep->private_data = df;
+ /*
+ * Use the pseudo fs inode on the device to link all mmaps
+ * to the same address space, allowing us to unmap all vmas
+ * associated to this device using unmap_mapping_range().
+ */
+ filep->f_mapping = device->inode->i_mapping;
+
return 0;
err_put_registration:
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index 610a429c6191..ded364588d29 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -286,6 +286,13 @@ static struct file *vfio_device_open_file(struct vfio_device *device)
*/
filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);
+ /*
+ * Use the pseudo fs inode on the device to link all mmaps
+ * to the same address space, allowing us to unmap all vmas
+ * associated to this device using unmap_mapping_range().
+ */
+ filep->f_mapping = device->inode->i_mapping;
+
if (device->group->type == VFIO_NO_IOMMU)
dev_warn(device->dev, "vfio-noiommu device opened by user "
"(%s:%d)\n", current->comm, task_pid_nr(current));
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 80cae87fff36..987c7921affa 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1610,100 +1610,20 @@ ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *bu
}
EXPORT_SYMBOL_GPL(vfio_pci_core_write);
-/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
-static int vfio_pci_zap_and_vma_lock(struct vfio_pci_core_device *vdev, bool try)
+static void vfio_pci_zap_bars(struct vfio_pci_core_device *vdev)
{
- struct vfio_pci_mmap_vma *mmap_vma, *tmp;
+ struct vfio_device *core_vdev = &vdev->vdev;
+ loff_t start = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR0_REGION_INDEX);
+ loff_t end = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_ROM_REGION_INDEX);
+ loff_t len = end - start;
- /*
- * Lock ordering:
- * vma_lock is nested under mmap_lock for vm_ops callback paths.
- * The memory_lock semaphore is used by both code paths calling
- * into this function to zap vmas and the vm_ops.fault callback
- * to protect the memory enable state of the device.
- *
- * When zapping vmas we need to maintain the mmap_lock => vma_lock
- * ordering, which requires using vma_lock to walk vma_list to
- * acquire an mm, then dropping vma_lock to get the mmap_lock and
- * reacquiring vma_lock. This logic is derived from similar
- * requirements in uverbs_user_mmap_disassociate().
- *
- * mmap_lock must always be the top-level lock when it is taken.
- * Therefore we can only hold the memory_lock write lock when
- * vma_list is empty, as we'd need to take mmap_lock to clear
- * entries. vma_list can only be guaranteed empty when holding
- * vma_lock, thus memory_lock is nested under vma_lock.
- *
- * This enables the vm_ops.fault callback to acquire vma_lock,
- * followed by memory_lock read lock, while already holding
- * mmap_lock without risk of deadlock.
- */
- while (1) {
- struct mm_struct *mm = NULL;
-
- if (try) {
- if (!mutex_trylock(&vdev->vma_lock))
- return 0;
- } else {
- mutex_lock(&vdev->vma_lock);
- }
- while (!list_empty(&vdev->vma_list)) {
- mmap_vma = list_first_entry(&vdev->vma_list,
- struct vfio_pci_mmap_vma,
- vma_next);
- mm = mmap_vma->vma->vm_mm;
- if (mmget_not_zero(mm))
- break;
-
- list_del(&mmap_vma->vma_next);
- kfree(mmap_vma);
- mm = NULL;
- }
- if (!mm)
- return 1;
- mutex_unlock(&vdev->vma_lock);
-
- if (try) {
- if (!mmap_read_trylock(mm)) {
- mmput(mm);
- return 0;
- }
- } else {
- mmap_read_lock(mm);
- }
- if (try) {
- if (!mutex_trylock(&vdev->vma_lock)) {
- mmap_read_unlock(mm);
- mmput(mm);
- return 0;
- }
- } else {
- mutex_lock(&vdev->vma_lock);
- }
- list_for_each_entry_safe(mmap_vma, tmp,
- &vdev->vma_list, vma_next) {
- struct vm_area_struct *vma = mmap_vma->vma;
-
- if (vma->vm_mm != mm)
- continue;
-
- list_del(&mmap_vma->vma_next);
- kfree(mmap_vma);
-
- zap_vma_ptes(vma, vma->vm_start,
- vma->vm_end - vma->vm_start);
- }
- mutex_unlock(&vdev->vma_lock);
- mmap_read_unlock(mm);
- mmput(mm);
- }
+ unmap_mapping_range(core_vdev->inode->i_mapping, start, len, true);
}
void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev)
{
- vfio_pci_zap_and_vma_lock(vdev, false);
down_write(&vdev->memory_lock);
- mutex_unlock(&vdev->vma_lock);
+ vfio_pci_zap_bars(vdev);
}
u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev)
@@ -1725,99 +1645,56 @@ void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 c
up_write(&vdev->memory_lock);
}
-/* Caller holds vma_lock */
-static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev,
- struct vm_area_struct *vma)
-{
- struct vfio_pci_mmap_vma *mmap_vma;
-
- mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL_ACCOUNT);
- if (!mmap_vma)
- return -ENOMEM;
-
- mmap_vma->vma = vma;
- list_add(&mmap_vma->vma_next, &vdev->vma_list);
-
- return 0;
-}
-
-/*
- * Zap mmaps on open so that we can fault them in on access and therefore
- * our vma_list only tracks mappings accessed since last zap.
- */
-static void vfio_pci_mmap_open(struct vm_area_struct *vma)
-{
- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-}
-
-static void vfio_pci_mmap_close(struct vm_area_struct *vma)
+static unsigned long vma_to_pfn(struct vm_area_struct *vma)
{
struct vfio_pci_core_device *vdev = vma->vm_private_data;
- struct vfio_pci_mmap_vma *mmap_vma;
+ int index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+ u64 pgoff;
- mutex_lock(&vdev->vma_lock);
- list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
- if (mmap_vma->vma == vma) {
- list_del(&mmap_vma->vma_next);
- kfree(mmap_vma);
- break;
- }
- }
- mutex_unlock(&vdev->vma_lock);
+ pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+
+ return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff;
}
static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct vfio_pci_core_device *vdev = vma->vm_private_data;
- struct vfio_pci_mmap_vma *mmap_vma;
- vm_fault_t ret = VM_FAULT_NOPAGE;
+ unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
+ unsigned long addr = vma->vm_start;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
+
+ pfn = vma_to_pfn(vma);
- mutex_lock(&vdev->vma_lock);
down_read(&vdev->memory_lock);
- /*
- * Memory region cannot be accessed if the low power feature is engaged
- * or memory access is disabled.
- */
- if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) {
- ret = VM_FAULT_SIGBUS;
- goto up_out;
- }
+ if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
+ goto out_unlock;
+
+ ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+ if (ret & VM_FAULT_ERROR)
+ goto out_unlock;
/*
- * We populate the whole vma on fault, so we need to test whether
- * the vma has already been mapped, such as for concurrent faults
- * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if
- * we ask it to fill the same range again.
+ * Pre-fault the remainder of the vma, abort further insertions and
+ * suppress the error if a fault is encountered during pre-fault.
*/
- list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
- if (mmap_vma->vma == vma)
- goto up_out;
- }
-
- if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot)) {
- ret = VM_FAULT_SIGBUS;
- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
- goto up_out;
- }
+ for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
+ if (addr == vmf->address)
+ continue;
- if (__vfio_pci_add_vma(vdev, vma)) {
- ret = VM_FAULT_OOM;
- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+ if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
+ break;
}
-up_out:
+out_unlock:
up_read(&vdev->memory_lock);
- mutex_unlock(&vdev->vma_lock);
+
return ret;
}
static const struct vm_operations_struct vfio_pci_mmap_ops = {
- .open = vfio_pci_mmap_open,
- .close = vfio_pci_mmap_close,
.fault = vfio_pci_mmap_fault,
};
@@ -1880,11 +1757,12 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma
vma->vm_private_data = vdev;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
/*
- * See remap_pfn_range(), called from vfio_pci_fault() but we can't
- * change vm_flags within the fault handler. Set them now.
+ * Set vm_flags now; they should not be changed in the fault handler.
+ * We want the same flags and page protection (decrypted above) as
+ * io_remap_pfn_range() would set.
*
* VM_ALLOW_ANY_UNCACHED: The VMA flag is implemented for ARM64,
* allowing KVM stage 2 device mapping attributes to use Normal-NC
@@ -2202,8 +2080,6 @@ int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
mutex_init(&vdev->ioeventfds_lock);
INIT_LIST_HEAD(&vdev->dummy_resources_list);
INIT_LIST_HEAD(&vdev->ioeventfds_list);
- mutex_init(&vdev->vma_lock);
- INIT_LIST_HEAD(&vdev->vma_list);
INIT_LIST_HEAD(&vdev->sriov_pfs_item);
init_rwsem(&vdev->memory_lock);
xa_init(&vdev->ctx);
@@ -2219,7 +2095,6 @@ void vfio_pci_core_release_dev(struct vfio_device *core_vdev)
mutex_destroy(&vdev->igate);
mutex_destroy(&vdev->ioeventfds_lock);
- mutex_destroy(&vdev->vma_lock);
kfree(vdev->region);
kfree(vdev->pm_save);
}
@@ -2497,26 +2372,15 @@ unwind:
return ret;
}
-/*
- * We need to get memory_lock for each device, but devices can share mmap_lock,
- * therefore we need to zap and hold the vma_lock for each device, and only then
- * get each memory_lock.
- */
static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
struct vfio_pci_group_info *groups,
struct iommufd_ctx *iommufd_ctx)
{
- struct vfio_pci_core_device *cur_mem;
- struct vfio_pci_core_device *cur_vma;
- struct vfio_pci_core_device *cur;
+ struct vfio_pci_core_device *vdev;
struct pci_dev *pdev;
- bool is_mem = true;
int ret;
mutex_lock(&dev_set->lock);
- cur_mem = list_first_entry(&dev_set->device_list,
- struct vfio_pci_core_device,
- vdev.dev_set_list);
pdev = vfio_pci_dev_set_resettable(dev_set);
if (!pdev) {
@@ -2533,7 +2397,7 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
if (ret)
goto err_unlock;
- list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
+ list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list) {
bool owned;
/*
@@ -2557,38 +2421,38 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
* Otherwise, reset is not allowed.
*/
if (iommufd_ctx) {
- int devid = vfio_iommufd_get_dev_id(&cur_vma->vdev,
+ int devid = vfio_iommufd_get_dev_id(&vdev->vdev,
iommufd_ctx);
owned = (devid > 0 || devid == -ENOENT);
} else {
- owned = vfio_dev_in_groups(&cur_vma->vdev, groups);
+ owned = vfio_dev_in_groups(&vdev->vdev, groups);
}
if (!owned) {
ret = -EINVAL;
- goto err_undo;
+ break;
}
/*
- * Locking multiple devices is prone to deadlock, runaway and
- * unwind if we hit contention.
+ * Take the memory write lock for each device and zap BAR
+ * mappings to prevent the user accessing the device while in
+ * reset. Locking multiple devices is prone to deadlock,
+ * runaway and unwind if we hit contention.
*/
- if (!vfio_pci_zap_and_vma_lock(cur_vma, true)) {
+ if (!down_write_trylock(&vdev->memory_lock)) {
ret = -EBUSY;
- goto err_undo;
+ break;
}
+
+ vfio_pci_zap_bars(vdev);
}
- cur_vma = NULL;
- list_for_each_entry(cur_mem, &dev_set->device_list, vdev.dev_set_list) {
- if (!down_write_trylock(&cur_mem->memory_lock)) {
- ret = -EBUSY;
- goto err_undo;
- }
- mutex_unlock(&cur_mem->vma_lock);
+ if (!list_entry_is_head(vdev,
+ &dev_set->device_list, vdev.dev_set_list)) {
+ vdev = list_prev_entry(vdev, vdev.dev_set_list);
+ goto err_undo;
}
- cur_mem = NULL;
/*
* The pci_reset_bus() will reset all the devices in the bus.
@@ -2599,25 +2463,22 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
* cause the PCI config space reset without restoring the original
* state (saved locally in 'vdev->pm_save').
*/
- list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
- vfio_pci_set_power_state(cur, PCI_D0);
+ list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list)
+ vfio_pci_set_power_state(vdev, PCI_D0);
ret = pci_reset_bus(pdev);
+ vdev = list_last_entry(&dev_set->device_list,
+ struct vfio_pci_core_device, vdev.dev_set_list);
+
err_undo:
- list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
- if (cur == cur_mem)
- is_mem = false;
- if (cur == cur_vma)
- break;
- if (is_mem)
- up_write(&cur->memory_lock);
- else
- mutex_unlock(&cur->vma_lock);
- }
+ list_for_each_entry_from_reverse(vdev, &dev_set->device_list,
+ vdev.dev_set_list)
+ up_write(&vdev->memory_lock);
+
+ list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list)
+ pm_runtime_put(&vdev->pdev->dev);
- list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
- pm_runtime_put(&cur->pdev->dev);
err_unlock:
mutex_unlock(&dev_set->lock);
return ret;
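Editor's note: with the vma_list bookkeeping removed, the new vfio-pci fault handler reconstructs the physical PFN from vma->vm_pgoff alone: the high bits of the offset encode the region (BAR) index and the low bits the page offset within it. A hedged standalone sketch of that decoding follows; VFIO_PCI_OFFSET_SHIFT = 40 and PAGE_SHIFT = 12 are assumed here, and the BAR start address is made up in place of pci_resource_start().

/* Decode a vfio-pci mmap offset into region index + page offset (sketch). */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT		12	/* assumed 4 KiB pages */
#define VFIO_PCI_OFFSET_SHIFT	40	/* assumed, mirroring vfio_pci_core.h */

int main(void)
{
	/* Example: a fault 3 pages into the mapping of region (BAR) 2. */
	uint64_t bar_start = 0xfe000000;	/* made-up BAR physical address */
	uint64_t vm_pgoff = ((uint64_t)2 << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) + 3;

	unsigned int index = vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	uint64_t pgoff = vm_pgoff &
			 (((uint64_t)1 << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	uint64_t pfn = (bar_start >> PAGE_SHIFT) + pgoff;

	printf("index=%u pgoff=%llu pfn=0x%llx\n", index,
	       (unsigned long long)pgoff, (unsigned long long)pfn);
	return 0;
}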
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index e97d796a54fb..a5a62d9d963f 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -22,8 +22,10 @@
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
+#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/pseudo_fs.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -43,9 +45,13 @@
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC "VFIO - User Level meta-driver"
+#define VFIO_MAGIC 0x5646494f /* "VFIO" */
+
static struct vfio {
struct class *device_class;
struct ida device_ida;
+ struct vfsmount *vfs_mount;
+ int fs_count;
} vfio;
#ifdef CONFIG_VFIO_NOIOMMU
@@ -186,6 +192,8 @@ static void vfio_device_release(struct device *dev)
if (device->ops->release)
device->ops->release(device);
+ iput(device->inode);
+ simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
kvfree(device);
}
@@ -228,6 +236,34 @@ out_free:
}
EXPORT_SYMBOL_GPL(_vfio_alloc_device);
+static int vfio_fs_init_fs_context(struct fs_context *fc)
+{
+ return init_pseudo(fc, VFIO_MAGIC) ? 0 : -ENOMEM;
+}
+
+static struct file_system_type vfio_fs_type = {
+ .name = "vfio",
+ .owner = THIS_MODULE,
+ .init_fs_context = vfio_fs_init_fs_context,
+ .kill_sb = kill_anon_super,
+};
+
+static struct inode *vfio_fs_inode_new(void)
+{
+ struct inode *inode;
+ int ret;
+
+ ret = simple_pin_fs(&vfio_fs_type, &vfio.vfs_mount, &vfio.fs_count);
+ if (ret)
+ return ERR_PTR(ret);
+
+ inode = alloc_anon_inode(vfio.vfs_mount->mnt_sb);
+ if (IS_ERR(inode))
+ simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
+
+ return inode;
+}
+
/*
* Initialize a vfio_device so it can be registered to vfio core.
*/
@@ -246,6 +282,11 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev,
init_completion(&device->comp);
device->dev = dev;
device->ops = ops;
+ device->inode = vfio_fs_inode_new();
+ if (IS_ERR(device->inode)) {
+ ret = PTR_ERR(device->inode);
+ goto out_inode;
+ }
if (ops->init) {
ret = ops->init(device);
@@ -260,6 +301,9 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev,
return 0;
out_uninit:
+ iput(device->inode);
+ simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
+out_inode:
vfio_release_device_set(device);
ida_free(&vfio.device_ida, device->index);
return ret;
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index afb1cc4606c5..d82e86d3ddf6 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -504,7 +504,7 @@ static ssize_t w1_master_attribute_store_remove(struct device *dev,
if (result == 0)
result = count;
} else {
- dev_info(dev, "Device %02x-%012llx doesn't exists\n", rn.family,
+ dev_info(dev, "Device %02x-%012llx doesn't exist\n", rn.family,
(unsigned long long)rn.id);
result = -EINVAL;
}
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 3a71c5eb2f83..19a0ea28e9f3 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -32,12 +32,8 @@ static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
* We are in process context(kernel thread), so can sleep.
*/
dev = kzalloc(sizeof(struct w1_master) + sizeof(struct w1_bus_master), GFP_KERNEL);
- if (!dev) {
- pr_err("Failed to allocate %zd bytes for new w1 device.\n",
- sizeof(struct w1_master));
+ if (!dev)
return NULL;
- }
-
dev->bus_master = (struct w1_bus_master *)(dev + 1);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 85eea38dbdf4..2882944d23cc 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -257,6 +257,7 @@ config GPIO_WATCHDOG_ARCH_INITCALL
config LENOVO_SE10_WDT
tristate "Lenovo SE10 Watchdog"
depends on (X86 && DMI) || COMPILE_TEST
+ depends on HAS_IOPORT
select WATCHDOG_CORE
help
If you say yes here you get support for the watchdog
diff --git a/drivers/watchdog/menz69_wdt.c b/drivers/watchdog/menz69_wdt.c
index c7de30270043..0508a65acfa6 100644
--- a/drivers/watchdog/menz69_wdt.c
+++ b/drivers/watchdog/menz69_wdt.c
@@ -161,6 +161,7 @@ static struct mcb_driver men_z069_driver = {
module_mcb_driver(men_z069_driver);
MODULE_AUTHOR("Johannes Thumshirn <jth@kernel.org>");
+MODULE_DESCRIPTION("Watchdog driver for the MEN z069 IP-Core");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z069");
MODULE_IMPORT_NS(MCB);
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index a7a12f2fe9de..b6e0236509bb 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -370,5 +370,6 @@ static struct platform_driver omap_wdt_driver = {
module_platform_driver(omap_wdt_driver);
MODULE_AUTHOR("George G. Davis");
+MODULE_DESCRIPTION("Driver for the TI OMAP 16xx/24xx/34xx 32KHz (non-secure) watchdog");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap_wdt");
diff --git a/drivers/watchdog/simatic-ipc-wdt.c b/drivers/watchdog/simatic-ipc-wdt.c
index cdc1a2e15180..1e91f0a560ff 100644
--- a/drivers/watchdog/simatic-ipc-wdt.c
+++ b/drivers/watchdog/simatic-ipc-wdt.c
@@ -227,6 +227,7 @@ static struct platform_driver simatic_ipc_wdt_driver = {
module_platform_driver(simatic_ipc_wdt_driver);
+MODULE_DESCRIPTION("Siemens SIMATIC IPC driver for Watchdogs");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("Gerd Haeussler <gerd.haeussler.ext@siemens.com>");
diff --git a/drivers/watchdog/ts4800_wdt.c b/drivers/watchdog/ts4800_wdt.c
index 0099403f4992..24b1ad52102e 100644
--- a/drivers/watchdog/ts4800_wdt.c
+++ b/drivers/watchdog/ts4800_wdt.c
@@ -200,5 +200,6 @@ static struct platform_driver ts4800_wdt_driver = {
module_platform_driver(ts4800_wdt_driver);
MODULE_AUTHOR("Damien Riegel <damien.riegel@savoirfairelinux.com>");
+MODULE_DESCRIPTION("Watchdog driver for TS-4800 based boards");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ts4800_wdt");
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 09d17e20f4a7..8c80d04811e4 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -118,6 +118,7 @@ static struct platform_driver twl4030_wdt_driver = {
module_platform_driver(twl4030_wdt_driver);
MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("TWL4030 Watchdog");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:twl4030_wdt");
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index f16f73581634..01338d4c2d9e 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -48,12 +48,17 @@ static int v9fs_cached_dentry_delete(const struct dentry *dentry)
static void v9fs_dentry_release(struct dentry *dentry)
{
struct hlist_node *p, *n;
+ struct hlist_head head;
p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n",
dentry, dentry);
- hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata)
+
+ spin_lock(&dentry->d_lock);
+ hlist_move_list((struct hlist_head *)&dentry->d_fsdata, &head);
+ spin_unlock(&dentry->d_lock);
+
+ hlist_for_each_safe(p, n, &head)
p9_fid_put(hlist_entry(p, struct p9_fid, dlist));
- dentry->d_fsdata = NULL;
}
static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
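Editor's note: the v9fs_dentry_release() change above splices the per-dentry fid list onto a local list head while holding d_lock, then drops the lock and puts the fids from the private copy, so the teardown walk cannot race with concurrent list updates. A hedged standalone sketch of the splice-under-lock pattern is below, with an invented node type and a pthread mutex in place of d_lock.

/* Splice a shared list to a private head under the lock, process it unlocked. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *shared_head;

static void add_node(int id)
{
	struct node *n = malloc(sizeof(*n));

	n->id = id;
	pthread_mutex_lock(&list_lock);
	n->next = shared_head;
	shared_head = n;
	pthread_mutex_unlock(&list_lock);
}

static void release_all(void)
{
	struct node *head, *n;

	pthread_mutex_lock(&list_lock);
	head = shared_head;		/* take the whole list ... */
	shared_head = NULL;		/* ... and leave an empty one behind */
	pthread_mutex_unlock(&list_lock);

	while ((n = head)) {		/* walk the private copy unlocked */
		head = n->next;
		printf("put node %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	add_node(1);
	add_node(2);
	release_all();
	return 0;
}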
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 7a3308d77606..fd72fc38c8f5 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -348,6 +348,7 @@ void v9fs_evict_inode(struct inode *inode)
__le32 __maybe_unused version;
if (!is_bad_inode(inode)) {
+ netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
version = cpu_to_le32(v9inode->qid.version);
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 94fc049aff58..15bb7989c387 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -648,6 +648,7 @@ void afs_evict_inode(struct inode *inode)
ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
+ netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
afs_set_cache_aux(vnode, &aux);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 97f50e9fd9eb..297487ee8323 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -140,6 +140,11 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
put_page(page);
if (ret < 0)
return ret;
+
+ /* Don't cross a backup volume mountpoint from a backup volume */
+ if (src_as->volume && src_as->volume->type == AFSVL_BACKVOL &&
+ ctx->type == AFSVL_BACKVOL)
+ return -ENODEV;
}
return 0;
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 346cd91f91f9..1de9fac3bcf4 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -29,7 +29,7 @@
#include <linux/sched/task.h>
#include <linux/sort.h>
-static void bch2_discard_one_bucket_fast(struct bch_fs *c, struct bpos bucket);
+static void bch2_discard_one_bucket_fast(struct bch_dev *, u64);
/* Persistent alloc info: */
@@ -259,6 +259,14 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
"invalid data type (got %u should be %u)",
a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
+ for (unsigned i = 0; i < 2; i++)
+ bkey_fsck_err_on(a.v->io_time[i] > LRU_TIME_MAX,
+ c, err,
+ alloc_key_io_time_bad,
+ "invalid io_time[%s]: %llu, max %llu",
+ i == READ ? "read" : "write",
+ a.v->io_time[i], LRU_TIME_MAX);
+
switch (a.v->data_type) {
case BCH_DATA_free:
case BCH_DATA_need_gc_gens:
@@ -741,6 +749,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
int ret = 0;
struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
@@ -756,8 +765,8 @@ int bch2_trigger_alloc(struct btree_trans *trans,
alloc_data_type_set(new_a, new_a->data_type);
if (bch2_bucket_sectors_total(*new_a) > bch2_bucket_sectors_total(*old_a)) {
- new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
- new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
+ new_a->io_time[READ] = bch2_current_io_time(c, READ);
+ new_a->io_time[WRITE]= bch2_current_io_time(c, WRITE);
SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
}
@@ -767,6 +776,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
!bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
new_a->gen++;
SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
+ alloc_data_type_set(new_a, new_a->data_type);
}
if (old_a->data_type != new_a->data_type ||
@@ -780,7 +790,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
if (new_a->data_type == BCH_DATA_cached &&
!new_a->io_time[READ])
- new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+ new_a->io_time[READ] = bch2_current_io_time(c, READ);
u64 old_lru = alloc_lru_idx_read(*old_a);
u64 new_lru = alloc_lru_idx_read(*new_a);
@@ -860,8 +870,14 @@ int bch2_trigger_alloc(struct btree_trans *trans,
}
percpu_down_read(&c->mark_lock);
- if (new_a->gen != old_a->gen)
- *bucket_gen(ca, new.k->p.offset) = new_a->gen;
+ if (new_a->gen != old_a->gen) {
+ u8 *gen = bucket_gen(ca, new.k->p.offset);
+ if (unlikely(!gen)) {
+ percpu_up_read(&c->mark_lock);
+ goto invalid_bucket;
+ }
+ *gen = new_a->gen;
+ }
bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
percpu_up_read(&c->mark_lock);
@@ -875,14 +891,14 @@ int bch2_trigger_alloc(struct btree_trans *trans,
closure_wake_up(&c->freelist_wait);
if (statechange(a->data_type == BCH_DATA_need_discard) &&
- !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
+ !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset) &&
bucket_flushed(new_a))
- bch2_discard_one_bucket_fast(c, new.k->p);
+ bch2_discard_one_bucket_fast(ca, new.k->p.offset);
if (statechange(a->data_type == BCH_DATA_cached) &&
!bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
- bch2_do_invalidates(c);
+ bch2_dev_do_invalidates(ca);
if (statechange(a->data_type == BCH_DATA_need_gc_gens))
bch2_gc_gens_async(c);
@@ -895,6 +911,11 @@ int bch2_trigger_alloc(struct btree_trans *trans,
percpu_down_read(&c->mark_lock);
struct bucket *g = gc_bucket(ca, new.k->p.offset);
+ if (unlikely(!g)) {
+ percpu_up_read(&c->mark_lock);
+ goto invalid_bucket;
+ }
+ g->gen_valid = 1;
bucket_lock(g);
@@ -910,8 +931,14 @@ int bch2_trigger_alloc(struct btree_trans *trans,
percpu_up_read(&c->mark_lock);
}
err:
+ printbuf_exit(&buf);
bch2_dev_put(ca);
return ret;
+invalid_bucket:
+ bch2_fs_inconsistent(c, "reference to invalid bucket\n %s",
+ (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
+ ret = -EIO;
+ goto err;
}
/*
@@ -1561,7 +1588,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
if (ret)
goto err;
- a_mut->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
+ a_mut->v.io_time[READ] = bch2_current_io_time(c, READ);
ret = bch2_trans_update(trans, alloc_iter,
&a_mut->k_i, BTREE_TRIGGER_norun);
if (ret)
@@ -1609,34 +1636,38 @@ int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
return ret;
}
-static int discard_in_flight_add(struct bch_fs *c, struct bpos bucket)
+static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress)
{
int ret;
- mutex_lock(&c->discard_buckets_in_flight_lock);
- darray_for_each(c->discard_buckets_in_flight, i)
- if (bkey_eq(*i, bucket)) {
- ret = -EEXIST;
+ mutex_lock(&ca->discard_buckets_in_flight_lock);
+ darray_for_each(ca->discard_buckets_in_flight, i)
+ if (i->bucket == bucket) {
+ ret = -BCH_ERR_EEXIST_discard_in_flight_add;
goto out;
}
- ret = darray_push(&c->discard_buckets_in_flight, bucket);
+ ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) {
+ .in_progress = in_progress,
+ .bucket = bucket,
+ }));
out:
- mutex_unlock(&c->discard_buckets_in_flight_lock);
+ mutex_unlock(&ca->discard_buckets_in_flight_lock);
return ret;
}
-static void discard_in_flight_remove(struct bch_fs *c, struct bpos bucket)
+static void discard_in_flight_remove(struct bch_dev *ca, u64 bucket)
{
- mutex_lock(&c->discard_buckets_in_flight_lock);
- darray_for_each(c->discard_buckets_in_flight, i)
- if (bkey_eq(*i, bucket)) {
- darray_remove_item(&c->discard_buckets_in_flight, i);
+ mutex_lock(&ca->discard_buckets_in_flight_lock);
+ darray_for_each(ca->discard_buckets_in_flight, i)
+ if (i->bucket == bucket) {
+ BUG_ON(!i->in_progress);
+ darray_remove_item(&ca->discard_buckets_in_flight, i);
goto found;
}
BUG();
found:
- mutex_unlock(&c->discard_buckets_in_flight_lock);
+ mutex_unlock(&ca->discard_buckets_in_flight_lock);
}
struct discard_buckets_state {
@@ -1644,26 +1675,11 @@ struct discard_buckets_state {
u64 open;
u64 need_journal_commit;
u64 discarded;
- struct bch_dev *ca;
u64 need_journal_commit_this_dev;
};
-static void discard_buckets_next_dev(struct bch_fs *c, struct discard_buckets_state *s, struct bch_dev *ca)
-{
- if (s->ca == ca)
- return;
-
- if (s->ca && s->need_journal_commit_this_dev >
- bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
- bch2_journal_flush_async(&c->journal, NULL);
-
- if (s->ca)
- percpu_ref_put(&s->ca->io_ref);
- s->ca = ca;
- s->need_journal_commit_this_dev = 0;
-}
-
static int bch2_discard_one_bucket(struct btree_trans *trans,
+ struct bch_dev *ca,
struct btree_iter *need_discard_iter,
struct bpos *discard_pos_done,
struct discard_buckets_state *s)
@@ -1677,16 +1693,6 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
bool discard_locked = false;
int ret = 0;
- struct bch_dev *ca = s->ca && s->ca->dev_idx == pos.inode
- ? s->ca
- : bch2_dev_get_ioref(c, pos.inode, WRITE);
- if (!ca) {
- bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
- return 0;
- }
-
- discard_buckets_next_dev(c, s, ca);
-
if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
s->open++;
goto out;
@@ -1746,7 +1752,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
goto out;
}
- if (discard_in_flight_add(c, SPOS(iter.pos.inode, iter.pos.offset, true)))
+ if (discard_in_flight_add(ca, iter.pos.offset, true))
goto out;
discard_locked = true;
@@ -1770,8 +1776,9 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
}
SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
- alloc_data_type_set(&a->v, a->v.data_type);
write:
+ alloc_data_type_set(&a->v, a->v.data_type);
+
ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
bch2_trans_commit(trans, NULL, NULL,
BCH_WATERMARK_btree|
@@ -1783,7 +1790,7 @@ write:
s->discarded++;
out:
if (discard_locked)
- discard_in_flight_remove(c, iter.pos);
+ discard_in_flight_remove(ca, iter.pos.offset);
s->seen++;
bch2_trans_iter_exit(trans, &iter);
printbuf_exit(&buf);
@@ -1792,7 +1799,8 @@ out:
static void bch2_do_discards_work(struct work_struct *work)
{
- struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
+ struct bch_dev *ca = container_of(work, struct bch_dev, discard_work);
+ struct bch_fs *c = ca->fs;
struct discard_buckets_state s = {};
struct bpos discard_pos_done = POS_MAX;
int ret;
@@ -1803,23 +1811,41 @@ static void bch2_do_discards_work(struct work_struct *work)
* successful commit:
*/
ret = bch2_trans_run(c,
- for_each_btree_key(trans, iter,
- BTREE_ID_need_discard, POS_MIN, 0, k,
- bch2_discard_one_bucket(trans, &iter, &discard_pos_done, &s)));
-
- discard_buckets_next_dev(c, &s, NULL);
+ for_each_btree_key_upto(trans, iter,
+ BTREE_ID_need_discard,
+ POS(ca->dev_idx, 0),
+ POS(ca->dev_idx, U64_MAX), 0, k,
+ bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s)));
trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
bch2_err_str(ret));
bch2_write_ref_put(c, BCH_WRITE_REF_discard);
+ percpu_ref_put(&ca->io_ref);
+}
+
+void bch2_dev_do_discards(struct bch_dev *ca)
+{
+ struct bch_fs *c = ca->fs;
+
+ if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+ return;
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard))
+ goto put_ioref;
+
+ if (queue_work(c->write_ref_wq, &ca->discard_work))
+ return;
+
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard);
+put_ioref:
+ percpu_ref_put(&ca->io_ref);
}
void bch2_do_discards(struct bch_fs *c)
{
- if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
- !queue_work(c->write_ref_wq, &c->discard_work))
- bch2_write_ref_put(c, BCH_WRITE_REF_discard);
+ for_each_member_device(c, ca)
+ bch2_dev_do_discards(ca);
}
static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpos bucket)
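
The new per-device entry points above (bch2_dev_do_discards() and, later in this patch, bch2_dev_do_invalidates()) follow one pattern: take the device io ref, take the filesystem write ref, queue the per-device work item, and unwind exactly the refs taken so far if a ref cannot be taken or the work was already queued; the worker itself drops both refs when it finishes. A generic sketch of that acquire-or-unwind shape (every name below is a stand-in, not a bcachefs or kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the two reference counts and the workqueue. */
static bool try_get_io_ref(void)    { return true; }
static void put_io_ref(void)        { puts("io ref dropped"); }
static bool try_get_write_ref(void) { return true; }
static void put_write_ref(void)     { puts("write ref dropped"); }
static bool queue_dev_work(void)    { return false; /* simulate: already pending */ }

/* Shape of bch2_dev_do_discards(): every failure path undoes exactly the
 * refs taken so far; on success the queued worker owns both refs. */
static void dev_do_discards(void)
{
        if (!try_get_io_ref())
                return;

        if (!try_get_write_ref())
                goto put_ioref;

        if (queue_dev_work())
                return;         /* worker will drop the refs when it runs */

        put_write_ref();
put_ioref:
        put_io_ref();
}

int main(void)
{
        dev_do_discards();
        return 0;
}
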
@@ -1848,68 +1874,69 @@ err:
static void bch2_do_discards_fast_work(struct work_struct *work)
{
- struct bch_fs *c = container_of(work, struct bch_fs, discard_fast_work);
+ struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work);
+ struct bch_fs *c = ca->fs;
while (1) {
bool got_bucket = false;
- struct bpos bucket;
- struct bch_dev *ca;
+ u64 bucket;
- mutex_lock(&c->discard_buckets_in_flight_lock);
- darray_for_each(c->discard_buckets_in_flight, i) {
- if (i->snapshot)
+ mutex_lock(&ca->discard_buckets_in_flight_lock);
+ darray_for_each(ca->discard_buckets_in_flight, i) {
+ if (i->in_progress)
continue;
- ca = bch2_dev_get_ioref(c, i->inode, WRITE);
- if (!ca) {
- darray_remove_item(&c->discard_buckets_in_flight, i);
- continue;
- }
-
got_bucket = true;
- bucket = *i;
- i->snapshot = true;
+ bucket = i->bucket;
+ i->in_progress = true;
break;
}
- mutex_unlock(&c->discard_buckets_in_flight_lock);
+ mutex_unlock(&ca->discard_buckets_in_flight_lock);
if (!got_bucket)
break;
if (ca->mi.discard && !c->opts.nochanges)
blkdev_issue_discard(ca->disk_sb.bdev,
- bucket.offset * ca->mi.bucket_size,
+ bucket_to_sector(ca, bucket),
ca->mi.bucket_size,
GFP_KERNEL);
int ret = bch2_trans_do(c, NULL, NULL,
- BCH_WATERMARK_btree|
- BCH_TRANS_COMMIT_no_enospc,
- bch2_clear_bucket_needs_discard(trans, bucket));
+ BCH_WATERMARK_btree|
+ BCH_TRANS_COMMIT_no_enospc,
+ bch2_clear_bucket_needs_discard(trans, POS(ca->dev_idx, bucket)));
bch_err_fn(c, ret);
- percpu_ref_put(&ca->io_ref);
- discard_in_flight_remove(c, bucket);
+ discard_in_flight_remove(ca, bucket);
if (ret)
break;
}
bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
+ percpu_ref_put(&ca->io_ref);
}
-static void bch2_discard_one_bucket_fast(struct bch_fs *c, struct bpos bucket)
+static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
{
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, bucket.inode);
- bool dead = !ca || percpu_ref_is_dying(&ca->io_ref);
- rcu_read_unlock();
+ struct bch_fs *c = ca->fs;
+
+ if (discard_in_flight_add(ca, bucket, false))
+ return;
+
+ if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+ return;
- if (!dead &&
- !discard_in_flight_add(c, bucket) &&
- bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast) &&
- !queue_work(c->write_ref_wq, &c->discard_fast_work))
- bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast))
+ goto put_ioref;
+
+ if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
+ return;
+
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
+put_ioref:
+ percpu_ref_put(&ca->io_ref);
}
static int invalidate_one_bucket(struct btree_trans *trans,
@@ -1957,8 +1984,8 @@ static int invalidate_one_bucket(struct btree_trans *trans,
a->v.data_type = 0;
a->v.dirty_sectors = 0;
a->v.cached_sectors = 0;
- a->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
- a->v.io_time[WRITE] = atomic64_read(&c->io_clock[WRITE].now);
+ a->v.io_time[READ] = bch2_current_io_time(c, READ);
+ a->v.io_time[WRITE] = bch2_current_io_time(c, WRITE);
ret = bch2_trans_commit(trans, NULL, NULL,
BCH_WATERMARK_btree|
@@ -1993,9 +2020,25 @@ err:
goto out;
}
+static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter,
+ struct bch_dev *ca, bool *wrapped)
+{
+ struct bkey_s_c k;
+again:
+ k = bch2_btree_iter_peek_upto(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
+ if (!k.k && !*wrapped) {
+ bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
+ *wrapped = true;
+ goto again;
+ }
+
+ return k;
+}
+
static void bch2_do_invalidates_work(struct work_struct *work)
{
- struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
+ struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work);
+ struct bch_fs *c = ca->fs;
struct btree_trans *trans = bch2_trans_get(c);
int ret = 0;
@@ -2003,31 +2046,63 @@ static void bch2_do_invalidates_work(struct work_struct *work)
if (ret)
goto err;
- for_each_member_device(c, ca) {
- s64 nr_to_invalidate =
- should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
+ s64 nr_to_invalidate =
+ should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
+ struct btree_iter iter;
+ bool wrapped = false;
- ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
- lru_pos(ca->dev_idx, 0, 0),
- lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
- BTREE_ITER_intent, k,
- invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_lru,
+ lru_pos(ca->dev_idx, 0,
+ ((bch2_current_io_time(c, READ) + U32_MAX) &
+ LRU_TIME_MAX)), 0);
- if (ret < 0) {
- bch2_dev_put(ca);
+ while (true) {
+ bch2_trans_begin(trans);
+
+ struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped);
+ ret = bkey_err(k);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
break;
- }
+ if (!k.k)
+ break;
+
+ ret = invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate);
+ if (ret)
+ break;
+
+ bch2_btree_iter_advance(&iter);
}
+ bch2_trans_iter_exit(trans, &iter);
err:
bch2_trans_put(trans);
bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
+ percpu_ref_put(&ca->io_ref);
+}
+
+void bch2_dev_do_invalidates(struct bch_dev *ca)
+{
+ struct bch_fs *c = ca->fs;
+
+ if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+ return;
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate))
+ goto put_ioref;
+
+ if (queue_work(c->write_ref_wq, &ca->invalidate_work))
+ return;
+
+ bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
+put_ioref:
+ percpu_ref_put(&ca->io_ref);
}
void bch2_do_invalidates(struct bch_fs *c)
{
- if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
- !queue_work(c->write_ref_wq, &c->invalidate_work))
- bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
+ for_each_member_device(c, ca)
+ bch2_dev_do_invalidates(ca);
}
int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
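
The rewritten invalidate worker above starts its LRU scan partway through the time space (roughly the current IO time plus U32_MAX, masked to LRU_TIME_MAX) and, via next_lru_key(), wraps back to the start exactly once if it runs off the end. The same wrap-once scan over a plain array, as an illustration only (next_entry() and the array contents are made up):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Wrap-once scan, same shape as next_lru_key(): scan forward from the
 * starting position, and if nothing is found, restart from 0 exactly once. */
static int next_entry(const int *lru, size_t nr, size_t *pos, bool *wrapped)
{
again:
        for (; *pos < nr; (*pos)++)
                if (lru[*pos])
                        return lru[*pos];

        if (!*wrapped) {
                *pos = 0;
                *wrapped = true;
                goto again;
        }
        return 0;
}

int main(void)
{
        int lru[] = { 3, 0, 0, 7, 0 };
        size_t pos = 4;         /* start near the end, like starting at "now" */
        bool wrapped = false;
        int v;

        while ((v = next_entry(lru, 5, &pos, &wrapped))) {
                printf("%d\n", v);      /* prints 3 then 7 */
                pos++;                  /* advance past the consumed entry */
        }
        return 0;
}
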
@@ -2186,7 +2261,7 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
if (ret)
return ret;
- now = atomic64_read(&c->io_clock[rw].now);
+ now = bch2_current_io_time(c, rw);
if (a->v.io_time[rw] == now)
goto out;
@@ -2343,16 +2418,20 @@ void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
set_bit(ca->dev_idx, c->rw_devs[i].d);
}
-void bch2_fs_allocator_background_exit(struct bch_fs *c)
+void bch2_dev_allocator_background_exit(struct bch_dev *ca)
+{
+ darray_exit(&ca->discard_buckets_in_flight);
+}
+
+void bch2_dev_allocator_background_init(struct bch_dev *ca)
{
- darray_exit(&c->discard_buckets_in_flight);
+ mutex_init(&ca->discard_buckets_in_flight_lock);
+ INIT_WORK(&ca->discard_work, bch2_do_discards_work);
+ INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work);
+ INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work);
}
void bch2_fs_allocator_background_init(struct bch_fs *c)
{
spin_lock_init(&c->freelist_lock);
- mutex_init(&c->discard_buckets_in_flight_lock);
- INIT_WORK(&c->discard_work, bch2_do_discards_work);
- INIT_WORK(&c->discard_fast_work, bch2_do_discards_fast_work);
- INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
}
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index ae31a94be6f9..ba2c5557a3f0 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -141,7 +141,13 @@ static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
!bch2_bucket_sectors_fragmented(ca, a))
return 0;
- u64 d = bch2_bucket_sectors_dirty(a);
+ /*
+ * avoid overflowing LRU_TIME_BITS on a corrupted fs, when
+ * bucket_sectors_dirty is (much) bigger than bucket_size
+ */
+ u64 d = min(bch2_bucket_sectors_dirty(a),
+ ca->mi.bucket_size);
+
return div_u64(d * (1ULL << 31), ca->mi.bucket_size);
}
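
The clamp added above keeps the fragmentation LRU index inside LRU_TIME_BITS even when a corrupted alloc key reports more dirty sectors than the bucket can hold: with d capped at bucket_size, the result of d * (1 << 31) / bucket_size never exceeds 2^31, comfortably below the 48-bit limit. A standalone sketch of that arithmetic (fragmentation_idx() and the values fed to it are illustrative, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

#define LRU_TIME_BITS 48
#define LRU_TIME_MAX ((1ULL << LRU_TIME_BITS) - 1)

/* Mirror of the clamped calculation: result stays within LRU_TIME_MAX. */
static uint64_t fragmentation_idx(uint64_t dirty_sectors, uint64_t bucket_size)
{
        uint64_t d = dirty_sectors < bucket_size ? dirty_sectors : bucket_size;

        return (d * (1ULL << 31)) / bucket_size;
}

int main(void)
{
        uint64_t bucket_size = 1024;            /* sectors; illustrative value */
        uint64_t corrupt_dirty = 1ULL << 40;    /* nonsense count from a corrupt fs */

        /* Unclamped, the intermediate product would be 2^40 * 2^31 and wrap. */
        uint64_t idx = fragmentation_idx(corrupt_dirty, bucket_size);

        printf("idx = %llu, fits in LRU time: %d\n",
               (unsigned long long) idx, idx <= LRU_TIME_MAX);
        return 0;
}
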
@@ -269,6 +275,7 @@ int bch2_trigger_alloc(struct btree_trans *, enum btree_id, unsigned,
enum btree_iter_update_trigger_flags);
int bch2_check_alloc_info(struct bch_fs *);
int bch2_check_alloc_to_lru_refs(struct bch_fs *);
+void bch2_dev_do_discards(struct bch_dev *);
void bch2_do_discards(struct bch_fs *);
static inline u64 should_invalidate_buckets(struct bch_dev *ca,
@@ -283,6 +290,7 @@ static inline u64 should_invalidate_buckets(struct bch_dev *ca,
return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
}
+void bch2_dev_do_invalidates(struct bch_dev *);
void bch2_do_invalidates(struct bch_fs *);
static inline struct bch_backpointer *alloc_v4_backpointers(struct bch_alloc_v4 *a)
@@ -306,7 +314,9 @@ u64 bch2_min_rw_member_capacity(struct bch_fs *);
void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
-void bch2_fs_allocator_background_exit(struct bch_fs *);
+void bch2_dev_allocator_background_exit(struct bch_dev *);
+void bch2_dev_allocator_background_init(struct bch_dev *);
+
void bch2_fs_allocator_background_init(struct bch_fs *);
#endif /* _BCACHEFS_ALLOC_BACKGROUND_H */
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 927a5f300b30..9d3d64746a5b 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -621,13 +621,13 @@ again:
avail = dev_buckets_free(ca, *usage, watermark);
if (usage->d[BCH_DATA_need_discard].buckets > avail)
- bch2_do_discards(c);
+ bch2_dev_do_discards(ca);
if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
bch2_gc_gens_async(c);
if (should_invalidate_buckets(ca, *usage))
- bch2_do_invalidates(c);
+ bch2_dev_do_invalidates(ca);
if (!avail) {
if (cl && !waiting) {
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index 692b1c7d5018..4321f9fb73bd 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -690,7 +690,7 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
ptrs = bch2_bkey_ptrs_c(k);
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- struct bpos bucket_pos;
+ struct bpos bucket_pos = POS_MIN;
struct bch_backpointer bp;
if (p.ptr.cached)
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index bc0ea2c4efef..1106fec6e155 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -457,6 +457,7 @@ enum bch_time_stats {
};
#include "alloc_types.h"
+#include "btree_gc_types.h"
#include "btree_types.h"
#include "btree_node_scan_types.h"
#include "btree_write_buffer_types.h"
@@ -488,53 +489,15 @@ enum bch_time_stats {
struct btree;
-enum gc_phase {
- GC_PHASE_NOT_RUNNING,
- GC_PHASE_START,
- GC_PHASE_SB,
-
- GC_PHASE_BTREE_stripes,
- GC_PHASE_BTREE_extents,
- GC_PHASE_BTREE_inodes,
- GC_PHASE_BTREE_dirents,
- GC_PHASE_BTREE_xattrs,
- GC_PHASE_BTREE_alloc,
- GC_PHASE_BTREE_quotas,
- GC_PHASE_BTREE_reflink,
- GC_PHASE_BTREE_subvolumes,
- GC_PHASE_BTREE_snapshots,
- GC_PHASE_BTREE_lru,
- GC_PHASE_BTREE_freespace,
- GC_PHASE_BTREE_need_discard,
- GC_PHASE_BTREE_backpointers,
- GC_PHASE_BTREE_bucket_gens,
- GC_PHASE_BTREE_snapshot_trees,
- GC_PHASE_BTREE_deleted_inodes,
- GC_PHASE_BTREE_logged_ops,
- GC_PHASE_BTREE_rebalance_work,
- GC_PHASE_BTREE_subvolume_children,
-
- GC_PHASE_PENDING_DELETE,
-};
-
-struct gc_pos {
- enum gc_phase phase;
- u16 level;
- struct bpos pos;
-};
-
-struct reflink_gc {
- u64 offset;
- u32 size;
- u32 refcount;
-};
-
-typedef GENRADIX(struct reflink_gc) reflink_gc_table;
-
struct io_count {
u64 sectors[2][BCH_DATA_NR];
};
+struct discard_in_flight {
+ bool in_progress:1;
+ u64 bucket:63;
+};
+
struct bch_dev {
struct kobject kobj;
#ifdef CONFIG_BCACHEFS_DEBUG
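
struct discard_in_flight, added above, packs the in-progress flag and the bucket number into one 64-bit word using bit-fields (a u64 bit-field is a compiler extension the kernel already relies on). A small stand-alone check of that layout, with made-up values:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same layout idea as discard_in_flight: a 1-bit flag plus a 63-bit bucket. */
struct discard_in_flight_example {
        bool     in_progress:1;
        uint64_t bucket:63;
};

int main(void)
{
        struct discard_in_flight_example d = {
                .in_progress = false,
                .bucket      = (1ULL << 62) + 12345,    /* arbitrary large bucket */
        };

        assert(sizeof(d) <= 2 * sizeof(uint64_t));      /* one u64 on common ABIs */
        assert(d.bucket == (1ULL << 62) + 12345);
        d.in_progress = true;
        assert(d.in_progress);
        return 0;
}
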
@@ -596,6 +559,12 @@ struct bch_dev {
size_t inc_gen_really_needs_gc;
size_t buckets_waiting_on_journal;
+ struct work_struct invalidate_work;
+ struct work_struct discard_work;
+ struct mutex discard_buckets_in_flight_lock;
+ DARRAY(struct discard_in_flight) discard_buckets_in_flight;
+ struct work_struct discard_fast_work;
+
atomic64_t rebalance_work;
struct journal_device journal;
@@ -832,7 +801,8 @@ struct bch_fs {
/* BTREE CACHE */
struct bio_set btree_bio;
- struct workqueue_struct *io_complete_wq;
+ struct workqueue_struct *btree_read_complete_wq;
+ struct workqueue_struct *btree_write_submit_wq;
struct btree_root btree_roots_known[BTREE_ID_NR];
DARRAY(struct btree_root) btree_roots_extra;
@@ -956,11 +926,6 @@ struct bch_fs {
unsigned write_points_nr;
struct buckets_waiting_for_journal buckets_waiting_for_journal;
- struct work_struct invalidate_work;
- struct work_struct discard_work;
- struct mutex discard_buckets_in_flight_lock;
- DARRAY(struct bpos) discard_buckets_in_flight;
- struct work_struct discard_fast_work;
/* GARBAGE COLLECTION */
struct work_struct gc_gens_work;
@@ -1255,6 +1220,11 @@ static inline s64 bch2_current_time(const struct bch_fs *c)
return timespec_to_bch2_time(c, now);
}
+static inline u64 bch2_current_io_time(const struct bch_fs *c, int rw)
+{
+ return max(1ULL, (u64) atomic64_read(&c->io_clock[rw].now) & LRU_TIME_MAX);
+}
+
static inline struct stdio_redirect *bch2_fs_stdio_redirect(struct bch_fs *c)
{
struct stdio_redirect *stdio = c->stdio;
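
bch2_current_io_time(), added above, is what the allocator code now uses instead of reading io_clock[rw].now directly: the raw clock value is masked down to the 48 bits an LRU key can carry and clamped to at least 1. A minimal sketch of the same shape (current_io_time() here is illustrative, not the kernel function):

#include <assert.h>
#include <stdint.h>

#define LRU_TIME_BITS 48
#define LRU_TIME_MAX ((1ULL << LRU_TIME_BITS) - 1)

/* Mask the IO clock into the LRU key space and never return zero. */
static uint64_t current_io_time(uint64_t io_clock_now)
{
        uint64_t t = io_clock_now & LRU_TIME_MAX;

        return t ? t : 1;
}

int main(void)
{
        assert(current_io_time(0) == 1);
        assert(current_io_time(1ULL << 60) == 1);       /* high bits masked off */
        assert(current_io_time(12345) == 12345);
        return 0;
}
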
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index d801e19cb489..e3b1bde489c3 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -476,6 +476,9 @@ struct bch_lru {
#define LRU_ID_STRIPES (1U << 16)
+#define LRU_TIME_BITS 48
+#define LRU_TIME_MAX ((1ULL << LRU_TIME_BITS) - 1)
+
/* Optional/variable size superblock sections: */
struct bch_sb_field {
@@ -503,16 +506,22 @@ struct bch_sb_field {
#include "alloc_background_format.h"
#include "extents_format.h"
-#include "reflink_format.h"
#include "ec_format.h"
-#include "inode_format.h"
#include "dirent_format.h"
-#include "xattr_format.h"
-#include "quota_format.h"
+#include "disk_groups_format.h"
+#include "inode_format.h"
+#include "journal_seq_blacklist_format.h"
#include "logged_ops_format.h"
+#include "quota_format.h"
+#include "reflink_format.h"
+#include "replicas_format.h"
#include "snapshot_format.h"
#include "subvolume_format.h"
#include "sb-counters_format.h"
+#include "sb-downgrade_format.h"
+#include "sb-errors_format.h"
+#include "sb-members_format.h"
+#include "xattr_format.h"
enum bch_sb_field_type {
#define x(f, nr) BCH_SB_FIELD_##f = nr,
@@ -545,107 +554,6 @@ struct bch_sb_field_journal_v2 {
} d[];
};
-/* BCH_SB_FIELD_members_v1: */
-
-#define BCH_MIN_NR_NBUCKETS (1 << 6)
-
-#define BCH_IOPS_MEASUREMENTS() \
- x(seqread, 0) \
- x(seqwrite, 1) \
- x(randread, 2) \
- x(randwrite, 3)
-
-enum bch_iops_measurement {
-#define x(t, n) BCH_IOPS_##t = n,
- BCH_IOPS_MEASUREMENTS()
-#undef x
- BCH_IOPS_NR
-};
-
-#define BCH_MEMBER_ERROR_TYPES() \
- x(read, 0) \
- x(write, 1) \
- x(checksum, 2)
-
-enum bch_member_error_type {
-#define x(t, n) BCH_MEMBER_ERROR_##t = n,
- BCH_MEMBER_ERROR_TYPES()
-#undef x
- BCH_MEMBER_ERROR_NR
-};
-
-struct bch_member {
- __uuid_t uuid;
- __le64 nbuckets; /* device size */
- __le16 first_bucket; /* index of first bucket used */
- __le16 bucket_size; /* sectors */
- __u8 btree_bitmap_shift;
- __u8 pad[3];
- __le64 last_mount; /* time_t */
-
- __le64 flags;
- __le32 iops[4];
- __le64 errors[BCH_MEMBER_ERROR_NR];
- __le64 errors_at_reset[BCH_MEMBER_ERROR_NR];
- __le64 errors_reset_time;
- __le64 seq;
- __le64 btree_allocated_bitmap;
- /*
- * On recovery from a clean shutdown we don't normally read the journal,
- * but we still want to resume writing from where we left off so we
- * don't overwrite more than is necessary, for list journal debugging:
- */
- __le32 last_journal_bucket;
- __le32 last_journal_bucket_offset;
-};
-
-/*
- * This limit comes from the bucket_gens array - it's a single allocation, and
- * kernel allocation are limited to INT_MAX
- */
-#define BCH_MEMBER_NBUCKETS_MAX (INT_MAX - 64)
-
-#define BCH_MEMBER_V1_BYTES 56
-
-LE64_BITMASK(BCH_MEMBER_STATE, struct bch_member, flags, 0, 4)
-/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
-LE64_BITMASK(BCH_MEMBER_DISCARD, struct bch_member, flags, 14, 15)
-LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED, struct bch_member, flags, 15, 20)
-LE64_BITMASK(BCH_MEMBER_GROUP, struct bch_member, flags, 20, 28)
-LE64_BITMASK(BCH_MEMBER_DURABILITY, struct bch_member, flags, 28, 30)
-LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
- struct bch_member, flags, 30, 31)
-
-#if 0
-LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20);
-LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
-#endif
-
-#define BCH_MEMBER_STATES() \
- x(rw, 0) \
- x(ro, 1) \
- x(failed, 2) \
- x(spare, 3)
-
-enum bch_member_state {
-#define x(t, n) BCH_MEMBER_STATE_##t = n,
- BCH_MEMBER_STATES()
-#undef x
- BCH_MEMBER_STATE_NR
-};
-
-struct bch_sb_field_members_v1 {
- struct bch_sb_field field;
- struct bch_member _members[]; //Members are now variable size
-};
-
-struct bch_sb_field_members_v2 {
- struct bch_sb_field field;
- __le16 member_bytes; //size of single member entry
- u8 pad[6];
- struct bch_member _members[];
-};
-
/* BCH_SB_FIELD_crypt: */
struct nonce {
@@ -694,8 +602,6 @@ LE64_BITMASK(BCH_KDF_SCRYPT_N, struct bch_sb_field_crypt, kdf_flags, 0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R, struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48);
-/* BCH_SB_FIELD_replicas: */
-
#define BCH_DATA_TYPES() \
x(free, 0) \
x(sb, 1) \
@@ -738,50 +644,6 @@ static inline bool data_type_is_hidden(enum bch_data_type type)
}
}
-struct bch_replicas_entry_v0 {
- __u8 data_type;
- __u8 nr_devs;
- __u8 devs[];
-} __packed;
-
-struct bch_sb_field_replicas_v0 {
- struct bch_sb_field field;
- struct bch_replicas_entry_v0 entries[];
-} __packed __aligned(8);
-
-struct bch_replicas_entry_v1 {
- __u8 data_type;
- __u8 nr_devs;
- __u8 nr_required;
- __u8 devs[];
-} __packed;
-
-#define replicas_entry_bytes(_i) \
- (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
-
-struct bch_sb_field_replicas {
- struct bch_sb_field field;
- struct bch_replicas_entry_v1 entries[];
-} __packed __aligned(8);
-
-/* BCH_SB_FIELD_disk_groups: */
-
-#define BCH_SB_LABEL_SIZE 32
-
-struct bch_disk_group {
- __u8 label[BCH_SB_LABEL_SIZE];
- __le64 flags[2];
-} __packed __aligned(8);
-
-LE64_BITMASK(BCH_GROUP_DELETED, struct bch_disk_group, flags[0], 0, 1)
-LE64_BITMASK(BCH_GROUP_DATA_ALLOWED, struct bch_disk_group, flags[0], 1, 6)
-LE64_BITMASK(BCH_GROUP_PARENT, struct bch_disk_group, flags[0], 6, 24)
-
-struct bch_sb_field_disk_groups {
- struct bch_sb_field field;
- struct bch_disk_group entries[];
-} __packed __aligned(8);
-
/*
* On clean shutdown, store btree roots and current journal sequence number in
* the superblock:
@@ -809,27 +671,6 @@ struct bch_sb_field_clean {
__u64 _data[];
};
-struct journal_seq_blacklist_entry {
- __le64 start;
- __le64 end;
-};
-
-struct bch_sb_field_journal_seq_blacklist {
- struct bch_sb_field field;
- struct journal_seq_blacklist_entry start[];
-};
-
-struct bch_sb_field_errors {
- struct bch_sb_field field;
- struct bch_sb_field_error_entry {
- __le64 v;
- __le64 last_error_time;
- } entries[];
-};
-
-LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID, struct bch_sb_field_error_entry, v, 0, 16);
-LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR, struct bch_sb_field_error_entry, v, 16, 64);
-
struct bch_sb_field_ext {
struct bch_sb_field field;
__le64 recovery_passes_required[2];
@@ -837,18 +678,6 @@ struct bch_sb_field_ext {
__le64 btrees_lost_data;
};
-struct bch_sb_field_downgrade_entry {
- __le16 version;
- __le64 recovery_passes[2];
- __le16 nr_errors;
- __le16 errors[] __counted_by(nr_errors);
-} __packed __aligned(2);
-
-struct bch_sb_field_downgrade {
- struct bch_sb_field field;
- struct bch_sb_field_downgrade_entry entries[];
-};
-
/* Superblock: */
/*
@@ -909,7 +738,6 @@ unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_re
#define bcachefs_metadata_version_current (bcachefs_metadata_version_max - 1)
#define BCH_SB_SECTOR 8
-#define BCH_SB_MEMBERS_MAX 64 /* XXX kill */
#define BCH_SB_LAYOUT_SIZE_BITS_MAX 16 /* 32 MB */
@@ -1162,8 +990,9 @@ enum bch_version_upgrade_opts {
#define BCH_ERROR_ACTIONS() \
x(continue, 0) \
- x(ro, 1) \
- x(panic, 2)
+ x(fix_safe, 1) \
+ x(panic, 2) \
+ x(ro, 3)
enum bch_error_actions {
#define x(t, n) BCH_ON_ERROR_##t = n,
@@ -1557,9 +1386,10 @@ enum btree_id {
/*
* Maximum number of btrees that we will _ever_ have under the current scheme,
- * where we refer to them with bitfields
+ * where we refer to them with 64 bit bitfields - and we also need a bit for
+ * the interior btree node type:
*/
-#define BTREE_ID_NR_MAX 64
+#define BTREE_ID_NR_MAX 63
static inline bool btree_id_is_alloc(enum btree_id id)
{
diff --git a/fs/bcachefs/bkey.c b/fs/bcachefs/bkey.c
index f46978e5cb7c..94a1d1982fa8 100644
--- a/fs/bcachefs/bkey.c
+++ b/fs/bcachefs/bkey.c
@@ -1064,7 +1064,7 @@ void bch2_bkey_swab_key(const struct bkey_format *_f, struct bkey_packed *k)
{
const struct bkey_format *f = bkey_packed(k) ? _f : &bch2_bkey_format_current;
u8 *l = k->key_start;
- u8 *h = (u8 *) (k->_data + f->key_u64s) - 1;
+ u8 *h = (u8 *) ((u64 *) k->_data + f->key_u64s) - 1;
while (l < h) {
swap(*l, *h);
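
The bkey.c fix above hinges on pointer-arithmetic scaling: adding key_u64s to a u64 * advances by 8 * key_u64s bytes, so the cast has to happen before the addition when the goal is the last byte of the key. A minimal illustration with made-up values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t buf[4] = { 0 };
        unsigned key_u64s = 3;

        /* Both expressions point at the last byte of a 3-u64 key; the first
         * only does so because the addition happens on a u64 pointer. */
        uint8_t *end_from_u64 = (uint8_t *) ((uint64_t *) buf + key_u64s) - 1;
        uint8_t *end_from_u8  = (uint8_t *) buf + key_u64s * sizeof(uint64_t) - 1;

        assert(end_from_u64 == end_from_u8);
        return 0;
}
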
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index c2c3dae52186..bd32aac05192 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -398,8 +398,12 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
for (i = 0; i < nr_compat; i++)
switch (!write ? i : nr_compat - 1 - i) {
case 0:
- if (big_endian != CPU_BIG_ENDIAN)
+ if (big_endian != CPU_BIG_ENDIAN) {
+ bch2_bkey_swab_key(f, k);
+ } else if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
bch2_bkey_swab_key(f, k);
+ bch2_bkey_swab_key(f, k);
+ }
break;
case 1:
if (version < bcachefs_metadata_version_bkey_renumber)
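
The bkey_methods change above makes debug builds call bch2_bkey_swab_key() even when no byte swap is actually needed, by calling it twice so the net effect is a no-op: two byte swaps compose to the identity, which lets the swab path be exercised on every key without altering it. A stand-alone check of that property (swab64_example() is a hand-rolled byte swap, not a kernel helper):

#include <assert.h>
#include <stdint.h>

static uint64_t swab64_example(uint64_t v)
{
        return  (v >> 56) | ((v >> 40) & 0xff00ULL) |
                ((v >> 24) & 0xff0000ULL) | ((v >> 8) & 0xff000000ULL) |
                ((v << 8) & 0xff00000000ULL) | ((v << 24) & 0xff0000000000ULL) |
                ((v << 40) & 0xff000000000000ULL) | (v << 56);
}

int main(void)
{
        uint64_t k = 0x0123456789abcdefULL;

        /* Swabbing twice restores the original, so running it unconditionally
         * in debug builds changes nothing in memory or on disk. */
        assert(swab64_example(swab64_example(k)) == k);
        return 0;
}
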
diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h
index 726ef7483763..baef0722f5fb 100644
--- a/fs/bcachefs/bkey_methods.h
+++ b/fs/bcachefs/bkey_methods.h
@@ -129,7 +129,8 @@ static inline void bch2_bkey_compat(unsigned level, enum btree_id btree_id,
struct bkey_packed *k)
{
if (version < bcachefs_metadata_version_current ||
- big_endian != CPU_BIG_ENDIAN)
+ big_endian != CPU_BIG_ENDIAN ||
+ IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
__bch2_bkey_compat(level, btree_id, version,
big_endian, write, f, k);
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 9e4ed75d3675..4f5e411771ba 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -91,10 +91,11 @@ static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg,
}
static const struct rhashtable_params bch_btree_cache_params = {
- .head_offset = offsetof(struct btree, hash),
- .key_offset = offsetof(struct btree, hash_val),
- .key_len = sizeof(u64),
- .obj_cmpfn = bch2_btree_cache_cmp_fn,
+ .head_offset = offsetof(struct btree, hash),
+ .key_offset = offsetof(struct btree, hash_val),
+ .key_len = sizeof(u64),
+ .obj_cmpfn = bch2_btree_cache_cmp_fn,
+ .automatic_shrinking = true,
};
static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 8035c8b797ab..0e477a926579 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -585,16 +585,17 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
if (fsck_err_on(k.k->version.lo > atomic64_read(&c->key_version), c,
bkey_version_in_future,
- "key version number higher than recorded: %llu > %llu",
- k.k->version.lo,
- atomic64_read(&c->key_version)))
+ "key version number higher than recorded %llu\n %s",
+ atomic64_read(&c->key_version),
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
atomic64_set(&c->key_version, k.k->version.lo);
}
if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, k),
c, btree_bitmap_not_marked,
"btree ptr not marked in member info btree allocated bitmap\n %s",
- (bch2_bkey_val_to_text(&buf, c, k),
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k),
buf.buf))) {
mutex_lock(&c->sb_lock);
bch2_dev_btree_bitmap_mark(c, k);
@@ -673,8 +674,7 @@ static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree, bool in
static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
{
- return (int) btree_id_to_gc_phase(l) -
- (int) btree_id_to_gc_phase(r);
+ return cmp_int(gc_btree_order(l), gc_btree_order(r));
}
static int bch2_gc_btrees(struct bch_fs *c)
@@ -711,7 +711,7 @@ fsck_err:
static int bch2_mark_superblocks(struct bch_fs *c)
{
mutex_lock(&c->sb_lock);
- gc_pos_set(c, gc_phase(GC_PHASE_SB));
+ gc_pos_set(c, gc_phase(GC_PHASE_sb));
int ret = bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_gc);
mutex_unlock(&c->sb_lock);
@@ -874,6 +874,9 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
const struct bch_alloc_v4 *old;
int ret;
+ if (!bucket_valid(ca, k.k->p.offset))
+ return 0;
+
old = bch2_alloc_to_v4(k, &old_convert);
gc = new = *old;
@@ -990,6 +993,8 @@ static int bch2_gc_alloc_start(struct bch_fs *c)
buckets->first_bucket = ca->mi.first_bucket;
buckets->nbuckets = ca->mi.nbuckets;
+ buckets->nbuckets_minus_first =
+ buckets->nbuckets - buckets->first_bucket;
rcu_assign_pointer(ca->buckets_gc, buckets);
}
@@ -1003,12 +1008,14 @@ static int bch2_gc_alloc_start(struct bch_fs *c)
continue;
}
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+ if (bucket_valid(ca, k.k->p.offset)) {
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
- struct bucket *g = gc_bucket(ca, k.k->p.offset);
- g->gen_valid = 1;
- g->gen = a->gen;
+ struct bucket *g = gc_bucket(ca, k.k->p.offset);
+ g->gen_valid = 1;
+ g->gen = a->gen;
+ }
0;
})));
bch2_dev_put(ca);
@@ -1209,7 +1216,7 @@ int bch2_check_allocations(struct bch_fs *c)
if (ret)
goto out;
- gc_pos_set(c, gc_phase(GC_PHASE_START));
+ gc_pos_set(c, gc_phase(GC_PHASE_start));
ret = bch2_mark_superblocks(c);
BUG_ON(ret);
@@ -1231,7 +1238,7 @@ out:
percpu_down_write(&c->mark_lock);
/* Indicates that gc is no longer in progress: */
- __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
+ __gc_pos_set(c, gc_phase(GC_PHASE_not_running));
bch2_gc_free(c);
percpu_up_write(&c->mark_lock);
diff --git a/fs/bcachefs/btree_gc.h b/fs/bcachefs/btree_gc.h
index 1b6489d8e0f4..876d81e2017d 100644
--- a/fs/bcachefs/btree_gc.h
+++ b/fs/bcachefs/btree_gc.h
@@ -3,6 +3,7 @@
#define _BCACHEFS_BTREE_GC_H
#include "bkey.h"
+#include "btree_gc_types.h"
#include "btree_types.h"
int bch2_check_topology(struct bch_fs *);
@@ -32,36 +33,15 @@ int bch2_check_allocations(struct bch_fs *);
/* Position of (the start of) a gc phase: */
static inline struct gc_pos gc_phase(enum gc_phase phase)
{
- return (struct gc_pos) {
- .phase = phase,
- .level = 0,
- .pos = POS_MIN,
- };
-}
-
-static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
-{
- return cmp_int(l.phase, r.phase) ?:
- -cmp_int(l.level, r.level) ?:
- bpos_cmp(l.pos, r.pos);
-}
-
-static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
-{
- switch (id) {
-#define x(name, v, ...) case BTREE_ID_##name: return GC_PHASE_BTREE_##name;
- BCH_BTREE_IDS()
-#undef x
- default:
- BUG();
- }
+ return (struct gc_pos) { .phase = phase, };
}
static inline struct gc_pos gc_pos_btree(enum btree_id btree, unsigned level,
struct bpos pos)
{
return (struct gc_pos) {
- .phase = btree_id_to_gc_phase(btree),
+ .phase = GC_PHASE_btree,
+ .btree = btree,
.level = level,
.pos = pos,
};
@@ -76,6 +56,22 @@ static inline struct gc_pos gc_pos_btree_node(struct btree *b)
return gc_pos_btree(b->c.btree_id, b->c.level, b->key.k.p);
}
+static inline int gc_btree_order(enum btree_id btree)
+{
+ if (btree == BTREE_ID_stripes)
+ return -1;
+ return btree;
+}
+
+static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
+{
+ return cmp_int(l.phase, r.phase) ?:
+ cmp_int(gc_btree_order(l.btree),
+ gc_btree_order(r.btree)) ?:
+ -cmp_int(l.level, r.level) ?:
+ bpos_cmp(l.pos, r.pos);
+}
+
static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
{
unsigned seq;
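
With gc_pos now carrying (phase, btree, level, pos), gc_pos_cmp() orders btrees by gc_btree_order(), which sorts the stripes btree before everything else so it is walked first. A toy comparison showing just that ordering (the enum values below are made up, and cmp_int() is a local copy of the kernel idiom):

#include <stdio.h>

#define cmp_int(l, r)   (((l) > (r)) - ((l) < (r)))

enum example_btree { BTREE_extents = 0, BTREE_stripes = 7 };    /* made-up ids */

/* Stripes sort before every other btree, mirroring gc_btree_order(). */
static int gc_btree_order(enum example_btree b)
{
        return b == BTREE_stripes ? -1 : (int) b;
}

int main(void)
{
        printf("%d\n", cmp_int(gc_btree_order(BTREE_stripes),
                               gc_btree_order(BTREE_extents)));  /* -1: stripes first */
        printf("%d\n", cmp_int(gc_btree_order(BTREE_extents),
                               gc_btree_order(BTREE_stripes)));  /*  1: others after */
        return 0;
}
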
diff --git a/fs/bcachefs/btree_gc_types.h b/fs/bcachefs/btree_gc_types.h
new file mode 100644
index 000000000000..b82c24bcc088
--- /dev/null
+++ b/fs/bcachefs/btree_gc_types.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_GC_TYPES_H
+#define _BCACHEFS_BTREE_GC_TYPES_H
+
+#include <linux/generic-radix-tree.h>
+
+enum gc_phase {
+ GC_PHASE_not_running,
+ GC_PHASE_start,
+ GC_PHASE_sb,
+ GC_PHASE_btree,
+};
+
+struct gc_pos {
+ enum gc_phase phase:8;
+ enum btree_id btree:8;
+ u16 level;
+ struct bpos pos;
+};
+
+struct reflink_gc {
+ u64 offset;
+ u32 size;
+ u32 refcount;
+};
+
+typedef GENRADIX(struct reflink_gc) reflink_gc_table;
+
+#endif /* _BCACHEFS_BTREE_GC_TYPES_H */
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index cbf8f5d90602..7bca15c604f5 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -519,7 +519,7 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
struct bch_dev *ca,
- struct btree *b, struct bset *i,
+ struct btree *b, struct bset *i, struct bkey_packed *k,
unsigned offset, int write)
{
prt_printf(out, bch2_log_msg(c, "%s"),
@@ -537,15 +537,20 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
b->written, btree_ptr_sectors_written(&b->key));
if (i)
prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
+ if (k)
+ prt_printf(out, " bset byte offset %lu",
+ (unsigned long)(void *)k -
+ ((unsigned long)(void *)i & ~511UL));
prt_str(out, ": ");
}
-__printf(9, 10)
+__printf(10, 11)
static int __btree_err(int ret,
struct bch_fs *c,
struct bch_dev *ca,
struct btree *b,
struct bset *i,
+ struct bkey_packed *k,
int write,
bool have_retry,
enum bch_sb_error_id err_type,
@@ -555,7 +560,7 @@ static int __btree_err(int ret,
bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
va_list args;
- btree_err_msg(&out, c, ca, b, i, b->written, write);
+ btree_err_msg(&out, c, ca, b, i, k, b->written, write);
va_start(args, fmt);
prt_vprintf(&out, fmt, args);
@@ -611,9 +616,9 @@ fsck_err:
return ret;
}
-#define btree_err(type, c, ca, b, i, _err_type, msg, ...) \
+#define btree_err(type, c, ca, b, i, k, _err_type, msg, ...) \
({ \
- int _ret = __btree_err(type, c, ca, b, i, write, have_retry, \
+ int _ret = __btree_err(type, c, ca, b, i, k, write, have_retry, \
BCH_FSCK_ERR_##_err_type, \
msg, ##__VA_ARGS__); \
\
@@ -690,7 +695,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(!bch2_version_compatible(version),
-BCH_ERR_btree_node_read_err_incompatible,
- c, ca, b, i,
+ c, ca, b, i, NULL,
btree_node_unsupported_version,
"unsupported bset version %u.%u",
BCH_VERSION_MAJOR(version),
@@ -698,7 +703,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
if (btree_err_on(version < c->sb.version_min,
-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, NULL,
btree_node_bset_older_than_sb_min,
"bset version %u older than superblock version_min %u",
version, c->sb.version_min)) {
@@ -711,7 +716,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
if (btree_err_on(BCH_VERSION_MAJOR(version) >
BCH_VERSION_MAJOR(c->sb.version),
-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, NULL,
btree_node_bset_newer_than_sb,
"bset version %u newer than superblock version %u",
version, c->sb.version)) {
@@ -723,13 +728,13 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
-BCH_ERR_btree_node_read_err_incompatible,
- c, ca, b, i,
+ c, ca, b, i, NULL,
btree_node_unsupported_version,
"BSET_SEPARATE_WHITEOUTS no longer supported");
if (btree_err_on(offset + sectors > btree_sectors(c),
-BCH_ERR_btree_node_read_err_fixable,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_past_end_of_btree_node,
"bset past end of btree node")) {
i->u64s = 0;
@@ -739,13 +744,13 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(offset && !i->u64s,
-BCH_ERR_btree_node_read_err_fixable,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_empty,
"empty bset");
btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
-BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_wrong_sector_offset,
"bset at wrong sector offset");
@@ -761,20 +766,20 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
/* XXX endianness */
btree_err_on(bp->seq != bn->keys.seq,
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
bset_bad_seq,
"incorrect sequence number (wrong btree node)");
}
btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
btree_node_bad_btree,
"incorrect btree id");
btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
btree_node_bad_level,
"incorrect level");
@@ -793,7 +798,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
btree_node_bad_min_key,
"incorrect min_key: got %s should be %s",
(printbuf_reset(&buf1),
@@ -804,7 +809,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
btree_node_bad_max_key,
"incorrect max key %s",
(printbuf_reset(&buf1),
@@ -816,7 +821,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
-BCH_ERR_btree_node_read_err_bad_node,
- c, ca, b, i,
+ c, ca, b, i, NULL,
btree_node_bad_format,
"invalid bkey format: %s\n %s", buf1.buf,
(printbuf_reset(&buf2),
@@ -883,7 +888,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, k,
btree_node_bkey_past_bset_end,
"key extends past end of bset")) {
i->u64s = cpu_to_le16((u64 *) k - i->_data);
@@ -892,14 +897,14 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, k,
btree_node_bkey_bad_format,
"invalid bkey format %u", k->format))
goto drop_this_key;
if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, k,
btree_node_bkey_bad_u64s,
"bad k->u64s %u (min %u max %zu)", k->u64s,
bkeyp_key_u64s(&b->format, k),
@@ -921,7 +926,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
bch2_bkey_val_to_text(&buf, c, u.s_c);
btree_err(-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, k,
btree_node_bad_bkey,
"invalid bkey: %s", buf.buf);
goto drop_this_key;
@@ -942,7 +947,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
bch2_bkey_to_text(&buf, u.k);
if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, k,
btree_node_bkey_out_of_order,
"%s", buf.buf))
goto drop_this_key;
@@ -1011,13 +1016,13 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
if (bch2_meta_read_fault("btree"))
btree_err(-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
btree_node_fault_injected,
"dynamic fault");
btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
btree_node_bad_magic,
"bad magic: want %llx, got %llx",
bset_magic(c), le64_to_cpu(b->data->magic));
@@ -1032,7 +1037,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(b->data->keys.seq != bp->seq,
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
btree_node_bad_seq,
"got wrong btree node: got\n%s",
(printbuf_reset(&buf),
@@ -1041,7 +1046,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
} else {
btree_err_on(!b->data->keys.seq,
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
btree_node_bad_seq,
"bad btree header: seq 0\n%s",
(printbuf_reset(&buf),
@@ -1060,7 +1065,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
-BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_unknown_csum,
"unknown checksum type %llu", BSET_CSUM_TYPE(i));
@@ -1073,7 +1078,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(csum_bad,
-BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_bad_csum,
"%s",
(printbuf_reset(&buf),
@@ -1088,7 +1093,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
!BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
-BCH_ERR_btree_node_read_err_incompatible,
- c, NULL, b, NULL,
+ c, NULL, b, NULL, NULL,
btree_node_unsupported_version,
"btree node does not have NEW_EXTENT_OVERWRITE set");
@@ -1102,7 +1107,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
-BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_unknown_csum,
"unknown checksum type %llu", BSET_CSUM_TYPE(i));
@@ -1114,7 +1119,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(csum_bad,
-BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_bad_csum,
"%s",
(printbuf_reset(&buf),
@@ -1152,14 +1157,14 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(blacklisted && first,
-BCH_ERR_btree_node_read_err_fixable,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_blacklisted_journal_seq,
"first btree node bset has blacklisted journal seq (%llu)",
le64_to_cpu(i->journal_seq));
btree_err_on(blacklisted && ptr_written,
-BCH_ERR_btree_node_read_err_fixable,
- c, ca, b, i,
+ c, ca, b, i, NULL,
first_bset_blacklisted_journal_seq,
"found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
le64_to_cpu(i->journal_seq),
@@ -1178,7 +1183,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
if (ptr_written) {
btree_err_on(b->written < ptr_written,
-BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
btree_node_data_missing,
"btree node data missing: expected %u sectors, found %u",
ptr_written, b->written);
@@ -1191,7 +1196,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
le64_to_cpu(bne->keys.journal_seq),
true),
-BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
btree_node_bset_after_end,
"found bset signature after last bset");
}
@@ -1235,7 +1240,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
bch2_bkey_val_to_text(&buf, c, u.s_c);
btree_err(-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, k,
btree_node_bad_bkey,
"%s", buf.buf);
@@ -1384,7 +1389,7 @@ static void btree_node_read_endio(struct bio *bio)
bch2_latency_acct(ca, rb->start_time, READ);
}
- queue_work(c->io_complete_wq, &rb->work);
+ queue_work(c->btree_read_complete_wq, &rb->work);
}
struct btree_node_read_all {
@@ -1471,18 +1476,18 @@ static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
written2 = btree_node_sectors_written(c, ra->buf[i]);
if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, NULL,
+ c, NULL, b, NULL, NULL,
btree_node_replicas_sectors_written_mismatch,
"btree node sectors written mismatch: %u != %u",
written, written2) ||
btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, NULL,
+ c, NULL, b, NULL, NULL,
btree_node_bset_after_end,
"found bset signature after last bset") ||
btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, NULL,
+ c, NULL, b, NULL, NULL,
btree_node_replicas_data_mismatch,
"btree node replicas content mismatch"))
dump_bset_maps = true;
@@ -1651,7 +1656,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
btree_node_read_all_replicas_done(&ra->cl.work);
} else {
continue_at(&ra->cl, btree_node_read_all_replicas_done,
- c->io_complete_wq);
+ c->btree_read_complete_wq);
}
return 0;
@@ -1732,7 +1737,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
if (sync)
btree_node_read_work(&rb->work);
else
- queue_work(c->io_complete_wq, &rb->work);
+ queue_work(c->btree_read_complete_wq, &rb->work);
}
}
@@ -2224,7 +2229,7 @@ do_write:
atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
INIT_WORK(&wbio->work, btree_write_submit);
- queue_work(c->io_complete_wq, &wbio->work);
+ queue_work(c->btree_write_submit_wq, &wbio->work);
return;
err:
set_btree_node_noevict(b);
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index d3bcb4e4e230..0ed9e6574fcd 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -221,11 +221,8 @@ static void bch2_btree_path_verify(struct btree_trans *trans,
struct btree_path *path)
{
struct bch_fs *c = trans->c;
- unsigned i;
-
- EBUG_ON(path->btree_id >= BTREE_ID_NR);
- for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
+ for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
if (!path->l[i].b) {
BUG_ON(!path->cached &&
bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
@@ -251,8 +248,6 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
{
struct btree_trans *trans = iter->trans;
- BUG_ON(iter->btree_id >= BTREE_ID_NR);
-
BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
@@ -3135,7 +3130,6 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
memset(trans, 0, sizeof(*trans));
- closure_init_stack(&trans->ref);
seqmutex_lock(&c->btree_trans_lock);
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
@@ -3155,15 +3149,10 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
BUG_ON(pos_task &&
pid == pos_task->pid &&
pos->locked);
-
- if (pos_task && pid < pos_task->pid) {
- list_add_tail(&trans->list, &pos->list);
- goto list_add_done;
- }
}
}
- list_add_tail(&trans->list, &c->btree_trans_list);
-list_add_done:
+
+ list_add(&trans->list, &c->btree_trans_list);
seqmutex_unlock(&c->btree_trans_lock);
got_trans:
trans->c = c;
@@ -3204,6 +3193,8 @@ got_trans:
trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
trans->srcu_lock_time = jiffies;
trans->srcu_held = true;
+
+ closure_init_stack_release(&trans->ref);
return trans;
}
@@ -3240,7 +3231,6 @@ void bch2_trans_put(struct btree_trans *trans)
trans_for_each_update(trans, i)
__btree_path_put(trans->paths + i->path, true);
trans->nr_updates = 0;
- trans->locking_wait.task = NULL;
check_btree_paths_leaked(trans);
@@ -3261,6 +3251,13 @@ void bch2_trans_put(struct btree_trans *trans)
if (unlikely(trans->journal_replay_not_finished))
bch2_journal_keys_put(c);
+ /*
+ * trans->ref protects trans->locking_wait.task, btree_paths array; used
+ * by cycle detector
+ */
+ closure_return_sync(&trans->ref);
+ trans->locking_wait.task = NULL;
+
unsigned long *paths_allocated = trans->paths_allocated;
trans->paths_allocated = NULL;
trans->paths = NULL;
@@ -3278,8 +3275,6 @@ void bch2_trans_put(struct btree_trans *trans)
trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
if (trans) {
- closure_sync(&trans->ref);
-
seqmutex_lock(&c->btree_trans_lock);
list_del(&trans->list);
seqmutex_unlock(&c->btree_trans_lock);
@@ -3385,8 +3380,6 @@ void bch2_fs_btree_iter_exit(struct bch_fs *c)
per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
if (trans) {
- closure_sync(&trans->ref);
-
seqmutex_lock(&c->btree_trans_lock);
list_del(&trans->list);
seqmutex_unlock(&c->btree_trans_lock);
@@ -3406,8 +3399,10 @@ void bch2_fs_btree_iter_exit(struct bch_fs *c)
bch2_time_stats_exit(&s->lock_hold_times);
}
- if (c->btree_trans_barrier_initialized)
+ if (c->btree_trans_barrier_initialized) {
+ synchronize_srcu_expedited(&c->btree_trans_barrier);
cleanup_srcu_struct(&c->btree_trans_barrier);
+ }
mempool_exit(&c->btree_trans_mem_pool);
mempool_exit(&c->btree_trans_pool);
}
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 75f5e6fe4634..2d3c0d45c37f 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -32,10 +32,11 @@ static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
}
static const struct rhashtable_params bch2_btree_key_cache_params = {
- .head_offset = offsetof(struct bkey_cached, hash),
- .key_offset = offsetof(struct bkey_cached, key),
- .key_len = sizeof(struct bkey_cached_key),
- .obj_cmpfn = bch2_btree_key_cache_cmp_fn,
+ .head_offset = offsetof(struct bkey_cached, hash),
+ .key_offset = offsetof(struct bkey_cached, key),
+ .key_len = sizeof(struct bkey_cached_key),
+ .obj_cmpfn = bch2_btree_key_cache_cmp_fn,
+ .automatic_shrinking = true,
};
__flatten
@@ -424,16 +425,16 @@ static int btree_key_cache_fill(struct btree_trans *trans,
goto err;
}
- if (!bch2_btree_node_relock(trans, ck_path, 0)) {
+ ret = bch2_trans_relock(trans);
+ if (ret) {
kfree(new_k);
- trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
- ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
goto err;
}
- ret = bch2_trans_relock(trans);
- if (ret) {
+ if (!bch2_btree_node_relock(trans, ck_path, 0)) {
kfree(new_k);
+ trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
goto err;
}
}
@@ -840,7 +841,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
six_lock_exit(&ck->c.lock);
kmem_cache_free(bch2_key_cache, ck);
atomic_long_dec(&bc->nr_freed);
- freed++;
bc->nr_freed_nonpcpu--;
bc->freed++;
}
@@ -854,7 +854,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
six_lock_exit(&ck->c.lock);
kmem_cache_free(bch2_key_cache, ck);
atomic_long_dec(&bc->nr_freed);
- freed++;
bc->nr_freed_pcpu--;
bc->freed++;
}
@@ -876,23 +875,22 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
bc->skipped_dirty++;
- goto next;
} else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) {
clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
bc->skipped_accessed++;
- goto next;
- } else if (bkey_cached_lock_for_evict(ck)) {
+ } else if (!bkey_cached_lock_for_evict(ck)) {
+ bc->skipped_lock_fail++;
+ } else {
bkey_cached_evict(bc, ck);
bkey_cached_free(bc, ck);
bc->moved_to_freelist++;
- } else {
- bc->skipped_lock_fail++;
+ freed++;
}
scanned++;
if (scanned >= nr)
break;
-next:
+
pos = next;
}
@@ -917,6 +915,14 @@ static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink,
long nr = atomic_long_read(&bc->nr_keys) -
atomic_long_read(&bc->nr_dirty);
+ /*
+ * Avoid hammering our shrinker too much if it's nearly empty - the
+ * shrinker code doesn't take into account how big our cache is, if it's
+ * mostly empty but the system is under memory pressure it causes nasty
+ * lock contention:
+ */
+ nr -= 128;
+
return max(0L, nr);
}
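
bch2_btree_key_cache_count() now under-reports by 128 entries so that a nearly empty key cache stops attracting shrinker scans, and the lock contention they cause, under memory pressure. The arithmetic as a stand-alone sketch with made-up counts:

#include <stdio.h>

static long shrinker_count(long nr_keys, long nr_dirty)
{
        long nr = nr_keys - nr_dirty;

        /* Under-report small caches so the shrinker leaves them alone. */
        nr -= 128;

        return nr > 0 ? nr : 0;
}

int main(void)
{
        printf("%ld\n", shrinker_count(100, 10));       /* 0: too small to bother */
        printf("%ld\n", shrinker_count(10000, 500));    /* 9372 */
        return 0;
}
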
@@ -1025,9 +1031,10 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
if (!shrink)
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
bc->shrink = shrink;
- shrink->seeks = 0;
shrink->count_objects = bch2_btree_key_cache_count;
shrink->scan_objects = bch2_btree_key_cache_scan;
+ shrink->batch = 1 << 14;
+ shrink->seeks = 0;
shrink->private_data = c;
shrinker_register(shrink);
return 0;
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index c3e9b0cc7bbd..d66fff22109a 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -215,6 +215,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
if (unlikely(!best)) {
struct printbuf buf = PRINTBUF;
+ buf.atomic++;
prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index 45cb8149d374..2cb0442f6cc9 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -72,10 +72,11 @@ static bool found_btree_node_is_readable(struct btree_trans *trans,
struct btree *b = bch2_btree_node_get_noiter(trans, &k.k, f->btree_id, f->level, false);
bool ret = !IS_ERR_OR_NULL(b);
- if (ret) {
- f->sectors_written = b->written;
- six_unlock_read(&b->c.lock);
- }
+ if (!ret)
+ return ret;
+
+ f->sectors_written = b->written;
+ six_unlock_read(&b->c.lock);
/*
* We might update this node's range; if that happens, we need the node
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index d63db4fefe73..87f485e9c552 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -761,13 +761,13 @@ static inline bool btree_node_type_needs_gc(enum btree_node_type type)
static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
- const unsigned mask = 0
+ const u64 mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_EXTENTS)) << (nr + 1))
BCH_BTREE_IDS()
#undef x
;
- return (1U << type) & mask;
+ return BIT_ULL(type) & mask;
}
static inline bool btree_id_is_extents(enum btree_id btree)
@@ -777,35 +777,35 @@ static inline bool btree_id_is_extents(enum btree_id btree)
static inline bool btree_type_has_snapshots(enum btree_id id)
{
- const unsigned mask = 0
+ const u64 mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr)
BCH_BTREE_IDS()
#undef x
;
- return (1U << id) & mask;
+ return BIT_ULL(id) & mask;
}
static inline bool btree_type_has_snapshot_field(enum btree_id id)
{
- const unsigned mask = 0
+ const u64 mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & (BTREE_ID_SNAPSHOT_FIELD|BTREE_ID_SNAPSHOTS))) << nr)
BCH_BTREE_IDS()
#undef x
;
- return (1U << id) & mask;
+ return BIT_ULL(id) & mask;
}
static inline bool btree_type_has_ptrs(enum btree_id id)
{
- const unsigned mask = 0
+ const u64 mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_DATA)) << nr)
BCH_BTREE_IDS()
#undef x
;
- return (1U << id) & mask;
+ return BIT_ULL(id) & mask;
}
struct btree_root {
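
The mask-type changes above (unsigned to u64, 1U << id to BIT_ULL(id)) matter once btree ids can reach 63: a 32-bit mask simply cannot represent ids of 32 and above. A small demonstration with a made-up id and feature mask:

#include <assert.h>
#include <stdint.h>

#define BIT_ULL(nr)     (1ULL << (nr))

int main(void)
{
        unsigned id = 40;                       /* hypothetical btree id above 31 */

        /* A 32-bit mask silently loses any id >= 32 ... */
        uint32_t mask32 = (uint32_t) BIT_ULL(id);
        assert(mask32 == 0);

        /* ... while a 64-bit mask keeps working for ids up to 63. */
        uint64_t mask64 = BIT_ULL(3) | BIT_ULL(id);     /* made-up feature mask */
        assert(mask64 & BIT_ULL(id));
        return 0;
}
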
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index b469586517a8..743d57eba760 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -465,143 +465,172 @@ int bch2_update_cached_sectors_list(struct btree_trans *trans, unsigned dev, s64
return bch2_update_replicas_list(trans, &r.e, sectors);
}
-int bch2_check_fix_ptrs(struct btree_trans *trans,
- enum btree_id btree, unsigned level, struct bkey_s_c k,
- enum btree_iter_update_trigger_flags flags)
+static int bch2_check_fix_ptr(struct btree_trans *trans,
+ struct bkey_s_c k,
+ struct extent_ptr_decoded p,
+ const union bch_extent_entry *entry,
+ bool *do_update)
{
struct bch_fs *c = trans->c;
- struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry_c;
- struct extent_ptr_decoded p = { 0 };
- bool do_update = false;
struct printbuf buf = PRINTBUF;
int ret = 0;
- percpu_down_read(&c->mark_lock);
+ struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
+ if (!ca) {
+ if (fsck_err(c, ptr_to_invalid_device,
+ "pointer to missing device %u\n"
+ "while marking %s",
+ p.ptr.dev,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ *do_update = true;
+ return 0;
+ }
- bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) {
- struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
- if (!ca) {
- if (fsck_err(c, ptr_to_invalid_device,
- "pointer to missing device %u\n"
- "while marking %s",
- p.ptr.dev,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- do_update = true;
- continue;
- }
+ struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
+ if (!g) {
+ if (fsck_err(c, ptr_to_invalid_device,
+ "pointer to invalid bucket on device %u\n"
+ "while marking %s",
+ p.ptr.dev,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ *do_update = true;
+ goto out;
+ }
- struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
- enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry_c);
+ enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
- if (fsck_err_on(!g->gen_valid,
- c, ptr_to_missing_alloc_key,
- "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
- p.ptr.gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- if (!p.ptr.cached) {
- g->gen_valid = true;
- g->gen = p.ptr.gen;
- } else {
- do_update = true;
- }
+ if (fsck_err_on(!g->gen_valid,
+ c, ptr_to_missing_alloc_key,
+ "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
+ p.ptr.gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ if (!p.ptr.cached) {
+ g->gen_valid = true;
+ g->gen = p.ptr.gen;
+ } else {
+ *do_update = true;
}
+ }
- if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
- c, ptr_gen_newer_than_bucket_gen,
- "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
- p.ptr.gen, g->gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- if (!p.ptr.cached &&
- (g->data_type != BCH_DATA_btree ||
- data_type == BCH_DATA_btree)) {
- g->gen_valid = true;
- g->gen = p.ptr.gen;
- g->data_type = 0;
- g->dirty_sectors = 0;
- g->cached_sectors = 0;
- } else {
- do_update = true;
- }
+ if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
+ c, ptr_gen_newer_than_bucket_gen,
+ "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
+ p.ptr.gen, g->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ if (!p.ptr.cached &&
+ (g->data_type != BCH_DATA_btree ||
+ data_type == BCH_DATA_btree)) {
+ g->gen_valid = true;
+ g->gen = p.ptr.gen;
+ g->data_type = 0;
+ g->dirty_sectors = 0;
+ g->cached_sectors = 0;
+ } else {
+ *do_update = true;
+ }
+ }
+
+ if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
+ c, ptr_gen_newer_than_bucket_gen,
+ "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
+ bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
+ p.ptr.gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ *do_update = true;
+
+ if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
+ c, stale_dirty_ptr,
+ "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
+ p.ptr.gen, g->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ *do_update = true;
+
+ if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
+ goto out;
+
+ if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
+ c, ptr_bucket_data_type_mismatch,
+ "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
+ bch2_data_type_str(g->data_type),
+ bch2_data_type_str(data_type),
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ if (data_type == BCH_DATA_btree) {
+ g->gen_valid = true;
+ g->gen = p.ptr.gen;
+ g->data_type = data_type;
+ g->dirty_sectors = 0;
+ g->cached_sectors = 0;
+ } else {
+ *do_update = true;
}
+ }
+
+ if (p.has_ec) {
+ struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
- if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
- c, ptr_gen_newer_than_bucket_gen,
- "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
+ if (fsck_err_on(!m || !m->alive,
+ c, ptr_to_missing_stripe,
+ "pointer to nonexistent stripe %llu\n"
"while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
- bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
- p.ptr.gen,
+ (u64) p.ec.idx,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- do_update = true;
+ *do_update = true;
- if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
- c, stale_dirty_ptr,
- "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
+ if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p),
+ c, ptr_to_incorrect_stripe,
+ "pointer does not match stripe %llu\n"
"while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
- p.ptr.gen, g->gen,
+ (u64) p.ec.idx,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- do_update = true;
+ *do_update = true;
+ }
+out:
+fsck_err:
+ bch2_dev_put(ca);
+ printbuf_exit(&buf);
+ return ret;
+}
- if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
- goto next;
+int bch2_check_fix_ptrs(struct btree_trans *trans,
+ enum btree_id btree, unsigned level, struct bkey_s_c k,
+ enum btree_iter_update_trigger_flags flags)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry_c;
+ struct extent_ptr_decoded p = { 0 };
+ bool do_update = false;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
- if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
- c, ptr_bucket_data_type_mismatch,
- "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
- bch2_data_type_str(g->data_type),
- bch2_data_type_str(data_type),
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- if (data_type == BCH_DATA_btree) {
- g->gen_valid = true;
- g->gen = p.ptr.gen;
- g->data_type = data_type;
- g->dirty_sectors = 0;
- g->cached_sectors = 0;
- } else {
- do_update = true;
- }
- }
+ percpu_down_read(&c->mark_lock);
- if (p.has_ec) {
- struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
-
- if (fsck_err_on(!m || !m->alive, c,
- ptr_to_missing_stripe,
- "pointer to nonexistent stripe %llu\n"
- "while marking %s",
- (u64) p.ec.idx,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- do_update = true;
-
- if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c,
- ptr_to_incorrect_stripe,
- "pointer does not match stripe %llu\n"
- "while marking %s",
- (u64) p.ec.idx,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- do_update = true;
- }
-next:
- bch2_dev_put(ca);
+ bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) {
+ ret = bch2_check_fix_ptr(trans, k, p, entry_c, &do_update);
+ if (ret)
+ goto err;
}
if (do_update) {
@@ -716,7 +745,6 @@ found:
bch2_btree_node_update_key_early(trans, btree, level - 1, k, new);
}
err:
-fsck_err:
percpu_up_read(&c->mark_lock);
printbuf_exit(&buf);
return ret;
@@ -987,6 +1015,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
enum btree_iter_update_trigger_flags flags)
{
bool insert = !(flags & BTREE_TRIGGER_overwrite);
+ struct printbuf buf = PRINTBUF;
int ret = 0;
struct bch_fs *c = trans->c;
@@ -1019,6 +1048,13 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
if (flags & BTREE_TRIGGER_gc) {
percpu_down_read(&c->mark_lock);
struct bucket *g = gc_bucket(ca, bucket.offset);
+ if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
+ p.ptr.dev,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = -EIO;
+ goto err_unlock;
+ }
+
bucket_lock(g);
struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
ret = __mark_pointer(trans, ca, k, &p.ptr, *sectors, bp.data_type, &new);
@@ -1027,10 +1063,12 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
bch2_dev_usage_update(c, ca, &old, &new, 0, true);
}
bucket_unlock(g);
+err_unlock:
percpu_up_read(&c->mark_lock);
}
err:
bch2_dev_put(ca);
+ printbuf_exit(&buf);
return ret;
}
@@ -1134,7 +1172,7 @@ static int __trigger_extent(struct btree_trans *trans,
r.e.nr_required = 1;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- s64 disk_sectors;
+ s64 disk_sectors = 0;
ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags);
if (ret < 0)
return ret;
@@ -1318,10 +1356,11 @@ static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
u64 b, enum bch_data_type data_type, unsigned sectors,
enum btree_iter_update_trigger_flags flags)
{
- int ret = 0;
-
percpu_down_read(&c->mark_lock);
struct bucket *g = gc_bucket(ca, b);
+ if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u when marking metadata type %s",
+ ca->dev_idx, bch2_data_type_str(data_type)))
+ goto err_unlock;
bucket_lock(g);
struct bch_alloc_v4 old = bucket_m_to_alloc(*g);
@@ -1330,29 +1369,27 @@ static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
g->data_type != data_type, c,
"different types of data in same bucket: %s, %s",
bch2_data_type_str(g->data_type),
- bch2_data_type_str(data_type))) {
- ret = -EIO;
+ bch2_data_type_str(data_type)))
goto err;
- }
if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
"bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size",
ca->dev_idx, b, g->gen,
bch2_data_type_str(g->data_type ?: data_type),
- g->dirty_sectors, sectors)) {
- ret = -EIO;
+ g->dirty_sectors, sectors))
goto err;
- }
g->data_type = data_type;
g->dirty_sectors += sectors;
struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
+ bch2_dev_usage_update(c, ca, &old, &new, 0, true);
+ percpu_up_read(&c->mark_lock);
+ return 0;
err:
bucket_unlock(g);
- if (!ret)
- bch2_dev_usage_update(c, ca, &old, &new, 0, true);
+err_unlock:
percpu_up_read(&c->mark_lock);
- return ret;
+ return -EIO;
}
int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
@@ -1595,6 +1632,8 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
bucket_gens->first_bucket = ca->mi.first_bucket;
bucket_gens->nbuckets = nbuckets;
+ bucket_gens->nbuckets_minus_first =
+ bucket_gens->nbuckets - bucket_gens->first_bucket;
if (resize) {
down_write(&c->gc_lock);
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 617ffde2fb7a..80ee0be9793e 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -93,7 +93,8 @@ static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
struct bucket_array *buckets = gc_bucket_array(ca);
- BUG_ON(!bucket_valid(ca, b));
+ if (b - buckets->first_bucket >= buckets->nbuckets_minus_first)
+ return NULL;
return buckets->b + b;
}
@@ -110,7 +111,8 @@ static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
struct bucket_gens *gens = bucket_gens(ca);
- BUG_ON(!bucket_valid(ca, b));
+ if (b - gens->first_bucket >= gens->nbuckets_minus_first)
+ return NULL;
return gens->b + b;
}
@@ -170,19 +172,22 @@ static inline int gen_after(u8 a, u8 b)
return r > 0 ? r : 0;
}
-static inline u8 dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
+static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
- return gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
+ u8 *gen = bucket_gen(ca, PTR_BUCKET_NR(ca, ptr));
+ if (!gen)
+ return -1;
+ return gen_after(*gen, ptr->gen);
}
/**
* dev_ptr_stale() - check if a pointer points into a bucket that has been
* invalidated.
*/
-static inline u8 dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
+static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
rcu_read_lock();
- u8 ret = dev_ptr_stale_rcu(ca, ptr);
+ int ret = dev_ptr_stale_rcu(ca, ptr);
rcu_read_unlock();
return ret;
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 6a31740222a7..f636e17c4caf 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -22,6 +22,7 @@ struct bucket_array {
struct rcu_head rcu;
u16 first_bucket;
size_t nbuckets;
+ size_t nbuckets_minus_first;
struct bucket b[];
};
@@ -29,6 +30,7 @@ struct bucket_gens {
struct rcu_head rcu;
u16 first_bucket;
size_t nbuckets;
+ size_t nbuckets_minus_first;
u8 b[];
};
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 9e54323f0f5f..6d82e1165adc 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -216,7 +216,8 @@ static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_a
ret = PTR_ERR_OR_ZERO(optstr) ?:
bch2_parse_mount_opts(NULL, &thr->opts, optstr);
- kfree(optstr);
+ if (!IS_ERR(optstr))
+ kfree(optstr);
if (ret)
goto err;
@@ -319,7 +320,8 @@ static long bch2_ioctl_disk_add(struct bch_fs *c, struct bch_ioctl_disk arg)
return ret;
ret = bch2_dev_add(c, path);
- kfree(path);
+ if (!IS_ERR(path))
+ kfree(path);
return ret;
}
@@ -850,7 +852,8 @@ static long bch2_ioctl_fsck_online(struct bch_fs *c,
ret = PTR_ERR_OR_ZERO(optstr) ?:
bch2_parse_mount_opts(c, &thr->opts, optstr);
- kfree(optstr);
+ if (!IS_ERR(optstr))
+ kfree(optstr);
if (ret)
goto err;
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 0d807c2ce9c6..1a0072eef109 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -202,9 +202,8 @@ restart_drop_conflicting_replicas:
bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));
/* Now, drop excess replicas: */
-restart_drop_extra_replicas:
-
rcu_read_lock();
+restart_drop_extra_replicas:
bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 51cbf3928361..f0d4727c4dc2 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -568,6 +568,32 @@ static const struct file_operations cached_btree_nodes_ops = {
.read = bch2_cached_btree_nodes_read,
};
+typedef int (*list_cmp_fn)(const struct list_head *l, const struct list_head *r);
+
+static void list_sort(struct list_head *head, list_cmp_fn cmp)
+{
+ struct list_head *pos;
+
+ list_for_each(pos, head)
+ while (!list_is_last(pos, head) &&
+ cmp(pos, pos->next) > 0) {
+ struct list_head *pos2, *next = pos->next;
+
+ list_del(next);
+ list_for_each(pos2, head)
+ if (cmp(next, pos2) < 0)
+ goto pos_found;
+ BUG();
+pos_found:
+ list_add_tail(next, pos2);
+ }
+}
+
+static int list_ptr_order_cmp(const struct list_head *l, const struct list_head *r)
+{
+ return cmp_int(l, r);
+}
+
static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
@@ -575,41 +601,39 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
struct bch_fs *c = i->c;
struct btree_trans *trans;
ssize_t ret = 0;
- u32 seq;
i->ubuf = buf;
i->size = size;
i->ret = 0;
restart:
seqmutex_lock(&c->btree_trans_lock);
- list_for_each_entry(trans, &c->btree_trans_list, list) {
- struct task_struct *task = READ_ONCE(trans->locking_wait.task);
+ list_sort(&c->btree_trans_list, list_ptr_order_cmp);
- if (!task || task->pid <= i->iter)
+ list_for_each_entry(trans, &c->btree_trans_list, list) {
+ if ((ulong) trans < i->iter)
continue;
- closure_get(&trans->ref);
- seq = seqmutex_seq(&c->btree_trans_lock);
- seqmutex_unlock(&c->btree_trans_lock);
+ i->iter = (ulong) trans;
- ret = flush_buf(i);
- if (ret) {
- closure_put(&trans->ref);
- goto unlocked;
- }
+ if (!closure_get_not_zero(&trans->ref))
+ continue;
+
+ u32 seq = seqmutex_unlock(&c->btree_trans_lock);
bch2_btree_trans_to_text(&i->buf, trans);
prt_printf(&i->buf, "backtrace:\n");
printbuf_indent_add(&i->buf, 2);
- bch2_prt_task_backtrace(&i->buf, task, 0, GFP_KERNEL);
+ bch2_prt_task_backtrace(&i->buf, trans->locking_wait.task, 0, GFP_KERNEL);
printbuf_indent_sub(&i->buf, 2);
prt_newline(&i->buf);
- i->iter = task->pid;
-
closure_put(&trans->ref);
+ ret = flush_buf(i);
+ if (ret)
+ goto unlocked;
+
if (!seqmutex_relock(&c->btree_trans_lock, seq))
goto restart;
}
@@ -804,50 +828,55 @@ static const struct file_operations btree_transaction_stats_op = {
.read = btree_transaction_stats_read,
};
-static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
+/* walk btree transactions until we find a deadlock and print it */
+static void btree_deadlock_to_text(struct printbuf *out, struct bch_fs *c)
{
- struct dump_iter *i = file->private_data;
- struct bch_fs *c = i->c;
struct btree_trans *trans;
- ssize_t ret = 0;
- u32 seq;
-
- i->ubuf = buf;
- i->size = size;
- i->ret = 0;
-
- if (i->iter)
- goto out;
+ pid_t iter = 0;
restart:
seqmutex_lock(&c->btree_trans_lock);
list_for_each_entry(trans, &c->btree_trans_list, list) {
struct task_struct *task = READ_ONCE(trans->locking_wait.task);
- if (!task || task->pid <= i->iter)
+ if (!task || task->pid <= iter)
continue;
- closure_get(&trans->ref);
- seq = seqmutex_seq(&c->btree_trans_lock);
- seqmutex_unlock(&c->btree_trans_lock);
+ iter = task->pid;
- ret = flush_buf(i);
- if (ret) {
- closure_put(&trans->ref);
- goto out;
- }
+ if (!closure_get_not_zero(&trans->ref))
+ continue;
- bch2_check_for_deadlock(trans, &i->buf);
+ u32 seq = seqmutex_unlock(&c->btree_trans_lock);
- i->iter = task->pid;
+ bool found = bch2_check_for_deadlock(trans, out) != 0;
closure_put(&trans->ref);
+ if (found)
+ return;
+
if (!seqmutex_relock(&c->btree_trans_lock, seq))
goto restart;
}
seqmutex_unlock(&c->btree_trans_lock);
-out:
+}
+
+static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iter *i = file->private_data;
+ struct bch_fs *c = i->c;
+ ssize_t ret = 0;
+
+ i->ubuf = buf;
+ i->size = size;
+ i->ret = 0;
+
+ if (!i->iter) {
+ btree_deadlock_to_text(&i->buf, c);
+ i->iter++;
+ }
+
if (i->buf.allocation_failure)
ret = -ENOMEM;
diff --git a/fs/bcachefs/disk_groups_format.h b/fs/bcachefs/disk_groups_format.h
new file mode 100644
index 000000000000..698990bbf1d2
--- /dev/null
+++ b/fs/bcachefs/disk_groups_format.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_DISK_GROUPS_FORMAT_H
+#define _BCACHEFS_DISK_GROUPS_FORMAT_H
+
+#define BCH_SB_LABEL_SIZE 32
+
+struct bch_disk_group {
+ __u8 label[BCH_SB_LABEL_SIZE];
+ __le64 flags[2];
+} __packed __aligned(8);
+
+LE64_BITMASK(BCH_GROUP_DELETED, struct bch_disk_group, flags[0], 0, 1)
+LE64_BITMASK(BCH_GROUP_DATA_ALLOWED, struct bch_disk_group, flags[0], 1, 6)
+LE64_BITMASK(BCH_GROUP_PARENT, struct bch_disk_group, flags[0], 6, 24)
+
+struct bch_sb_field_disk_groups {
+ struct bch_sb_field field;
+ struct bch_disk_group entries[];
+} __packed __aligned(8);
+
+#endif /* _BCACHEFS_DISK_GROUPS_FORMAT_H */
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index b26dc7424662..83e279d41829 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -268,6 +268,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
+ struct printbuf buf = PRINTBUF;
int ret = 0;
struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
@@ -289,6 +290,13 @@ static int mark_stripe_bucket(struct btree_trans *trans,
if (flags & BTREE_TRIGGER_gc) {
percpu_down_read(&c->mark_lock);
struct bucket *g = gc_bucket(ca, bucket.offset);
+ if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
+ ptr->dev,
+ (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
+ ret = -EIO;
+ goto err_unlock;
+ }
+
bucket_lock(g);
struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags);
@@ -297,10 +305,12 @@ static int mark_stripe_bucket(struct btree_trans *trans,
bch2_dev_usage_update(c, ca, &old, &new, 0, true);
}
bucket_unlock(g);
+err_unlock:
percpu_up_read(&c->mark_lock);
}
err:
bch2_dev_put(ca);
+ printbuf_exit(&buf);
return ret;
}
@@ -714,10 +724,12 @@ static void ec_block_endio(struct bio *bio)
bch2_blk_status_to_str(bio->bi_status)))
clear_bit(ec_bio->idx, ec_bio->buf->valid);
- if (dev_ptr_stale(ca, ptr)) {
+ int stale = dev_ptr_stale(ca, ptr);
+ if (stale) {
bch_err_ratelimited(ca->fs,
- "error %s stripe: stale pointer after io",
- bio_data_dir(bio) == READ ? "reading from" : "writing to");
+ "error %s stripe: stale/invalid pointer (%i) after io",
+ bio_data_dir(bio) == READ ? "reading from" : "writing to",
+ stale);
clear_bit(ec_bio->idx, ec_bio->buf->valid);
}
@@ -743,10 +755,12 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
return;
}
- if (dev_ptr_stale(ca, ptr)) {
+ int stale = dev_ptr_stale(ca, ptr);
+ if (stale) {
bch_err_ratelimited(c,
- "error %s stripe: stale pointer",
- rw == READ ? "reading from" : "writing to");
+ "error %s stripe: stale pointer (%i)",
+ rw == READ ? "reading from" : "writing to",
+ stale);
clear_bit(idx, buf->valid);
return;
}
@@ -908,7 +922,7 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
- if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
+ if (c->gc_pos.phase != GC_PHASE_not_running &&
!genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index dbe35b80bc0b..58612abf7927 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -116,6 +116,9 @@
x(ENOENT, ENOENT_dev_idx_not_found) \
x(ENOTEMPTY, ENOTEMPTY_dir_not_empty) \
x(ENOTEMPTY, ENOTEMPTY_subvol_not_empty) \
+ x(EEXIST, EEXIST_str_hash_set) \
+ x(EEXIST, EEXIST_discard_in_flight_add) \
+ x(EEXIST, EEXIST_subvolume_create) \
x(0, open_buckets_empty) \
x(0, freelist_empty) \
x(BCH_ERR_freelist_empty, no_buckets_found) \
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index c66eeffcd7f2..d95c40f1b6af 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -15,6 +15,7 @@ bool bch2_inconsistent_error(struct bch_fs *c)
switch (c->opts.errors) {
case BCH_ON_ERROR_continue:
return false;
+ case BCH_ON_ERROR_fix_safe:
case BCH_ON_ERROR_ro:
if (bch2_fs_emergency_read_only(c))
bch_err(c, "inconsistency detected - emergency read only at journal seq %llu",
@@ -191,6 +192,12 @@ static void prt_actioning(struct printbuf *out, const char *action)
prt_str(out, "ing");
}
+static const u8 fsck_flags_extra[] = {
+#define x(t, n, flags) [BCH_FSCK_ERR_##t] = flags,
+ BCH_SB_ERRS()
+#undef x
+};
+
int bch2_fsck_err(struct bch_fs *c,
enum bch_fsck_flags flags,
enum bch_sb_error_id err,
@@ -203,6 +210,9 @@ int bch2_fsck_err(struct bch_fs *c,
int ret = -BCH_ERR_fsck_ignore;
const char *action_orig = "fix?", *action = action_orig;
+ if (!WARN_ON(err >= ARRAY_SIZE(fsck_flags_extra)))
+ flags |= fsck_flags_extra[err];
+
if ((flags & FSCK_CAN_FIX) &&
test_bit(err, c->sb.errors_silent))
return -BCH_ERR_fsck_fix;
@@ -265,7 +275,14 @@ int bch2_fsck_err(struct bch_fs *c,
prt_printf(out, bch2_log_msg(c, ""));
#endif
- if (!test_bit(BCH_FS_fsck_running, &c->flags)) {
+ if ((flags & FSCK_CAN_FIX) &&
+ (flags & FSCK_AUTOFIX) &&
+ (c->opts.errors == BCH_ON_ERROR_continue ||
+ c->opts.errors == BCH_ON_ERROR_fix_safe)) {
+ prt_str(out, ", ");
+ prt_actioning(out, action);
+ ret = -BCH_ERR_fsck_fix;
+ } else if (!test_bit(BCH_FS_fsck_running, &c->flags)) {
if (c->opts.errors != BCH_ON_ERROR_continue ||
!(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) {
prt_str(out, ", shutting down");
diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h
index 36caedf72d89..777711504c35 100644
--- a/fs/bcachefs/error.h
+++ b/fs/bcachefs/error.h
@@ -108,13 +108,6 @@ struct fsck_err_state {
char *last_msg;
};
-enum bch_fsck_flags {
- FSCK_CAN_FIX = 1 << 0,
- FSCK_CAN_IGNORE = 1 << 1,
- FSCK_NEED_FSCK = 1 << 2,
- FSCK_NO_RATELIMIT = 1 << 3,
-};
-
#define fsck_err_count(_c, _err) bch2_sb_err_count(_c, BCH_FSCK_ERR_##_err)
__printf(4, 5) __cold
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 469037929685..410b8bd81b5a 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -137,7 +137,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
- if (p.ptr.cached && (!ca || dev_ptr_stale(ca, &p.ptr)))
+ if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr)))
continue;
f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
@@ -999,7 +999,7 @@ bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
bch2_bkey_drop_ptrs(k, ptr,
ptr->cached &&
(ca = bch2_dev_rcu(c, ptr->dev)) &&
- dev_ptr_stale_rcu(ca, ptr));
+ dev_ptr_stale_rcu(ca, ptr) > 0);
rcu_read_unlock();
return bkey_deleted(k.k);
@@ -1024,8 +1024,11 @@ void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struc
prt_str(out, " cached");
if (ptr->unwritten)
prt_str(out, " unwritten");
- if (bucket_valid(ca, b) && dev_ptr_stale_rcu(ca, ptr))
+ int stale = dev_ptr_stale_rcu(ca, ptr);
+ if (stale > 0)
prt_printf(out, " stale");
+ else if (stale)
+ prt_printf(out, " invalid");
}
rcu_read_unlock();
--out->atomic;
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index 6b69e5cd68dd..54873ecc635c 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -437,8 +437,8 @@ static void bch2_writepage_io_done(struct bch_write_op *op)
*/
/*
- * PageWriteback is effectively our ref on the inode - fixup i_blocks
- * before calling end_page_writeback:
+ * The writeback flag is effectively our ref on the inode -
+ * fixup i_blocks before calling folio_end_writeback:
*/
bch2_i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
@@ -898,7 +898,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
darray_for_each(fs, fi) {
f = *fi;
f_len = min(end, folio_end_pos(f)) - f_pos;
- f_copied = copy_page_from_iter_atomic(&f->page, f_offset, f_len, iter);
+ f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
if (!f_copied) {
folios_trunc(&fs, fi);
break;
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index 09d21aef879a..049b61bc9a5b 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -609,8 +609,10 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
if (unlikely(ret))
goto err_put_write_ref;
- if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
+ if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1))) {
+ ret = -EINVAL;
goto err_put_write_ref;
+ }
inode_dio_begin(&inode->v);
bch2_pagecache_block_get(inode);
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
index 205a323ffc6d..79a0c8732bce 100644
--- a/fs/bcachefs/fs-ioctl.c
+++ b/fs/bcachefs/fs-ioctl.c
@@ -308,8 +308,8 @@ static int bch2_ioc_goingdown(struct bch_fs *c, u32 __user *arg)
return ret;
}
-static long __bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp,
- struct bch_ioctl_subvolume arg)
+static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp,
+ struct bch_ioctl_subvolume arg)
{
struct inode *dir;
struct bch_inode_info *inode;
@@ -373,7 +373,7 @@ retry:
}
if (dst_dentry->d_inode) {
- error = -EEXIST;
+ error = -BCH_ERR_EEXIST_subvolume_create;
goto err3;
}
@@ -406,9 +406,12 @@ retry:
!arg.src_ptr)
snapshot_src.subvol = inode_inum(to_bch_ei(dir)).subvol;
+ down_write(&c->snapshot_create_lock);
inode = __bch2_create(file_mnt_idmap(filp), to_bch_ei(dir),
dst_dentry, arg.mode|S_IFDIR,
0, snapshot_src, create_flags);
+ up_write(&c->snapshot_create_lock);
+
error = PTR_ERR_OR_ZERO(inode);
if (error)
goto err3;
@@ -429,16 +432,6 @@ err1:
return error;
}
-static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp,
- struct bch_ioctl_subvolume arg)
-{
- down_write(&c->snapshot_create_lock);
- long ret = __bch2_ioctl_subvolume_create(c, filp, arg);
- up_write(&c->snapshot_create_lock);
-
- return ret;
-}
-
static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp,
struct bch_ioctl_subvolume arg)
{
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 96040a95cf46..f9c9a95d7d4c 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -188,6 +188,12 @@ static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_ino
BUG_ON(!old);
if (unlikely(old != inode)) {
+ /*
+ * bcachefs doesn't use I_NEW; we have no use for it since we
+ * only insert fully created inodes in the inode hash table. But
+ * discard_new_inode() expects it to be set...
+ */
+ inode->v.i_flags |= I_NEW;
discard_new_inode(&inode->v);
inode = old;
} else {
@@ -195,8 +201,10 @@ static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_ino
list_add(&inode->ei_vfs_inode_list, &c->vfs_inodes_list);
mutex_unlock(&c->vfs_inodes_lock);
/*
- * we really don't want insert_inode_locked2() to be setting
- * I_NEW...
+ * Again, I_NEW makes no sense for bcachefs. This is only needed
+ * for clearing I_NEW, but since the inode was already fully
+ * created and initialized we didn't actually want
+ * inode_insert5() to set it for us.
*/
unlock_new_inode(&inode->v);
}
@@ -227,7 +235,9 @@ static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c)
mutex_init(&inode->ei_update_lock);
two_state_lock_init(&inode->ei_pagecache_lock);
INIT_LIST_HEAD(&inode->ei_vfs_inode_list);
+ inode->ei_flags = 0;
mutex_init(&inode->ei_quota_lock);
+ memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
inode->v.i_state = 0;
if (unlikely(inode_init_always(c->vfs_sb, &inode->v))) {
@@ -1155,6 +1165,7 @@ static const struct file_operations bch_file_operations = {
.read_iter = bch2_read_iter,
.write_iter = bch2_write_iter,
.mmap = bch2_mmap,
+ .get_unmapped_area = thp_get_unmapped_area,
.fsync = bch2_fsync,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
@@ -1486,11 +1497,6 @@ static void bch2_vfs_inode_init(struct btree_trans *trans, subvol_inum inum,
bch2_iget5_set(&inode->v, &inum);
bch2_inode_update_after_write(trans, inode, bi, ~0);
- if (BCH_SUBVOLUME_SNAP(subvol))
- set_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);
- else
- clear_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);
-
inode->v.i_blocks = bi->bi_sectors;
inode->v.i_ino = bi->bi_inum;
inode->v.i_rdev = bi->bi_dev;
@@ -1502,6 +1508,9 @@ static void bch2_vfs_inode_init(struct btree_trans *trans, subvol_inum inum,
inode->ei_qid = bch_qid(bi);
inode->ei_subvol = inum.subvol;
+ if (BCH_SUBVOLUME_SNAP(subvol))
+ set_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);
+
inode->v.i_mapping->a_ops = &bch_address_space_operations;
switch (inode->v.i_mode & S_IFMT) {
@@ -1939,8 +1948,7 @@ got_sb:
if (IS_ERR(sb)) {
ret = PTR_ERR(sb);
- ret = bch2_err_class(ret);
- return ERR_PTR(ret);
+ goto err;
}
c = sb->s_fs_info;
@@ -1968,6 +1976,7 @@ got_sb:
sb->s_time_min = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1;
sb->s_time_max = div_s64(S64_MAX, c->sb.time_units_per_sec);
sb->s_uuid = c->sb.user_uuid;
+ sb->s_shrink->seeks = 0;
c->vfs_sb = sb;
strscpy(sb->s_id, c->name, sizeof(sb->s_id));
@@ -2016,6 +2025,15 @@ out:
err_put_super:
__bch2_fs_stop(c);
deactivate_locked_super(sb);
+err:
+ /*
+ * On an inconsistency error in recovery we might see an -EROFS derived
+	 * On an inconsistency error in recovery we might see an -EROFS derived
+	 * error code (from the journal), but we don't want to return that to
+ * confusing:
+ */
+ if (bch2_err_matches(ret, EROFS) && ret != -EROFS)
+ ret = -EIO;
return ERR_PTR(bch2_err_class(ret));
}
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index c8f57465131c..921bcdb3e5e4 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -77,21 +77,17 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
struct bkey_s_c k;
int ret;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
- POS(0, inode_nr),
- BTREE_ITER_all_snapshots);
- k = bch2_btree_iter_peek(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
- ret = -BCH_ERR_ENOENT_inode;
- goto err;
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inode_nr),
+ BTREE_ITER_all_snapshots, k, ret) {
+ if (k.k->p.offset != inode_nr)
+ break;
+ if (!bkey_is_inode(k.k))
+ continue;
+ ret = bch2_inode_unpack(k, inode);
+ goto found;
}
-
- ret = bch2_inode_unpack(k, inode);
-err:
+ ret = -BCH_ERR_ENOENT_inode;
+found:
bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -770,25 +766,6 @@ static int get_visible_inodes(struct btree_trans *trans,
return ret;
}
-static int check_key_has_snapshot(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
- bkey_in_missing_snapshot,
- "key in missing snapshot: %s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- ret = bch2_btree_delete_at(trans, iter,
- BTREE_UPDATE_internal_snapshot_node) ?: 1;
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
static int hash_redo_key(struct btree_trans *trans,
const struct bch_hash_desc desc,
struct bch_hash_info *hash_info,
@@ -983,7 +960,7 @@ static int check_inode(struct btree_trans *trans,
bool do_update = false;
int ret;
- ret = check_key_has_snapshot(trans, iter, k);
+ ret = bch2_check_key_has_snapshot(trans, iter, k);
if (ret < 0)
goto err;
if (ret)
@@ -1487,7 +1464,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
struct printbuf buf = PRINTBUF;
int ret = 0;
- ret = check_key_has_snapshot(trans, iter, k);
+ ret = bch2_check_key_has_snapshot(trans, iter, k);
if (ret) {
ret = ret < 0 ? ret : 0;
goto out;
@@ -1700,6 +1677,7 @@ static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
trans_was_restarted(trans, restart_count);
}
+noinline_for_stack
static int check_dirent_inode_dirent(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c_dirent d,
@@ -1796,6 +1774,7 @@ out_noiter:
return ret;
}
+noinline_for_stack
static int check_dirent_target(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c_dirent d,
@@ -1870,6 +1849,7 @@ found:
return ret;
}
+noinline_for_stack
static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c_dirent d)
{
@@ -2010,7 +1990,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
struct printbuf buf = PRINTBUF;
int ret = 0;
- ret = check_key_has_snapshot(trans, iter, k);
+ ret = bch2_check_key_has_snapshot(trans, iter, k);
if (ret) {
ret = ret < 0 ? ret : 0;
goto out;
@@ -2165,7 +2145,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
struct inode_walker_entry *i;
int ret;
- ret = check_key_has_snapshot(trans, iter, k);
+ ret = bch2_check_key_has_snapshot(trans, iter, k);
if (ret < 0)
return ret;
if (ret)
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index f57486794484..c97fa7002b06 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -84,9 +84,10 @@ struct promote_op {
};
static const struct rhashtable_params bch_promote_params = {
- .head_offset = offsetof(struct promote_op, hash),
- .key_offset = offsetof(struct promote_op, pos),
- .key_len = sizeof(struct bpos),
+ .head_offset = offsetof(struct promote_op, hash),
+ .key_offset = offsetof(struct promote_op, pos),
+ .key_len = sizeof(struct bpos),
+ .automatic_shrinking = true,
};
static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
@@ -776,18 +777,32 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
PTR_BUCKET_POS(ca, &ptr),
BTREE_ITER_cached);
- prt_printf(&buf, "Attempting to read from stale dirty pointer:\n");
- printbuf_indent_add(&buf, 2);
+ u8 *gen = bucket_gen(ca, iter.pos.offset);
+ if (gen) {
- bch2_bkey_val_to_text(&buf, c, k);
- prt_newline(&buf);
+ prt_printf(&buf, "Attempting to read from stale dirty pointer:\n");
+ printbuf_indent_add(&buf, 2);
- prt_printf(&buf, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
-
- ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
- if (!ret) {
+ bch2_bkey_val_to_text(&buf, c, k);
prt_newline(&buf);
+
+ prt_printf(&buf, "memory gen: %u", *gen);
+
+ ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
+ if (!ret) {
+ prt_newline(&buf);
+ bch2_bkey_val_to_text(&buf, c, k);
+ }
+ } else {
+ prt_printf(&buf, "Attempting to read from invalid bucket %llu:%llu:\n",
+ iter.pos.inode, iter.pos.offset);
+ printbuf_indent_add(&buf, 2);
+
+ prt_printf(&buf, "first bucket %u nbuckets %llu\n",
+ ca->mi.first_bucket, ca->mi.nbuckets);
+
bch2_bkey_val_to_text(&buf, c, k);
+ prt_newline(&buf);
}
bch2_fs_inconsistent(c, "%s", buf.buf);
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 9401d13e31bb..05e0cbef420b 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -1220,7 +1220,7 @@ static void bch2_nocow_write(struct bch_write_op *op)
DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
u32 snapshot;
struct bucket_to_lock *stale_at;
- int ret;
+ int stale, ret;
if (op->flags & BCH_WRITE_MOVE)
return;
@@ -1299,7 +1299,8 @@ retry:
BUCKET_NOCOW_LOCK_UPDATE);
rcu_read_lock();
- bool stale = gen_after(*bucket_gen(ca, i->b.offset), i->gen);
+ u8 *gen = bucket_gen(ca, i->b.offset);
+ stale = !gen ? -1 : gen_after(*gen, i->gen);
rcu_read_unlock();
if (unlikely(stale)) {
@@ -1380,8 +1381,18 @@ err_bucket_stale:
break;
}
- /* We can retry this: */
- ret = -BCH_ERR_transaction_restart;
+ struct printbuf buf = PRINTBUF;
+ if (bch2_fs_inconsistent_on(stale < 0, c,
+ "pointer to invalid bucket in nocow path on device %llu\n %s",
+ stale_at->b.inode,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = -EIO;
+ } else {
+ /* We can retry this: */
+ ret = -BCH_ERR_transaction_restart;
+ }
+ printbuf_exit(&buf);
+
goto err_get_ioref;
}
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index adec8e1ea73e..13669dd0e375 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -1167,6 +1167,9 @@ void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
void bch2_fs_journal_stop(struct journal *j)
{
+ if (!test_bit(JOURNAL_running, &j->flags))
+ return;
+
bch2_journal_reclaim_stop(j);
bch2_journal_flush_all_pins(j);
@@ -1518,6 +1521,11 @@ bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64
struct journal_entry_pin *pin;
spin_lock(&j->lock);
+ if (!test_bit(JOURNAL_running, &j->flags)) {
+ spin_unlock(&j->lock);
+ return true;
+ }
+
*seq = max(*seq, j->pin.front);
if (*seq >= j->pin.back) {
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index cdcb1ad49af4..db24ce21b2ac 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1677,6 +1677,13 @@ static CLOSURE_CALLBACK(journal_write_done)
mod_delayed_work(j->wq, &j->write_work, max(0L, delta));
}
+ /*
+	 * We don't typically trigger journal writes from here - the next journal
+ * write will be triggered immediately after the previous one is
+ * allocated, in bch2_journal_write() - but the journal write error path
+ * is special:
+ */
+ bch2_journal_do_writes(j);
spin_unlock(&j->lock);
}
@@ -1967,7 +1974,6 @@ CLOSURE_CALLBACK(bch2_journal_write)
struct journal *j = container_of(w, struct journal, buf[w->idx]);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_replicas_padded replicas;
- struct printbuf journal_debug_buf = PRINTBUF;
unsigned nr_rw_members = 0;
int ret;
@@ -2011,11 +2017,15 @@ CLOSURE_CALLBACK(bch2_journal_write)
}
if (ret) {
- __bch2_journal_debug_to_text(&journal_debug_buf, j);
+ struct printbuf buf = PRINTBUF;
+ buf.atomic++;
+
+ prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write: %s"),
+ bch2_err_str(ret));
+ __bch2_journal_debug_to_text(&buf, j);
spin_unlock(&j->lock);
- bch_err(c, "Unable to allocate journal write:\n%s",
- journal_debug_buf.buf);
- printbuf_exit(&journal_debug_buf);
+ bch2_print_string_as_lines(KERN_ERR, buf.buf);
+ printbuf_exit(&buf);
goto err;
}
diff --git a/fs/bcachefs/journal_seq_blacklist.c b/fs/bcachefs/journal_seq_blacklist.c
index ed4846709611..1f25c111c54c 100644
--- a/fs/bcachefs/journal_seq_blacklist.c
+++ b/fs/bcachefs/journal_seq_blacklist.c
@@ -232,7 +232,7 @@ bool bch2_blacklist_entries_gc(struct bch_fs *c)
BUG_ON(nr != t->nr);
unsigned i;
- for (src = bl->start, i = eytzinger0_first(t->nr);
+ for (src = bl->start, i = t->nr == 0 ? 0 : eytzinger0_first(t->nr);
src < bl->start + nr;
src++, i = eytzinger0_next(i, nr)) {
BUG_ON(t->entries[i].start != le64_to_cpu(src->start));
diff --git a/fs/bcachefs/journal_seq_blacklist_format.h b/fs/bcachefs/journal_seq_blacklist_format.h
new file mode 100644
index 000000000000..2566b12dbc04
--- /dev/null
+++ b/fs/bcachefs/journal_seq_blacklist_format.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_JOURNAL_SEQ_BLACKLIST_FORMAT_H
+#define _BCACHEFS_JOURNAL_SEQ_BLACKLIST_FORMAT_H
+
+struct journal_seq_blacklist_entry {
+ __le64 start;
+ __le64 end;
+};
+
+struct bch_sb_field_journal_seq_blacklist {
+ struct bch_sb_field field;
+ struct journal_seq_blacklist_entry start[];
+};
+
+#endif /* _BCACHEFS_JOURNAL_SEQ_BLACKLIST_FORMAT_H */
diff --git a/fs/bcachefs/lru.h b/fs/bcachefs/lru.h
index fb11ab0dd00e..bd71ba77de07 100644
--- a/fs/bcachefs/lru.h
+++ b/fs/bcachefs/lru.h
@@ -2,9 +2,6 @@
#ifndef _BCACHEFS_LRU_H
#define _BCACHEFS_LRU_H
-#define LRU_TIME_BITS 48
-#define LRU_TIME_MAX ((1ULL << LRU_TIME_BITS) - 1)
-
static inline u64 lru_pos_id(struct bpos pos)
{
return pos.inode >> LRU_TIME_BITS;
diff --git a/fs/bcachefs/mean_and_variance_test.c b/fs/bcachefs/mean_and_variance_test.c
index 4c298e74723d..e9d9c0212e44 100644
--- a/fs/bcachefs/mean_and_variance_test.c
+++ b/fs/bcachefs/mean_and_variance_test.c
@@ -217,4 +217,5 @@ static struct kunit_suite mean_and_variance_test_suite = {
kunit_test_suite(mean_and_variance_test_suite);
MODULE_AUTHOR("Daniel B. Hill");
+MODULE_DESCRIPTION("bcachefs filesystem mean and variance unit tests");
MODULE_LICENSE("GPL");
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 8171f947fac8..6e477fadaa2a 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -547,6 +547,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
ctxt->stats->pos = BBPOS(btree_id, start);
}
+ bch2_trans_begin(trans);
bch2_trans_iter_init(trans, &iter, btree_id, start,
BTREE_ITER_prefetch|
BTREE_ITER_all_snapshots);
@@ -920,7 +921,20 @@ static bool rereplicate_pred(struct bch_fs *c, void *arg,
? c->opts.metadata_replicas
: io_opts->data_replicas;
- if (!nr_good || nr_good >= replicas)
+ rcu_read_lock();
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ unsigned i = 0;
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
+ if (!ptr->cached &&
+ (!ca || !ca->mi.durability))
+ data_opts->kill_ptrs |= BIT(i);
+ i++;
+ }
+ rcu_read_unlock();
+
+ if (!data_opts->kill_ptrs &&
+ (!nr_good || nr_good >= replicas))
return false;
data_opts->target = 0;
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 10bfb31c151b..eb49dd045eff 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -35,9 +35,10 @@ struct buckets_in_flight {
};
static const struct rhashtable_params bch_move_bucket_params = {
- .head_offset = offsetof(struct move_bucket_in_flight, hash),
- .key_offset = offsetof(struct move_bucket_in_flight, bucket.k),
- .key_len = sizeof(struct move_bucket_key),
+ .head_offset = offsetof(struct move_bucket_in_flight, hash),
+ .key_offset = offsetof(struct move_bucket_in_flight, bucket.k),
+ .key_len = sizeof(struct move_bucket_key),
+ .automatic_shrinking = true,
};
static struct move_bucket_in_flight *
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index 25530e0bb2f3..b197ec90d4cb 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -137,7 +137,7 @@ enum fsck_err_opts {
x(errors, u8, \
OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_STR(bch2_error_actions), \
- BCH_SB_ERROR_ACTION, BCH_ON_ERROR_ro, \
+ BCH_SB_ERROR_ACTION, BCH_ON_ERROR_fix_safe, \
NULL, "Action to take on filesystem error") \
x(metadata_replicas, u8, \
OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index cf513fc79ce4..1f9d044ed920 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -326,6 +326,12 @@ static int journal_replay_entry_early(struct bch_fs *c,
case BCH_JSET_ENTRY_btree_root: {
struct btree_root *r;
+ if (fsck_err_on(entry->btree_id >= BTREE_ID_NR_MAX,
+ c, invalid_btree_id,
+ "invalid btree id %u (max %u)",
+ entry->btree_id, BTREE_ID_NR_MAX))
+ return 0;
+
while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) {
ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL });
if (ret)
@@ -415,7 +421,7 @@ static int journal_replay_entry_early(struct bch_fs *c,
atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
}
}
-
+fsck_err:
return ret;
}
@@ -658,10 +664,10 @@ int bch2_fs_recovery(struct bch_fs *c)
if (check_version_upgrade(c))
write_sb = true;
+ c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
+
if (write_sb)
bch2_write_super(c);
-
- c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
mutex_unlock(&c->sb_lock);
if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
diff --git a/fs/bcachefs/replicas_format.h b/fs/bcachefs/replicas_format.h
new file mode 100644
index 000000000000..b97208195d06
--- /dev/null
+++ b/fs/bcachefs/replicas_format.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_REPLICAS_FORMAT_H
+#define _BCACHEFS_REPLICAS_FORMAT_H
+
+struct bch_replicas_entry_v0 {
+ __u8 data_type;
+ __u8 nr_devs;
+ __u8 devs[];
+} __packed;
+
+struct bch_sb_field_replicas_v0 {
+ struct bch_sb_field field;
+ struct bch_replicas_entry_v0 entries[];
+} __packed __aligned(8);
+
+struct bch_replicas_entry_v1 {
+ __u8 data_type;
+ __u8 nr_devs;
+ __u8 nr_required;
+ __u8 devs[];
+} __packed;
+
+struct bch_sb_field_replicas {
+ struct bch_sb_field field;
+ struct bch_replicas_entry_v1 entries[];
+} __packed __aligned(8);
+
+#define replicas_entry_bytes(_i) \
+ (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
+
+#endif /* _BCACHEFS_REPLICAS_FORMAT_H */
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
index 390a1bbd2567..4710b61631f0 100644
--- a/fs/bcachefs/sb-downgrade.c
+++ b/fs/bcachefs/sb-downgrade.c
@@ -146,10 +146,17 @@ static int bch2_sb_downgrade_validate(struct bch_sb *sb, struct bch_sb_field *f,
for (const struct bch_sb_field_downgrade_entry *i = e->entries;
(void *) i < vstruct_end(&e->field);
i = downgrade_entry_next_c(i)) {
+ /*
+ * Careful: sb_field_downgrade_entry is only 2 byte aligned, but
+ * section sizes are 8 byte aligned - an empty entry spanning
+ * the end of the section is allowed (and ignored):
+ */
+ if ((void *) &i->errors[0] > vstruct_end(&e->field))
+ break;
+
if (flags & BCH_VALIDATE_write &&
- ((void *) &i->errors[0] > vstruct_end(&e->field) ||
- (void *) downgrade_entry_next_c(i) > vstruct_end(&e->field))) {
- prt_printf(err, "downgrade entry overruns end of superblock section)");
+ (void *) downgrade_entry_next_c(i) > vstruct_end(&e->field)) {
+ prt_printf(err, "downgrade entry overruns end of superblock section");
return -BCH_ERR_invalid_sb_downgrade;
}
@@ -221,7 +228,7 @@ int bch2_sb_downgrade_update(struct bch_fs *c)
dst = (void *) &darray_top(table);
dst->version = cpu_to_le16(src->version);
- dst->recovery_passes[0] = cpu_to_le64(src->recovery_passes);
+ dst->recovery_passes[0] = cpu_to_le64(bch2_recovery_passes_to_stable(src->recovery_passes));
dst->recovery_passes[1] = 0;
dst->nr_errors = cpu_to_le16(src->nr_errors);
for (unsigned i = 0; i < src->nr_errors; i++)
diff --git a/fs/bcachefs/sb-downgrade_format.h b/fs/bcachefs/sb-downgrade_format.h
new file mode 100644
index 000000000000..cffd932be3ec
--- /dev/null
+++ b/fs/bcachefs/sb-downgrade_format.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_DOWNGRADE_FORMAT_H
+#define _BCACHEFS_SB_DOWNGRADE_FORMAT_H
+
+struct bch_sb_field_downgrade_entry {
+ __le16 version;
+ __le64 recovery_passes[2];
+ __le16 nr_errors;
+ __le16 errors[] __counted_by(nr_errors);
+} __packed __aligned(2);
+
+struct bch_sb_field_downgrade {
+ struct bch_sb_field field;
+ struct bch_sb_field_downgrade_entry entries[];
+};
+
+#endif /* _BCACHEFS_SB_DOWNGRADE_FORMAT_H */
diff --git a/fs/bcachefs/sb-errors.c b/fs/bcachefs/sb-errors.c
index bda33e59e226..c1270d790e43 100644
--- a/fs/bcachefs/sb-errors.c
+++ b/fs/bcachefs/sb-errors.c
@@ -110,19 +110,25 @@ out:
void bch2_sb_errors_from_cpu(struct bch_fs *c)
{
bch_sb_errors_cpu *src = &c->fsck_error_counts;
- struct bch_sb_field_errors *dst =
- bch2_sb_field_resize(&c->disk_sb, errors,
- bch2_sb_field_errors_u64s(src->nr));
+ struct bch_sb_field_errors *dst;
unsigned i;
+ mutex_lock(&c->fsck_error_counts_lock);
+
+ dst = bch2_sb_field_resize(&c->disk_sb, errors,
+ bch2_sb_field_errors_u64s(src->nr));
+
if (!dst)
- return;
+ goto err;
for (i = 0; i < src->nr; i++) {
SET_BCH_SB_ERROR_ENTRY_ID(&dst->entries[i], src->data[i].id);
SET_BCH_SB_ERROR_ENTRY_NR(&dst->entries[i], src->data[i].nr);
dst->entries[i].last_error_time = cpu_to_le64(src->data[i].last_error_time);
}
+
+err:
+ mutex_unlock(&c->fsck_error_counts_lock);
}
static int bch2_sb_errors_to_cpu(struct bch_fs *c)
diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h
new file mode 100644
index 000000000000..d6f35a99c429
--- /dev/null
+++ b/fs/bcachefs/sb-errors_format.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_ERRORS_FORMAT_H
+#define _BCACHEFS_SB_ERRORS_FORMAT_H
+
+enum bch_fsck_flags {
+ FSCK_CAN_FIX = 1 << 0,
+ FSCK_CAN_IGNORE = 1 << 1,
+ FSCK_NEED_FSCK = 1 << 2,
+ FSCK_NO_RATELIMIT = 1 << 3,
+ FSCK_AUTOFIX = 1 << 4,
+};
+
+#define BCH_SB_ERRS() \
+ x(clean_but_journal_not_empty, 0, 0) \
+ x(dirty_but_no_journal_entries, 1, 0) \
+ x(dirty_but_no_journal_entries_post_drop_nonflushes, 2, 0) \
+ x(sb_clean_journal_seq_mismatch, 3, 0) \
+ x(sb_clean_btree_root_mismatch, 4, 0) \
+ x(sb_clean_missing, 5, 0) \
+ x(jset_unsupported_version, 6, 0) \
+ x(jset_unknown_csum, 7, 0) \
+ x(jset_last_seq_newer_than_seq, 8, 0) \
+ x(jset_past_bucket_end, 9, 0) \
+ x(jset_seq_blacklisted, 10, 0) \
+ x(journal_entries_missing, 11, 0) \
+ x(journal_entry_replicas_not_marked, 12, 0) \
+ x(journal_entry_past_jset_end, 13, 0) \
+ x(journal_entry_replicas_data_mismatch, 14, 0) \
+ x(journal_entry_bkey_u64s_0, 15, 0) \
+ x(journal_entry_bkey_past_end, 16, 0) \
+ x(journal_entry_bkey_bad_format, 17, 0) \
+ x(journal_entry_bkey_invalid, 18, 0) \
+ x(journal_entry_btree_root_bad_size, 19, 0) \
+ x(journal_entry_blacklist_bad_size, 20, 0) \
+ x(journal_entry_blacklist_v2_bad_size, 21, 0) \
+ x(journal_entry_blacklist_v2_start_past_end, 22, 0) \
+ x(journal_entry_usage_bad_size, 23, 0) \
+ x(journal_entry_data_usage_bad_size, 24, 0) \
+ x(journal_entry_clock_bad_size, 25, 0) \
+ x(journal_entry_clock_bad_rw, 26, 0) \
+ x(journal_entry_dev_usage_bad_size, 27, 0) \
+ x(journal_entry_dev_usage_bad_dev, 28, 0) \
+ x(journal_entry_dev_usage_bad_pad, 29, 0) \
+ x(btree_node_unreadable, 30, 0) \
+ x(btree_node_fault_injected, 31, 0) \
+ x(btree_node_bad_magic, 32, 0) \
+ x(btree_node_bad_seq, 33, 0) \
+ x(btree_node_unsupported_version, 34, 0) \
+ x(btree_node_bset_older_than_sb_min, 35, 0) \
+ x(btree_node_bset_newer_than_sb, 36, 0) \
+ x(btree_node_data_missing, 37, 0) \
+ x(btree_node_bset_after_end, 38, 0) \
+ x(btree_node_replicas_sectors_written_mismatch, 39, 0) \
+ x(btree_node_replicas_data_mismatch, 40, 0) \
+ x(bset_unknown_csum, 41, 0) \
+ x(bset_bad_csum, 42, 0) \
+ x(bset_past_end_of_btree_node, 43, 0) \
+ x(bset_wrong_sector_offset, 44, 0) \
+ x(bset_empty, 45, 0) \
+ x(bset_bad_seq, 46, 0) \
+ x(bset_blacklisted_journal_seq, 47, 0) \
+ x(first_bset_blacklisted_journal_seq, 48, 0) \
+ x(btree_node_bad_btree, 49, 0) \
+ x(btree_node_bad_level, 50, 0) \
+ x(btree_node_bad_min_key, 51, 0) \
+ x(btree_node_bad_max_key, 52, 0) \
+ x(btree_node_bad_format, 53, 0) \
+ x(btree_node_bkey_past_bset_end, 54, 0) \
+ x(btree_node_bkey_bad_format, 55, 0) \
+ x(btree_node_bad_bkey, 56, 0) \
+ x(btree_node_bkey_out_of_order, 57, 0) \
+ x(btree_root_bkey_invalid, 58, 0) \
+ x(btree_root_read_error, 59, 0) \
+ x(btree_root_bad_min_key, 60, 0) \
+ x(btree_root_bad_max_key, 61, 0) \
+ x(btree_node_read_error, 62, 0) \
+ x(btree_node_topology_bad_min_key, 63, 0) \
+ x(btree_node_topology_bad_max_key, 64, 0) \
+ x(btree_node_topology_overwritten_by_prev_node, 65, 0) \
+ x(btree_node_topology_overwritten_by_next_node, 66, 0) \
+ x(btree_node_topology_interior_node_empty, 67, 0) \
+ x(fs_usage_hidden_wrong, 68, FSCK_AUTOFIX) \
+ x(fs_usage_btree_wrong, 69, FSCK_AUTOFIX) \
+ x(fs_usage_data_wrong, 70, FSCK_AUTOFIX) \
+ x(fs_usage_cached_wrong, 71, FSCK_AUTOFIX) \
+ x(fs_usage_reserved_wrong, 72, FSCK_AUTOFIX) \
+ x(fs_usage_persistent_reserved_wrong, 73, FSCK_AUTOFIX) \
+ x(fs_usage_nr_inodes_wrong, 74, FSCK_AUTOFIX) \
+ x(fs_usage_replicas_wrong, 75, FSCK_AUTOFIX) \
+ x(dev_usage_buckets_wrong, 76, FSCK_AUTOFIX) \
+ x(dev_usage_sectors_wrong, 77, FSCK_AUTOFIX) \
+ x(dev_usage_fragmented_wrong, 78, FSCK_AUTOFIX) \
+ x(dev_usage_buckets_ec_wrong, 79, FSCK_AUTOFIX) \
+ x(bkey_version_in_future, 80, 0) \
+ x(bkey_u64s_too_small, 81, 0) \
+ x(bkey_invalid_type_for_btree, 82, 0) \
+ x(bkey_extent_size_zero, 83, 0) \
+ x(bkey_extent_size_greater_than_offset, 84, 0) \
+ x(bkey_size_nonzero, 85, 0) \
+ x(bkey_snapshot_nonzero, 86, 0) \
+ x(bkey_snapshot_zero, 87, 0) \
+ x(bkey_at_pos_max, 88, 0) \
+ x(bkey_before_start_of_btree_node, 89, 0) \
+ x(bkey_after_end_of_btree_node, 90, 0) \
+ x(bkey_val_size_nonzero, 91, 0) \
+ x(bkey_val_size_too_small, 92, 0) \
+ x(alloc_v1_val_size_bad, 93, 0) \
+ x(alloc_v2_unpack_error, 94, 0) \
+ x(alloc_v3_unpack_error, 95, 0) \
+ x(alloc_v4_val_size_bad, 96, 0) \
+ x(alloc_v4_backpointers_start_bad, 97, 0) \
+ x(alloc_key_data_type_bad, 98, 0) \
+ x(alloc_key_empty_but_have_data, 99, 0) \
+ x(alloc_key_dirty_sectors_0, 100, 0) \
+ x(alloc_key_data_type_inconsistency, 101, 0) \
+ x(alloc_key_to_missing_dev_bucket, 102, 0) \
+ x(alloc_key_cached_inconsistency, 103, 0) \
+ x(alloc_key_cached_but_read_time_zero, 104, 0) \
+ x(alloc_key_to_missing_lru_entry, 105, 0) \
+ x(alloc_key_data_type_wrong, 106, FSCK_AUTOFIX) \
+ x(alloc_key_gen_wrong, 107, FSCK_AUTOFIX) \
+ x(alloc_key_dirty_sectors_wrong, 108, FSCK_AUTOFIX) \
+ x(alloc_key_cached_sectors_wrong, 109, FSCK_AUTOFIX) \
+ x(alloc_key_stripe_wrong, 110, FSCK_AUTOFIX) \
+ x(alloc_key_stripe_redundancy_wrong, 111, FSCK_AUTOFIX) \
+ x(bucket_sector_count_overflow, 112, 0) \
+ x(bucket_metadata_type_mismatch, 113, 0) \
+ x(need_discard_key_wrong, 114, 0) \
+ x(freespace_key_wrong, 115, 0) \
+ x(freespace_hole_missing, 116, 0) \
+ x(bucket_gens_val_size_bad, 117, 0) \
+ x(bucket_gens_key_wrong, 118, 0) \
+ x(bucket_gens_hole_wrong, 119, 0) \
+ x(bucket_gens_to_invalid_dev, 120, 0) \
+ x(bucket_gens_to_invalid_buckets, 121, 0) \
+ x(bucket_gens_nonzero_for_invalid_buckets, 122, 0) \
+ x(need_discard_freespace_key_to_invalid_dev_bucket, 123, 0) \
+ x(need_discard_freespace_key_bad, 124, 0) \
+ x(backpointer_bucket_offset_wrong, 125, 0) \
+ x(backpointer_to_missing_device, 126, 0) \
+ x(backpointer_to_missing_alloc, 127, 0) \
+ x(backpointer_to_missing_ptr, 128, 0) \
+ x(lru_entry_at_time_0, 129, 0) \
+ x(lru_entry_to_invalid_bucket, 130, 0) \
+ x(lru_entry_bad, 131, 0) \
+ x(btree_ptr_val_too_big, 132, 0) \
+ x(btree_ptr_v2_val_too_big, 133, 0) \
+ x(btree_ptr_has_non_ptr, 134, 0) \
+ x(extent_ptrs_invalid_entry, 135, 0) \
+ x(extent_ptrs_no_ptrs, 136, 0) \
+ x(extent_ptrs_too_many_ptrs, 137, 0) \
+ x(extent_ptrs_redundant_crc, 138, 0) \
+ x(extent_ptrs_redundant_stripe, 139, 0) \
+ x(extent_ptrs_unwritten, 140, 0) \
+ x(extent_ptrs_written_and_unwritten, 141, 0) \
+ x(ptr_to_invalid_device, 142, 0) \
+ x(ptr_to_duplicate_device, 143, 0) \
+ x(ptr_after_last_bucket, 144, 0) \
+ x(ptr_before_first_bucket, 145, 0) \
+ x(ptr_spans_multiple_buckets, 146, 0) \
+ x(ptr_to_missing_backpointer, 147, 0) \
+ x(ptr_to_missing_alloc_key, 148, 0) \
+ x(ptr_to_missing_replicas_entry, 149, 0) \
+ x(ptr_to_missing_stripe, 150, 0) \
+ x(ptr_to_incorrect_stripe, 151, 0) \
+ x(ptr_gen_newer_than_bucket_gen, 152, 0) \
+ x(ptr_too_stale, 153, 0) \
+ x(stale_dirty_ptr, 154, 0) \
+ x(ptr_bucket_data_type_mismatch, 155, 0) \
+ x(ptr_cached_and_erasure_coded, 156, 0) \
+ x(ptr_crc_uncompressed_size_too_small, 157, 0) \
+ x(ptr_crc_csum_type_unknown, 158, 0) \
+ x(ptr_crc_compression_type_unknown, 159, 0) \
+ x(ptr_crc_redundant, 160, 0) \
+ x(ptr_crc_uncompressed_size_too_big, 161, 0) \
+ x(ptr_crc_nonce_mismatch, 162, 0) \
+ x(ptr_stripe_redundant, 163, 0) \
+ x(reservation_key_nr_replicas_invalid, 164, 0) \
+ x(reflink_v_refcount_wrong, 165, 0) \
+ x(reflink_p_to_missing_reflink_v, 166, 0) \
+ x(stripe_pos_bad, 167, 0) \
+ x(stripe_val_size_bad, 168, 0) \
+ x(stripe_sector_count_wrong, 169, 0) \
+ x(snapshot_tree_pos_bad, 170, 0) \
+ x(snapshot_tree_to_missing_snapshot, 171, 0) \
+ x(snapshot_tree_to_missing_subvol, 172, 0) \
+ x(snapshot_tree_to_wrong_subvol, 173, 0) \
+ x(snapshot_tree_to_snapshot_subvol, 174, 0) \
+ x(snapshot_pos_bad, 175, 0) \
+ x(snapshot_parent_bad, 176, 0) \
+ x(snapshot_children_not_normalized, 177, 0) \
+ x(snapshot_child_duplicate, 178, 0) \
+ x(snapshot_child_bad, 179, 0) \
+ x(snapshot_skiplist_not_normalized, 180, 0) \
+ x(snapshot_skiplist_bad, 181, 0) \
+ x(snapshot_should_not_have_subvol, 182, 0) \
+ x(snapshot_to_bad_snapshot_tree, 183, 0) \
+ x(snapshot_bad_depth, 184, 0) \
+ x(snapshot_bad_skiplist, 185, 0) \
+ x(subvol_pos_bad, 186, 0) \
+ x(subvol_not_master_and_not_snapshot, 187, 0) \
+ x(subvol_to_missing_root, 188, 0) \
+ x(subvol_root_wrong_bi_subvol, 189, 0) \
+ x(bkey_in_missing_snapshot, 190, 0) \
+ x(inode_pos_inode_nonzero, 191, 0) \
+ x(inode_pos_blockdev_range, 192, 0) \
+ x(inode_unpack_error, 193, 0) \
+ x(inode_str_hash_invalid, 194, 0) \
+ x(inode_v3_fields_start_bad, 195, 0) \
+ x(inode_snapshot_mismatch, 196, 0) \
+ x(inode_unlinked_but_clean, 197, 0) \
+ x(inode_unlinked_but_nlink_nonzero, 198, 0) \
+ x(inode_checksum_type_invalid, 199, 0) \
+ x(inode_compression_type_invalid, 200, 0) \
+ x(inode_subvol_root_but_not_dir, 201, 0) \
+ x(inode_i_size_dirty_but_clean, 202, 0) \
+ x(inode_i_sectors_dirty_but_clean, 203, 0) \
+ x(inode_i_sectors_wrong, 204, 0) \
+ x(inode_dir_wrong_nlink, 205, 0) \
+ x(inode_dir_multiple_links, 206, 0) \
+ x(inode_multiple_links_but_nlink_0, 207, 0) \
+ x(inode_wrong_backpointer, 208, 0) \
+ x(inode_wrong_nlink, 209, 0) \
+ x(inode_unreachable, 210, 0) \
+ x(deleted_inode_but_clean, 211, 0) \
+ x(deleted_inode_missing, 212, 0) \
+ x(deleted_inode_is_dir, 213, 0) \
+ x(deleted_inode_not_unlinked, 214, 0) \
+ x(extent_overlapping, 215, 0) \
+ x(extent_in_missing_inode, 216, 0) \
+ x(extent_in_non_reg_inode, 217, 0) \
+ x(extent_past_end_of_inode, 218, 0) \
+ x(dirent_empty_name, 219, 0) \
+ x(dirent_val_too_big, 220, 0) \
+ x(dirent_name_too_long, 221, 0) \
+ x(dirent_name_embedded_nul, 222, 0) \
+ x(dirent_name_dot_or_dotdot, 223, 0) \
+ x(dirent_name_has_slash, 224, 0) \
+ x(dirent_d_type_wrong, 225, 0) \
+ x(inode_bi_parent_wrong, 226, 0) \
+ x(dirent_in_missing_dir_inode, 227, 0) \
+ x(dirent_in_non_dir_inode, 228, 0) \
+ x(dirent_to_missing_inode, 229, 0) \
+ x(dirent_to_missing_subvol, 230, 0) \
+ x(dirent_to_itself, 231, 0) \
+ x(quota_type_invalid, 232, 0) \
+ x(xattr_val_size_too_small, 233, 0) \
+ x(xattr_val_size_too_big, 234, 0) \
+ x(xattr_invalid_type, 235, 0) \
+ x(xattr_name_invalid_chars, 236, 0) \
+ x(xattr_in_missing_inode, 237, 0) \
+ x(root_subvol_missing, 238, 0) \
+ x(root_dir_missing, 239, 0) \
+ x(root_inode_not_dir, 240, 0) \
+ x(dir_loop, 241, 0) \
+ x(hash_table_key_duplicate, 242, 0) \
+ x(hash_table_key_wrong_offset, 243, 0) \
+ x(unlinked_inode_not_on_deleted_list, 244, 0) \
+ x(reflink_p_front_pad_bad, 245, 0) \
+ x(journal_entry_dup_same_device, 246, 0) \
+ x(inode_bi_subvol_missing, 247, 0) \
+ x(inode_bi_subvol_wrong, 248, 0) \
+ x(inode_points_to_missing_dirent, 249, 0) \
+ x(inode_points_to_wrong_dirent, 250, 0) \
+ x(inode_bi_parent_nonzero, 251, 0) \
+ x(dirent_to_missing_parent_subvol, 252, 0) \
+ x(dirent_not_visible_in_parent_subvol, 253, 0) \
+ x(subvol_fs_path_parent_wrong, 254, 0) \
+ x(subvol_root_fs_path_parent_nonzero, 255, 0) \
+ x(subvol_children_not_set, 256, 0) \
+ x(subvol_children_bad, 257, 0) \
+ x(subvol_loop, 258, 0) \
+ x(subvol_unreachable, 259, 0) \
+ x(btree_node_bkey_bad_u64s, 260, 0) \
+ x(btree_node_topology_empty_interior_node, 261, 0) \
+ x(btree_ptr_v2_min_key_bad, 262, 0) \
+ x(btree_root_unreadable_and_scan_found_nothing, 263, 0) \
+ x(snapshot_node_missing, 264, 0) \
+ x(dup_backpointer_to_bad_csum_extent, 265, 0) \
+ x(btree_bitmap_not_marked, 266, 0) \
+ x(sb_clean_entry_overrun, 267, 0) \
+ x(btree_ptr_v2_written_0, 268, 0) \
+ x(subvol_snapshot_bad, 269, 0) \
+ x(subvol_inode_bad, 270, 0) \
+ x(alloc_key_stripe_sectors_wrong, 271, 0) \
+ x(accounting_mismatch, 272, 0) \
+ x(accounting_replicas_not_marked, 273, 0) \
+ x(invalid_btree_id, 274, 0) \
+ x(alloc_key_io_time_bad, 275, 0)
+
+enum bch_sb_error_id {
+#define x(t, n, ...) BCH_FSCK_ERR_##t = n,
+ BCH_SB_ERRS()
+#undef x
+ BCH_SB_ERR_MAX
+};
+
+struct bch_sb_field_errors {
+ struct bch_sb_field field;
+ struct bch_sb_field_error_entry {
+ __le64 v;
+ __le64 last_error_time;
+ } entries[];
+};
+
+LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID, struct bch_sb_field_error_entry, v, 0, 16);
+LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR, struct bch_sb_field_error_entry, v, 16, 64);
+
+#endif /* _BCACHEFS_SB_ERRORS_FORMAT_H */
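
The table above is an x-macro: each x() entry carries a stable on-disk error name, its numeric ID, and a flags column, and the enum that follows is produced by redefining x() before expanding BCH_SB_ERRS(). A minimal standalone sketch of the same pattern, using made-up names (MY_ERRS, my_err_id and the entries are illustrative, not bcachefs identifiers):

/*
 * Minimal sketch of the x-macro pattern above; all names here are made up.
 */
#define MY_ERRS()			\
	x(foo_bad,	0, 0)		\
	x(bar_missing,	1, 0)

enum my_err_id {
#define x(t, n, ...) MY_ERR_##t = n,
	MY_ERRS()
#undef x
	MY_ERR_MAX
};

/* A second expansion turns the same table into printable names: */
static const char * const my_err_strs[] = {
#define x(t, n, ...) [n] = #t,
	MY_ERRS()
#undef x
};

Keeping the IDs explicit in the table is what allows entries to be appended or retired without renumbering what is already stored on disk.
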
diff --git a/fs/bcachefs/sb-errors_types.h b/fs/bcachefs/sb-errors_types.h
index 666599d3fb9d..40325239c3b0 100644
--- a/fs/bcachefs/sb-errors_types.h
+++ b/fs/bcachefs/sb-errors_types.h
@@ -4,286 +4,6 @@
#include "darray.h"
-#define BCH_SB_ERRS() \
- x(clean_but_journal_not_empty, 0) \
- x(dirty_but_no_journal_entries, 1) \
- x(dirty_but_no_journal_entries_post_drop_nonflushes, 2) \
- x(sb_clean_journal_seq_mismatch, 3) \
- x(sb_clean_btree_root_mismatch, 4) \
- x(sb_clean_missing, 5) \
- x(jset_unsupported_version, 6) \
- x(jset_unknown_csum, 7) \
- x(jset_last_seq_newer_than_seq, 8) \
- x(jset_past_bucket_end, 9) \
- x(jset_seq_blacklisted, 10) \
- x(journal_entries_missing, 11) \
- x(journal_entry_replicas_not_marked, 12) \
- x(journal_entry_past_jset_end, 13) \
- x(journal_entry_replicas_data_mismatch, 14) \
- x(journal_entry_bkey_u64s_0, 15) \
- x(journal_entry_bkey_past_end, 16) \
- x(journal_entry_bkey_bad_format, 17) \
- x(journal_entry_bkey_invalid, 18) \
- x(journal_entry_btree_root_bad_size, 19) \
- x(journal_entry_blacklist_bad_size, 20) \
- x(journal_entry_blacklist_v2_bad_size, 21) \
- x(journal_entry_blacklist_v2_start_past_end, 22) \
- x(journal_entry_usage_bad_size, 23) \
- x(journal_entry_data_usage_bad_size, 24) \
- x(journal_entry_clock_bad_size, 25) \
- x(journal_entry_clock_bad_rw, 26) \
- x(journal_entry_dev_usage_bad_size, 27) \
- x(journal_entry_dev_usage_bad_dev, 28) \
- x(journal_entry_dev_usage_bad_pad, 29) \
- x(btree_node_unreadable, 30) \
- x(btree_node_fault_injected, 31) \
- x(btree_node_bad_magic, 32) \
- x(btree_node_bad_seq, 33) \
- x(btree_node_unsupported_version, 34) \
- x(btree_node_bset_older_than_sb_min, 35) \
- x(btree_node_bset_newer_than_sb, 36) \
- x(btree_node_data_missing, 37) \
- x(btree_node_bset_after_end, 38) \
- x(btree_node_replicas_sectors_written_mismatch, 39) \
- x(btree_node_replicas_data_mismatch, 40) \
- x(bset_unknown_csum, 41) \
- x(bset_bad_csum, 42) \
- x(bset_past_end_of_btree_node, 43) \
- x(bset_wrong_sector_offset, 44) \
- x(bset_empty, 45) \
- x(bset_bad_seq, 46) \
- x(bset_blacklisted_journal_seq, 47) \
- x(first_bset_blacklisted_journal_seq, 48) \
- x(btree_node_bad_btree, 49) \
- x(btree_node_bad_level, 50) \
- x(btree_node_bad_min_key, 51) \
- x(btree_node_bad_max_key, 52) \
- x(btree_node_bad_format, 53) \
- x(btree_node_bkey_past_bset_end, 54) \
- x(btree_node_bkey_bad_format, 55) \
- x(btree_node_bad_bkey, 56) \
- x(btree_node_bkey_out_of_order, 57) \
- x(btree_root_bkey_invalid, 58) \
- x(btree_root_read_error, 59) \
- x(btree_root_bad_min_key, 60) \
- x(btree_root_bad_max_key, 61) \
- x(btree_node_read_error, 62) \
- x(btree_node_topology_bad_min_key, 63) \
- x(btree_node_topology_bad_max_key, 64) \
- x(btree_node_topology_overwritten_by_prev_node, 65) \
- x(btree_node_topology_overwritten_by_next_node, 66) \
- x(btree_node_topology_interior_node_empty, 67) \
- x(fs_usage_hidden_wrong, 68) \
- x(fs_usage_btree_wrong, 69) \
- x(fs_usage_data_wrong, 70) \
- x(fs_usage_cached_wrong, 71) \
- x(fs_usage_reserved_wrong, 72) \
- x(fs_usage_persistent_reserved_wrong, 73) \
- x(fs_usage_nr_inodes_wrong, 74) \
- x(fs_usage_replicas_wrong, 75) \
- x(dev_usage_buckets_wrong, 76) \
- x(dev_usage_sectors_wrong, 77) \
- x(dev_usage_fragmented_wrong, 78) \
- x(dev_usage_buckets_ec_wrong, 79) \
- x(bkey_version_in_future, 80) \
- x(bkey_u64s_too_small, 81) \
- x(bkey_invalid_type_for_btree, 82) \
- x(bkey_extent_size_zero, 83) \
- x(bkey_extent_size_greater_than_offset, 84) \
- x(bkey_size_nonzero, 85) \
- x(bkey_snapshot_nonzero, 86) \
- x(bkey_snapshot_zero, 87) \
- x(bkey_at_pos_max, 88) \
- x(bkey_before_start_of_btree_node, 89) \
- x(bkey_after_end_of_btree_node, 90) \
- x(bkey_val_size_nonzero, 91) \
- x(bkey_val_size_too_small, 92) \
- x(alloc_v1_val_size_bad, 93) \
- x(alloc_v2_unpack_error, 94) \
- x(alloc_v3_unpack_error, 95) \
- x(alloc_v4_val_size_bad, 96) \
- x(alloc_v4_backpointers_start_bad, 97) \
- x(alloc_key_data_type_bad, 98) \
- x(alloc_key_empty_but_have_data, 99) \
- x(alloc_key_dirty_sectors_0, 100) \
- x(alloc_key_data_type_inconsistency, 101) \
- x(alloc_key_to_missing_dev_bucket, 102) \
- x(alloc_key_cached_inconsistency, 103) \
- x(alloc_key_cached_but_read_time_zero, 104) \
- x(alloc_key_to_missing_lru_entry, 105) \
- x(alloc_key_data_type_wrong, 106) \
- x(alloc_key_gen_wrong, 107) \
- x(alloc_key_dirty_sectors_wrong, 108) \
- x(alloc_key_cached_sectors_wrong, 109) \
- x(alloc_key_stripe_wrong, 110) \
- x(alloc_key_stripe_redundancy_wrong, 111) \
- x(bucket_sector_count_overflow, 112) \
- x(bucket_metadata_type_mismatch, 113) \
- x(need_discard_key_wrong, 114) \
- x(freespace_key_wrong, 115) \
- x(freespace_hole_missing, 116) \
- x(bucket_gens_val_size_bad, 117) \
- x(bucket_gens_key_wrong, 118) \
- x(bucket_gens_hole_wrong, 119) \
- x(bucket_gens_to_invalid_dev, 120) \
- x(bucket_gens_to_invalid_buckets, 121) \
- x(bucket_gens_nonzero_for_invalid_buckets, 122) \
- x(need_discard_freespace_key_to_invalid_dev_bucket, 123) \
- x(need_discard_freespace_key_bad, 124) \
- x(backpointer_bucket_offset_wrong, 125) \
- x(backpointer_to_missing_device, 126) \
- x(backpointer_to_missing_alloc, 127) \
- x(backpointer_to_missing_ptr, 128) \
- x(lru_entry_at_time_0, 129) \
- x(lru_entry_to_invalid_bucket, 130) \
- x(lru_entry_bad, 131) \
- x(btree_ptr_val_too_big, 132) \
- x(btree_ptr_v2_val_too_big, 133) \
- x(btree_ptr_has_non_ptr, 134) \
- x(extent_ptrs_invalid_entry, 135) \
- x(extent_ptrs_no_ptrs, 136) \
- x(extent_ptrs_too_many_ptrs, 137) \
- x(extent_ptrs_redundant_crc, 138) \
- x(extent_ptrs_redundant_stripe, 139) \
- x(extent_ptrs_unwritten, 140) \
- x(extent_ptrs_written_and_unwritten, 141) \
- x(ptr_to_invalid_device, 142) \
- x(ptr_to_duplicate_device, 143) \
- x(ptr_after_last_bucket, 144) \
- x(ptr_before_first_bucket, 145) \
- x(ptr_spans_multiple_buckets, 146) \
- x(ptr_to_missing_backpointer, 147) \
- x(ptr_to_missing_alloc_key, 148) \
- x(ptr_to_missing_replicas_entry, 149) \
- x(ptr_to_missing_stripe, 150) \
- x(ptr_to_incorrect_stripe, 151) \
- x(ptr_gen_newer_than_bucket_gen, 152) \
- x(ptr_too_stale, 153) \
- x(stale_dirty_ptr, 154) \
- x(ptr_bucket_data_type_mismatch, 155) \
- x(ptr_cached_and_erasure_coded, 156) \
- x(ptr_crc_uncompressed_size_too_small, 157) \
- x(ptr_crc_csum_type_unknown, 158) \
- x(ptr_crc_compression_type_unknown, 159) \
- x(ptr_crc_redundant, 160) \
- x(ptr_crc_uncompressed_size_too_big, 161) \
- x(ptr_crc_nonce_mismatch, 162) \
- x(ptr_stripe_redundant, 163) \
- x(reservation_key_nr_replicas_invalid, 164) \
- x(reflink_v_refcount_wrong, 165) \
- x(reflink_p_to_missing_reflink_v, 166) \
- x(stripe_pos_bad, 167) \
- x(stripe_val_size_bad, 168) \
- x(stripe_sector_count_wrong, 169) \
- x(snapshot_tree_pos_bad, 170) \
- x(snapshot_tree_to_missing_snapshot, 171) \
- x(snapshot_tree_to_missing_subvol, 172) \
- x(snapshot_tree_to_wrong_subvol, 173) \
- x(snapshot_tree_to_snapshot_subvol, 174) \
- x(snapshot_pos_bad, 175) \
- x(snapshot_parent_bad, 176) \
- x(snapshot_children_not_normalized, 177) \
- x(snapshot_child_duplicate, 178) \
- x(snapshot_child_bad, 179) \
- x(snapshot_skiplist_not_normalized, 180) \
- x(snapshot_skiplist_bad, 181) \
- x(snapshot_should_not_have_subvol, 182) \
- x(snapshot_to_bad_snapshot_tree, 183) \
- x(snapshot_bad_depth, 184) \
- x(snapshot_bad_skiplist, 185) \
- x(subvol_pos_bad, 186) \
- x(subvol_not_master_and_not_snapshot, 187) \
- x(subvol_to_missing_root, 188) \
- x(subvol_root_wrong_bi_subvol, 189) \
- x(bkey_in_missing_snapshot, 190) \
- x(inode_pos_inode_nonzero, 191) \
- x(inode_pos_blockdev_range, 192) \
- x(inode_unpack_error, 193) \
- x(inode_str_hash_invalid, 194) \
- x(inode_v3_fields_start_bad, 195) \
- x(inode_snapshot_mismatch, 196) \
- x(inode_unlinked_but_clean, 197) \
- x(inode_unlinked_but_nlink_nonzero, 198) \
- x(inode_checksum_type_invalid, 199) \
- x(inode_compression_type_invalid, 200) \
- x(inode_subvol_root_but_not_dir, 201) \
- x(inode_i_size_dirty_but_clean, 202) \
- x(inode_i_sectors_dirty_but_clean, 203) \
- x(inode_i_sectors_wrong, 204) \
- x(inode_dir_wrong_nlink, 205) \
- x(inode_dir_multiple_links, 206) \
- x(inode_multiple_links_but_nlink_0, 207) \
- x(inode_wrong_backpointer, 208) \
- x(inode_wrong_nlink, 209) \
- x(inode_unreachable, 210) \
- x(deleted_inode_but_clean, 211) \
- x(deleted_inode_missing, 212) \
- x(deleted_inode_is_dir, 213) \
- x(deleted_inode_not_unlinked, 214) \
- x(extent_overlapping, 215) \
- x(extent_in_missing_inode, 216) \
- x(extent_in_non_reg_inode, 217) \
- x(extent_past_end_of_inode, 218) \
- x(dirent_empty_name, 219) \
- x(dirent_val_too_big, 220) \
- x(dirent_name_too_long, 221) \
- x(dirent_name_embedded_nul, 222) \
- x(dirent_name_dot_or_dotdot, 223) \
- x(dirent_name_has_slash, 224) \
- x(dirent_d_type_wrong, 225) \
- x(inode_bi_parent_wrong, 226) \
- x(dirent_in_missing_dir_inode, 227) \
- x(dirent_in_non_dir_inode, 228) \
- x(dirent_to_missing_inode, 229) \
- x(dirent_to_missing_subvol, 230) \
- x(dirent_to_itself, 231) \
- x(quota_type_invalid, 232) \
- x(xattr_val_size_too_small, 233) \
- x(xattr_val_size_too_big, 234) \
- x(xattr_invalid_type, 235) \
- x(xattr_name_invalid_chars, 236) \
- x(xattr_in_missing_inode, 237) \
- x(root_subvol_missing, 238) \
- x(root_dir_missing, 239) \
- x(root_inode_not_dir, 240) \
- x(dir_loop, 241) \
- x(hash_table_key_duplicate, 242) \
- x(hash_table_key_wrong_offset, 243) \
- x(unlinked_inode_not_on_deleted_list, 244) \
- x(reflink_p_front_pad_bad, 245) \
- x(journal_entry_dup_same_device, 246) \
- x(inode_bi_subvol_missing, 247) \
- x(inode_bi_subvol_wrong, 248) \
- x(inode_points_to_missing_dirent, 249) \
- x(inode_points_to_wrong_dirent, 250) \
- x(inode_bi_parent_nonzero, 251) \
- x(dirent_to_missing_parent_subvol, 252) \
- x(dirent_not_visible_in_parent_subvol, 253) \
- x(subvol_fs_path_parent_wrong, 254) \
- x(subvol_root_fs_path_parent_nonzero, 255) \
- x(subvol_children_not_set, 256) \
- x(subvol_children_bad, 257) \
- x(subvol_loop, 258) \
- x(subvol_unreachable, 259) \
- x(btree_node_bkey_bad_u64s, 260) \
- x(btree_node_topology_empty_interior_node, 261) \
- x(btree_ptr_v2_min_key_bad, 262) \
- x(btree_root_unreadable_and_scan_found_nothing, 263) \
- x(snapshot_node_missing, 264) \
- x(dup_backpointer_to_bad_csum_extent, 265) \
- x(btree_bitmap_not_marked, 266) \
- x(sb_clean_entry_overrun, 267) \
- x(btree_ptr_v2_written_0, 268) \
- x(subvol_snapshot_bad, 269) \
- x(subvol_inode_bad, 270)
-
-enum bch_sb_error_id {
-#define x(t, n) BCH_FSCK_ERR_##t = n,
- BCH_SB_ERRS()
-#undef x
- BCH_SB_ERR_MAX
-};
-
struct bch_sb_error_entry_cpu {
u64 id:16,
nr:48;
@@ -293,4 +13,3 @@ struct bch_sb_error_entry_cpu {
typedef DARRAY(struct bch_sb_error_entry_cpu) bch_sb_errors_cpu;
#endif /* _BCACHEFS_SB_ERRORS_TYPES_H */
-
diff --git a/fs/bcachefs/sb-members_format.h b/fs/bcachefs/sb-members_format.h
new file mode 100644
index 000000000000..e2630548c0f6
--- /dev/null
+++ b/fs/bcachefs/sb-members_format.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_MEMBERS_FORMAT_H
+#define _BCACHEFS_SB_MEMBERS_FORMAT_H
+
+/*
+ * We refer to members with bitmasks in various places - but we need to get rid
+ * of this limit:
+ */
+#define BCH_SB_MEMBERS_MAX 64
+
+#define BCH_MIN_NR_NBUCKETS (1 << 6)
+
+#define BCH_IOPS_MEASUREMENTS() \
+ x(seqread, 0) \
+ x(seqwrite, 1) \
+ x(randread, 2) \
+ x(randwrite, 3)
+
+enum bch_iops_measurement {
+#define x(t, n) BCH_IOPS_##t = n,
+ BCH_IOPS_MEASUREMENTS()
+#undef x
+ BCH_IOPS_NR
+};
+
+#define BCH_MEMBER_ERROR_TYPES() \
+ x(read, 0) \
+ x(write, 1) \
+ x(checksum, 2)
+
+enum bch_member_error_type {
+#define x(t, n) BCH_MEMBER_ERROR_##t = n,
+ BCH_MEMBER_ERROR_TYPES()
+#undef x
+ BCH_MEMBER_ERROR_NR
+};
+
+struct bch_member {
+ __uuid_t uuid;
+ __le64 nbuckets; /* device size */
+ __le16 first_bucket; /* index of first bucket used */
+ __le16 bucket_size; /* sectors */
+ __u8 btree_bitmap_shift;
+ __u8 pad[3];
+ __le64 last_mount; /* time_t */
+
+ __le64 flags;
+ __le32 iops[4];
+ __le64 errors[BCH_MEMBER_ERROR_NR];
+ __le64 errors_at_reset[BCH_MEMBER_ERROR_NR];
+ __le64 errors_reset_time;
+ __le64 seq;
+ __le64 btree_allocated_bitmap;
+ /*
+ * On recovery from a clean shutdown we don't normally read the journal,
+ * but we still want to resume writing from where we left off so we
+ * don't overwrite more than is necessary, for list journal debugging:
+ */
+ __le32 last_journal_bucket;
+ __le32 last_journal_bucket_offset;
+};
+
+/*
+ * This limit comes from the bucket_gens array - it's a single allocation, and
+ * kernel allocations are limited to INT_MAX
+ */
+#define BCH_MEMBER_NBUCKETS_MAX (INT_MAX - 64)
+
+#define BCH_MEMBER_V1_BYTES 56
+
+LE64_BITMASK(BCH_MEMBER_STATE, struct bch_member, flags, 0, 4)
+/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
+LE64_BITMASK(BCH_MEMBER_DISCARD, struct bch_member, flags, 14, 15)
+LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED, struct bch_member, flags, 15, 20)
+LE64_BITMASK(BCH_MEMBER_GROUP, struct bch_member, flags, 20, 28)
+LE64_BITMASK(BCH_MEMBER_DURABILITY, struct bch_member, flags, 28, 30)
+LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
+ struct bch_member, flags, 30, 31)
+
+#if 0
+LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20);
+LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
+#endif
+
+#define BCH_MEMBER_STATES() \
+ x(rw, 0) \
+ x(ro, 1) \
+ x(failed, 2) \
+ x(spare, 3)
+
+enum bch_member_state {
+#define x(t, n) BCH_MEMBER_STATE_##t = n,
+ BCH_MEMBER_STATES()
+#undef x
+ BCH_MEMBER_STATE_NR
+};
+
+struct bch_sb_field_members_v1 {
+ struct bch_sb_field field;
+ struct bch_member _members[]; //Members are now variable size
+};
+
+struct bch_sb_field_members_v2 {
+ struct bch_sb_field field;
+ __le16 member_bytes; //size of single member entry
+ u8 pad[6];
+ struct bch_member _members[];
+};
+
+#endif /* _BCACHEFS_SB_MEMBERS_FORMAT_H */
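
LE64_BITMASK() packs several fields into the little-endian flags word of struct bch_member. By the convention these masks follow elsewhere, each invocation generates a getter named after the mask and a SET_-prefixed setter; a hedged usage sketch under that assumption (member_is_writable() and member_mark_failed() are made up):

/*
 * Sketch only: assumes LE64_BITMASK(BCH_MEMBER_STATE, ...) generates
 * BCH_MEMBER_STATE(m) and SET_BCH_MEMBER_STATE(m, v) accessors.
 */
static bool member_is_writable(const struct bch_member *m)
{
	return BCH_MEMBER_STATE(m) == BCH_MEMBER_STATE_rw;
}

static void member_mark_failed(struct bch_member *m)
{
	SET_BCH_MEMBER_STATE(m, BCH_MEMBER_STATE_failed);
}
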
diff --git a/fs/bcachefs/seqmutex.h b/fs/bcachefs/seqmutex.h
index c1860d8163fb..c4b3d8d3f414 100644
--- a/fs/bcachefs/seqmutex.h
+++ b/fs/bcachefs/seqmutex.h
@@ -19,17 +19,14 @@ static inline bool seqmutex_trylock(struct seqmutex *lock)
static inline void seqmutex_lock(struct seqmutex *lock)
{
mutex_lock(&lock->lock);
-}
-
-static inline void seqmutex_unlock(struct seqmutex *lock)
-{
lock->seq++;
- mutex_unlock(&lock->lock);
}
-static inline u32 seqmutex_seq(struct seqmutex *lock)
+static inline u32 seqmutex_unlock(struct seqmutex *lock)
{
- return lock->seq;
+ u32 seq = lock->seq;
+ mutex_unlock(&lock->lock);
+ return seq;
}
static inline bool seqmutex_relock(struct seqmutex *lock, u32 seq)
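
With this change the sequence number is bumped when the seqmutex is taken and returned by seqmutex_unlock(), so a caller can drop the lock around blocking work and later use seqmutex_relock() to detect whether anyone else took the lock in the meantime. An illustrative sketch under made-up names (my_container, my_obj and do_blocking_work() are not bcachefs code):

/* Illustrative only: walk a list protected by a seqmutex. */
struct my_obj {
	struct list_head	list;
};

struct my_container {
	struct seqmutex		lock;
	struct list_head	objs;
};

void do_blocking_work(struct my_obj *obj);

static void walk_objects(struct my_container *c)
{
	struct my_obj *obj;
restart:
	seqmutex_lock(&c->lock);
	list_for_each_entry(obj, &c->objs, list) {
		u32 seq = seqmutex_unlock(&c->lock);

		do_blocking_work(obj);		/* may sleep */

		if (!seqmutex_relock(&c->lock, seq))
			goto restart;		/* lock was retaken, iterator is stale */
	}
	seqmutex_unlock(&c->lock);
}
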
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index 629900a5e641..24023d6a9698 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -168,6 +168,9 @@ static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
size_t new_bytes = kmalloc_size_roundup(struct_size(new, s, idx + 1));
size_t new_size = (new_bytes - sizeof(*new)) / sizeof(new->s[0]);
+ if (unlikely(new_bytes > INT_MAX))
+ return NULL;
+
new = kvzalloc(new_bytes, GFP_KERNEL);
if (!new)
return NULL;
@@ -1042,6 +1045,25 @@ err:
return ret;
}
+int bch2_check_key_has_snapshot(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ if (fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
+ bkey_in_missing_snapshot,
+ "key in missing snapshot %s, delete?",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ ret = bch2_btree_delete_at(trans, iter,
+ BTREE_UPDATE_internal_snapshot_node) ?: 1;
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
/*
* Mark a snapshot as deleted, for future cleanup:
*/
@@ -1351,35 +1373,39 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
* that key to snapshot leaf nodes, where we can mutate it
*/
-static int snapshot_delete_key(struct btree_trans *trans,
+static int delete_dead_snapshots_process_key(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k,
snapshot_id_list *deleted,
snapshot_id_list *equiv_seen,
struct bpos *last_pos)
{
+ int ret = bch2_check_key_has_snapshot(trans, iter, k);
+ if (ret)
+ return ret < 0 ? ret : 0;
+
struct bch_fs *c = trans->c;
u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+ if (!equiv) /* key for invalid snapshot node, but we chose not to delete */
+ return 0;
if (!bkey_eq(k.k->p, *last_pos))
equiv_seen->nr = 0;
- *last_pos = k.k->p;
- if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
- snapshot_list_has_id(equiv_seen, equiv)) {
+ if (snapshot_list_has_id(deleted, k.k->p.snapshot))
return bch2_btree_delete_at(trans, iter,
BTREE_UPDATE_internal_snapshot_node);
- } else {
- return snapshot_list_add(c, equiv_seen, equiv);
- }
-}
-static int move_key_to_correct_snapshot(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+ if (!bpos_eq(*last_pos, k.k->p) &&
+ snapshot_list_has_id(equiv_seen, equiv))
+ return bch2_btree_delete_at(trans, iter,
+ BTREE_UPDATE_internal_snapshot_node);
+
+ *last_pos = k.k->p;
+
+ ret = snapshot_list_add_nodup(c, equiv_seen, equiv);
+ if (ret)
+ return ret;
/*
* When we have a linear chain of snapshot nodes, we consider
@@ -1389,21 +1415,20 @@ static int move_key_to_correct_snapshot(struct btree_trans *trans,
*
* If there are multiple keys in different snapshots at the same
* position, we're only going to keep the one in the newest
- * snapshot - the rest have been overwritten and are redundant,
- * and for the key we're going to keep we need to move it to the
- * equivalance class ID if it's not there already.
+ * snapshot (we delete the others above) - the rest have been
+ * overwritten and are redundant, and for the key we're going to keep we
+ * need to move it to the equivalence class ID if it's not there
+ * already.
*/
if (equiv != k.k->p.snapshot) {
struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
- struct btree_iter new_iter;
- int ret;
-
- ret = PTR_ERR_OR_ZERO(new);
+ int ret = PTR_ERR_OR_ZERO(new);
if (ret)
return ret;
new->k.p.snapshot = equiv;
+ struct btree_iter new_iter;
bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
BTREE_ITER_all_snapshots|
BTREE_ITER_cached|
@@ -1538,19 +1563,11 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
struct btree_trans *trans;
snapshot_id_list deleted = { 0 };
snapshot_id_list deleted_interior = { 0 };
- u32 id;
int ret = 0;
if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags))
return 0;
- if (!test_bit(BCH_FS_started, &c->flags)) {
- ret = bch2_fs_read_write_early(c);
- bch_err_msg(c, ret, "deleting dead snapshots: error going rw");
- if (ret)
- return ret;
- }
-
trans = bch2_trans_get(c);
/*
@@ -1585,33 +1602,20 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
if (ret)
goto err;
- for (id = 0; id < BTREE_ID_NR; id++) {
+ for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) {
struct bpos last_pos = POS_MIN;
snapshot_id_list equiv_seen = { 0 };
struct disk_reservation res = { 0 };
- if (!btree_type_has_snapshots(id))
- continue;
-
- /*
- * deleted inodes btree is maintained by a trigger on the inodes
- * btree - no work for us to do here, and it's not safe to scan
- * it because we'll see out of date keys due to the btree write
- * buffer:
- */
- if (id == BTREE_ID_deleted_inodes)
+ if (!btree_type_has_snapshots(btree))
continue;
ret = for_each_btree_key_commit(trans, iter,
- id, POS_MIN,
+ btree, POS_MIN,
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
&res, NULL, BCH_TRANS_COMMIT_no_enospc,
- snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
- for_each_btree_key_commit(trans, iter,
- id, POS_MIN,
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- &res, NULL, BCH_TRANS_COMMIT_no_enospc,
- move_key_to_correct_snapshot(trans, &iter, k));
+ delete_dead_snapshots_process_key(trans, &iter, k, &deleted,
+ &equiv_seen, &last_pos));
bch2_disk_reservation_put(c, &res);
darray_exit(&equiv_seen);
@@ -1679,6 +1683,8 @@ void bch2_delete_dead_snapshots_work(struct work_struct *work)
{
struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
+ set_worker_desc("bcachefs-delete-dead-snapshots/%s", c->name);
+
bch2_delete_dead_snapshots(c);
bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
}
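
set_worker_desc() above only renames the kworker thread for the duration of the work item, which makes the snapshot-deletion worker identifiable in ps or hung-task output. A generic sketch of the same call (my_dev and my_work_fn are hypothetical):

#include <linux/workqueue.h>

/* Sketch only: my_dev and my_work_fn are not bcachefs code. */
struct my_dev {
	struct work_struct	work;
	char			name[32];
};

static void my_work_fn(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, work);

	/* Name the executing kworker so it shows up usefully in ps output. */
	set_worker_desc("mydriver-cleanup/%s", d->name);

	/* ... the actual deferred work ... */
}
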
diff --git a/fs/bcachefs/snapshot.h b/fs/bcachefs/snapshot.h
index ab13d8f4b41e..31b0ee03e962 100644
--- a/fs/bcachefs/snapshot.h
+++ b/fs/bcachefs/snapshot.h
@@ -242,6 +242,7 @@ int bch2_snapshot_node_create(struct btree_trans *, u32,
int bch2_check_snapshot_trees(struct bch_fs *);
int bch2_check_snapshots(struct bch_fs *);
int bch2_reconstruct_snapshots(struct bch_fs *);
+int bch2_check_key_has_snapshot(struct btree_trans *, struct btree_iter *, struct bkey_s_c);
int bch2_snapshot_node_set_deleted(struct btree_trans *, u32);
void bch2_delete_dead_snapshots_work(struct work_struct *);
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index cbad9b27874f..c8c266cb5797 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -300,7 +300,7 @@ not_found:
if (!found && (flags & STR_HASH_must_replace)) {
ret = -BCH_ERR_ENOENT_str_hash_set_must_replace;
} else if (found && (flags & STR_HASH_must_create)) {
- ret = -EEXIST;
+ ret = -BCH_ERR_EEXIST_str_hash_set;
} else {
if (!found && slot.path)
swap(iter, slot);
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index f1bee6c5222d..b156fc85b8a3 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -649,9 +649,10 @@ reread:
bytes = vstruct_bytes(sb->sb);
- if (bytes > 512ULL << min(BCH_SB_LAYOUT_SIZE_BITS_MAX, sb->sb->layout.sb_max_size_bits)) {
- prt_printf(err, "Invalid superblock: too big (got %zu bytes, layout max %lu)",
- bytes, 512UL << sb->sb->layout.sb_max_size_bits);
+ u64 sb_size = 512ULL << min(BCH_SB_LAYOUT_SIZE_BITS_MAX, sb->sb->layout.sb_max_size_bits);
+ if (bytes > sb_size) {
+ prt_printf(err, "Invalid superblock: too big (got %zu bytes, layout max %llu)",
+ bytes, sb_size);
return -BCH_ERR_invalid_sb_too_big;
}
@@ -1132,18 +1133,12 @@ bool bch2_check_version_downgrade(struct bch_fs *c)
* c->sb will be checked before we write the superblock, so update it as
* well:
*/
- if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current) {
+ if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current)
SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
- c->sb.version_upgrade_complete = bcachefs_metadata_version_current;
- }
- if (c->sb.version > bcachefs_metadata_version_current) {
+ if (c->sb.version > bcachefs_metadata_version_current)
c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
- c->sb.version = bcachefs_metadata_version_current;
- }
- if (c->sb.version_min > bcachefs_metadata_version_current) {
+ if (c->sb.version_min > bcachefs_metadata_version_current)
c->disk_sb.sb->version_min = cpu_to_le16(bcachefs_metadata_version_current);
- c->sb.version_min = bcachefs_metadata_version_current;
- }
c->disk_sb.sb->compat[0] &= cpu_to_le64((1ULL << BCH_COMPAT_NR) - 1);
return ret;
}
@@ -1316,15 +1311,15 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
prt_printf(out, "Device index:\t%u\n", sb->dev_idx);
- prt_str(out, "Label:\t");
+ prt_printf(out, "Label:\t");
prt_printf(out, "%.*s", (int) sizeof(sb->label), sb->label);
prt_newline(out);
- prt_str(out, "Version:\t");
+ prt_printf(out, "Version:\t");
bch2_version_to_text(out, le16_to_cpu(sb->version));
prt_newline(out);
- prt_str(out, "Version upgrade complete:\t");
+ prt_printf(out, "Version upgrade complete:\t");
bch2_version_to_text(out, BCH_SB_VERSION_UPGRADE_COMPLETE(sb));
prt_newline(out);
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 2206a8dee693..fb906467201e 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -536,7 +536,6 @@ static void __bch2_fs_free(struct bch_fs *c)
bch2_find_btree_nodes_exit(&c->found_btree_nodes);
bch2_free_pending_node_rewrites(c);
- bch2_fs_allocator_background_exit(c);
bch2_fs_sb_errors_exit(c);
bch2_fs_counters_exit(c);
bch2_fs_snapshots_exit(c);
@@ -564,7 +563,7 @@ static void __bch2_fs_free(struct bch_fs *c)
BUG_ON(atomic_read(&c->journal_keys.ref));
bch2_fs_btree_write_buffer_exit(c);
percpu_free_rwsem(&c->mark_lock);
- EBUG_ON(percpu_u64_get(c->online_reserved));
+ EBUG_ON(c->online_reserved && percpu_u64_get(c->online_reserved));
free_percpu(c->online_reserved);
darray_exit(&c->btree_roots_extra);
@@ -582,8 +581,10 @@ static void __bch2_fs_free(struct bch_fs *c)
if (c->write_ref_wq)
destroy_workqueue(c->write_ref_wq);
- if (c->io_complete_wq)
- destroy_workqueue(c->io_complete_wq);
+ if (c->btree_write_submit_wq)
+ destroy_workqueue(c->btree_write_submit_wq);
+ if (c->btree_read_complete_wq)
+ destroy_workqueue(c->btree_read_complete_wq);
if (c->copygc_wq)
destroy_workqueue(c->copygc_wq);
if (c->btree_io_complete_wq)
@@ -878,8 +879,10 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
!(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
- !(c->io_complete_wq = alloc_workqueue("bcachefs_io",
+ !(c->btree_read_complete_wq = alloc_workqueue("bcachefs_btree_read_complete",
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 512)) ||
+ !(c->btree_write_submit_wq = alloc_workqueue("bcachefs_btree_write_submit",
+ WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
!(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
WQ_FREEZABLE, 0)) ||
#ifndef BCH_WRITE_REF_DEBUG
@@ -908,9 +911,9 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
bch2_io_clock_init(&c->io_clock[WRITE]) ?:
bch2_fs_journal_init(&c->journal) ?:
bch2_fs_replicas_init(c) ?:
+ bch2_fs_btree_iter_init(c) ?:
bch2_fs_btree_cache_init(c) ?:
bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
- bch2_fs_btree_iter_init(c) ?:
bch2_fs_btree_interior_update_init(c) ?:
bch2_fs_buckets_waiting_for_journal_init(c) ?:
bch2_fs_btree_write_buffer_init(c) ?:
@@ -927,12 +930,13 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
if (ret)
goto err;
- for (i = 0; i < c->sb.nr_devices; i++)
- if (bch2_member_exists(c->disk_sb.sb, i) &&
- bch2_dev_alloc(c, i)) {
- ret = -EEXIST;
+ for (i = 0; i < c->sb.nr_devices; i++) {
+ if (!bch2_member_exists(c->disk_sb.sb, i))
+ continue;
+ ret = bch2_dev_alloc(c, i);
+ if (ret)
goto err;
- }
+ }
bch2_journal_entry_res_resize(&c->journal,
&c->btree_root_journal_res,
@@ -1190,6 +1194,7 @@ static void bch2_dev_free(struct bch_dev *ca)
kfree(ca->buckets_nouse);
bch2_free_super(&ca->disk_sb);
+ bch2_dev_allocator_background_exit(ca);
bch2_dev_journal_exit(ca);
free_percpu(ca->io_done);
@@ -1312,6 +1317,8 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
atomic_long_set(&ca->ref, 1);
#endif
+ bch2_dev_allocator_background_init(ca);
+
if (percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
!(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
@@ -1524,6 +1531,7 @@ static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
* The allocator thread itself allocates btree nodes, so stop it first:
*/
bch2_dev_allocator_remove(c, ca);
+ bch2_recalc_capacity(c);
bch2_dev_journal_stop(&c->journal, ca);
}
@@ -1535,6 +1543,7 @@ static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
+ bch2_dev_do_discards(ca);
}
int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
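
The reordered init chain above relies on the GNU "a ?: b" extension: the expression yields a when a is non-zero, so the first initializer that returns an error short-circuits the rest of the chain. A small sketch with made-up names (my_fs and the init functions are placeholders):

/* Sketch of the ?: error-chaining style; all names here are made up. */
struct my_fs;
int init_journal(struct my_fs *);
int init_btree_cache(struct my_fs *);
int init_io_clocks(struct my_fs *);

static int my_fs_init(struct my_fs *c)
{
	/* "a ?: b" yields a when a is non-zero, so the first failing
	 * initializer stops the chain and its error is returned. */
	return init_journal(c) ?:
	       init_btree_cache(c) ?:
	       init_io_clocks(c);
}
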
diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index 477f350a8bd0..e3a57196b0ee 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -741,7 +741,9 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
ret = btrfs_bio_csum(bbio);
if (ret)
goto fail_put_bio;
- } else if (use_append) {
+ } else if (use_append ||
+ (btrfs_is_zoned(fs_info) && inode &&
+ inode->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_alloc_dummy_sum(bbio);
if (ret)
goto fail_put_bio;
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 1e09aeea69c2..1a66be33bb04 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1785,6 +1785,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
struct btrfs_block_group *bg;
struct btrfs_space_info *space_info;
+ LIST_HEAD(retry_list);
if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
return;
@@ -1921,8 +1922,11 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
}
next:
- if (ret)
- btrfs_mark_bg_to_reclaim(bg);
+ if (ret) {
+ /* Refcount held by the reclaim_bgs list after splice. */
+ btrfs_get_block_group(bg);
+ list_add_tail(&bg->bg_list, &retry_list);
+ }
btrfs_put_block_group(bg);
mutex_unlock(&fs_info->reclaim_bgs_lock);
@@ -1942,6 +1946,9 @@ next:
spin_unlock(&fs_info->unused_bgs_lock);
mutex_unlock(&fs_info->reclaim_bgs_lock);
end:
+ spin_lock(&fs_info->unused_bgs_lock);
+ list_splice_tail(&retry_list, &fs_info->reclaim_bgs);
+ spin_unlock(&fs_info->unused_bgs_lock);
btrfs_exclop_finish(fs_info);
sb_end_write(fs_info->sb);
}
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 91c994b569f3..6ed495ca7a31 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -89,6 +89,16 @@ enum {
BTRFS_INODE_FREE_SPACE_INODE,
/* Set when there are no capabilities in XATTs for the inode. */
BTRFS_INODE_NO_CAP_XATTR,
+ /*
+ * Set if an error happened when doing a COW write before submitting a
+ * bio or during writeback. Used for both buffered writes and direct IO
+ * writes. This is to signal a fast fsync that it has to wait for
+ * ordered extents to complete and therefore not log extent maps that
+ * point to unwritten extents (when an ordered extent completes and it
+ * has the BTRFS_ORDERED_IOERR flag set, it drops extent maps in its
+ * range).
+ */
+ BTRFS_INODE_COW_WRITE_ERROR,
};
/* in memory btrfs inode */
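
The new BTRFS_INODE_COW_WRITE_ERROR bit is a one-way handshake between IO completion and the next fast fsync: the completion path sets it when a COW write fails, and the fsync path atomically consumes it and falls back to waiting for ordered extents (see the btrfs_sync_file() and btrfs_finish_ordered_extent() hunks further down). A simplified sketch of that handshake with made-up names (my_inode, MY_COW_WRITE_ERROR and wait_for_ordered_extents() are placeholders, not btrfs API):

#include <linux/bitops.h>

/* Simplified sketch of the set / test_and_clear handshake. */
enum { MY_COW_WRITE_ERROR };			/* bit number in runtime_flags */

struct my_inode {
	unsigned long	runtime_flags;
};

int wait_for_ordered_extents(struct my_inode *inode);

/* IO completion path, on a failed COW write: */
static void note_cow_write_error(struct my_inode *inode)
{
	set_bit(MY_COW_WRITE_ERROR, &inode->runtime_flags);
}

/* Fast fsync path, after waiting for writeback: */
static int maybe_wait_for_ordered(struct my_inode *inode)
{
	if (test_and_clear_bit(MY_COW_WRITE_ERROR, &inode->runtime_flags))
		return wait_for_ordered_extents(inode);
	return 0;
}
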
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1b20b3e390df..38cdb8875e8e 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4538,18 +4538,10 @@ static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_fs_info *fs_info)
{
struct rb_node *node;
- struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
struct btrfs_delayed_ref_node *ref;
- delayed_refs = &trans->delayed_refs;
-
spin_lock(&delayed_refs->lock);
- if (atomic_read(&delayed_refs->num_entries) == 0) {
- spin_unlock(&delayed_refs->lock);
- btrfs_debug(fs_info, "delayed_refs has NO entry");
- return;
- }
-
while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
struct btrfs_delayed_ref_head *head;
struct rb_node *n;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 597387e9f040..f688fab55251 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3689,6 +3689,8 @@ static struct extent_buffer *grab_extent_buffer(
struct folio *folio = page_folio(page);
struct extent_buffer *exists;
+ lockdep_assert_held(&page->mapping->i_private_lock);
+
/*
* For subpage case, we completely rely on radix tree to ensure we
* don't try to insert two ebs for the same bytenr. So here we always
@@ -3756,13 +3758,14 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
* The caller needs to free the existing folios and retry using the same order.
*/
static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
+ struct btrfs_subpage *prealloc,
struct extent_buffer **found_eb_ret)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
const unsigned long index = eb->start >> PAGE_SHIFT;
- struct folio *existing_folio;
+ struct folio *existing_folio = NULL;
int ret;
ASSERT(found_eb_ret);
@@ -3774,12 +3777,14 @@ retry:
ret = filemap_add_folio(mapping, eb->folios[i], index + i,
GFP_NOFS | __GFP_NOFAIL);
if (!ret)
- return 0;
+ goto finish;
existing_folio = filemap_lock_folio(mapping, index + i);
/* The page cache only exists for a very short time, just retry. */
- if (IS_ERR(existing_folio))
+ if (IS_ERR(existing_folio)) {
+ existing_folio = NULL;
goto retry;
+ }
/* For now, we should only have single-page folios for btree inode. */
ASSERT(folio_nr_pages(existing_folio) == 1);
@@ -3790,14 +3795,13 @@ retry:
return -EAGAIN;
}
- if (fs_info->nodesize < PAGE_SIZE) {
- /*
- * We're going to reuse the existing page, can drop our page
- * and subpage structure now.
- */
+finish:
+ spin_lock(&mapping->i_private_lock);
+ if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
+ /* We're going to reuse the existing page, can drop our folio now. */
__free_page(folio_page(eb->folios[i], 0));
eb->folios[i] = existing_folio;
- } else {
+ } else if (existing_folio) {
struct extent_buffer *existing_eb;
existing_eb = grab_extent_buffer(fs_info,
@@ -3805,6 +3809,7 @@ retry:
if (existing_eb) {
/* The extent buffer still exists, we can use it directly. */
*found_eb_ret = existing_eb;
+ spin_unlock(&mapping->i_private_lock);
folio_unlock(existing_folio);
folio_put(existing_folio);
return 1;
@@ -3813,6 +3818,22 @@ retry:
__free_page(folio_page(eb->folios[i], 0));
eb->folios[i] = existing_folio;
}
+ eb->folio_size = folio_size(eb->folios[i]);
+ eb->folio_shift = folio_shift(eb->folios[i]);
+ /* Should not fail, as we have preallocated the memory. */
+ ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
+ ASSERT(!ret);
+ /*
+ * Signal that we have an extra eb under allocation, so that
+ * detach_extent_buffer_page() won't release the folio private when the
+ * eb hasn't been inserted into the radix tree yet.
+ *
+ * The ref will be decreased when the eb releases the page, in
+ * detach_extent_buffer_page(), so the error path needs no special
+ * handling.
+ */
+ btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
+ spin_unlock(&mapping->i_private_lock);
return 0;
}
@@ -3824,7 +3845,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
int attached = 0;
struct extent_buffer *eb;
struct extent_buffer *existing_eb = NULL;
- struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct btrfs_subpage *prealloc = NULL;
u64 lockdep_owner = owner_root;
bool page_contig = true;
@@ -3890,7 +3910,7 @@ reallocate:
for (int i = 0; i < num_folios; i++) {
struct folio *folio;
- ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
+ ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
if (ret > 0) {
ASSERT(existing_eb);
goto out;
@@ -3927,24 +3947,6 @@ reallocate:
* and free the allocated page.
*/
folio = eb->folios[i];
- eb->folio_size = folio_size(folio);
- eb->folio_shift = folio_shift(folio);
- spin_lock(&mapping->i_private_lock);
- /* Should not fail, as we have preallocated the memory */
- ret = attach_extent_buffer_folio(eb, folio, prealloc);
- ASSERT(!ret);
- /*
- * To inform we have extra eb under allocation, so that
- * detach_extent_buffer_page() won't release the folio private
- * when the eb hasn't yet been inserted into radix tree.
- *
- * The ref will be decreased when the eb released the page, in
- * detach_extent_buffer_page().
- * Thus needs no special handling in error path.
- */
- btrfs_folio_inc_eb_refs(fs_info, folio);
- spin_unlock(&mapping->i_private_lock);
-
WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
/*
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e764ac3f22e2..d90138683a0a 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1885,6 +1885,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
*/
if (full_sync || btrfs_is_zoned(fs_info)) {
ret = btrfs_wait_ordered_range(inode, start, len);
+ clear_bit(BTRFS_INODE_COW_WRITE_ERROR, &BTRFS_I(inode)->runtime_flags);
} else {
/*
* Get our ordered extents as soon as possible to avoid doing
@@ -1894,6 +1895,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
&ctx.ordered_extents);
ret = filemap_fdatawait_range(inode->i_mapping, start, end);
+ if (ret)
+ goto out_release_extents;
+
+ /*
+ * Check and clear the BTRFS_INODE_COW_WRITE_ERROR now after
+ * starting and waiting for writeback, because for buffered IO
+ * it may have been set during the end IO callback
+ * (end_bbio_data_write() -> btrfs_finish_ordered_extent()) in
+ * case an error happened and we need to wait for ordered
+ * extents to complete so that any extent maps that point to
+ * unwritten locations are dropped and we don't log them.
+ */
+ if (test_and_clear_bit(BTRFS_INODE_COW_WRITE_ERROR,
+ &BTRFS_I(inode)->runtime_flags))
+ ret = btrfs_wait_ordered_range(inode, start, len);
}
if (ret)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 3ab8dea5036b..dabc3d0793cf 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -2697,7 +2697,7 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
u64 offset = bytenr - block_group->start;
u64 to_free, to_unusable;
int bg_reclaim_threshold = 0;
- bool initial = (size == block_group->length);
+ bool initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
u64 reclaimable_unusable;
WARN_ON(!initial && offset + size > block_group->zone_capacity);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index c5bdd674f55c..35a413ce935d 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -388,6 +388,37 @@ bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
+ /*
+ * If this is a COW write it means we created new extent maps for the
+ * range and they point to unwritten locations if we got an error either
+ * before submitting a bio or during IO.
+ *
+ * We have marked the ordered extent with BTRFS_ORDERED_IOERR, and we
+ * are queuing its completion below. During completion, at
+ * btrfs_finish_one_ordered(), we will drop the extent maps for the
+ * unwritten extents.
+ *
+ * However because completion runs in a work queue we can end up having
+ * a fast fsync running before that. In the case of direct IO, once we
+ * unlock the inode the fsync might start, and we queue the completion
+ * before unlocking the inode. In the case of buffered IO when writeback
+ * finishes (end_bbio_data_write()) we queue the completion, so if the
+ * writeback was triggered by a fast fsync, the fsync might start
+ * logging before ordered extent completion runs in the work queue.
+ *
+ * The fast fsync will log file extent items based on the extent maps it
+ * finds, so if by the time it collects extent maps the ordered extent
+ * completion didn't happen yet, it will log file extent items that
+ * point to unwritten extents, resulting in a corruption if a crash
+ * happens and the log tree is replayed. Note that a fast fsync does not
+ * wait for completion of ordered extents in order to reduce latency.
+ *
+ * Set a flag in the inode so that the next fast fsync will wait for
+ * ordered extents to complete before starting to log.
+ */
+ if (!uptodate && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
+ set_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);
+
if (ret)
btrfs_queue_ordered_fn(ordered);
return ret;
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index fc2a7ea26354..bf0f81d59b6b 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1351,7 +1351,7 @@ static int flush_reservations(struct btrfs_fs_info *fs_info)
int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
- struct btrfs_root *quota_root;
+ struct btrfs_root *quota_root = NULL;
struct btrfs_trans_handle *trans = NULL;
int ret = 0;
@@ -1449,9 +1449,9 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
quota_root->node, 0, 1);
- btrfs_put_root(quota_root);
out:
+ btrfs_put_root(quota_root);
mutex_unlock(&fs_info->qgroup_ioctl_lock);
if (ret && trans)
btrfs_end_transaction(trans);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index afd6932f5e89..d7caa3732f07 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1688,20 +1688,24 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
(i << fs_info->sectorsize_bits);
int err;
- bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
- fs_info, scrub_read_endio, stripe);
- bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
-
io_stripe.is_scrub = true;
+ stripe_len = (nr_sectors - i) << fs_info->sectorsize_bits;
+ /*
+ * For RST cases, we need to manually split the bbio to
+ * follow the RST boundary.
+ */
err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
- &stripe_len, &bioc, &io_stripe,
- &mirror);
+ &stripe_len, &bioc, &io_stripe, &mirror);
btrfs_put_bioc(bioc);
- if (err) {
- btrfs_bio_end_io(bbio,
- errno_to_blk_status(err));
- return;
+ if (err < 0) {
+ set_bit(i, &stripe->io_error_bitmap);
+ set_bit(i, &stripe->error_bitmap);
+ continue;
}
+
+ bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
+ fs_info, scrub_read_endio, stripe);
+ bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
}
__bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 5146387b416b..0bce1d45e252 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -138,6 +138,25 @@ static void wait_log_commit(struct btrfs_root *root, int transid);
* and once to do all the other items.
*/
+static struct inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root)
+{
+ unsigned int nofs_flag;
+ struct inode *inode;
+
+ /*
+ * We're holding a transaction handle whether we are logging or
+ * replaying a log tree, so we must make sure NOFS semantics apply
+ * because btrfs_alloc_inode() may be triggered and it uses GFP_KERNEL
+ * to allocate an inode, which can recurse back into the filesystem and
+ * attempt a transaction commit, resulting in a deadlock.
+ */
+ nofs_flag = memalloc_nofs_save();
+ inode = btrfs_iget(root->fs_info->sb, objectid, root);
+ memalloc_nofs_restore(nofs_flag);
+
+ return inode;
+}
+
/*
* start a sub transaction and setup the log tree
* this increments the log tree writer count to make the people
@@ -600,7 +619,7 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root,
{
struct inode *inode;
- inode = btrfs_iget(root->fs_info->sb, objectid, root);
+ inode = btrfs_iget_logging(objectid, root);
if (IS_ERR(inode))
inode = NULL;
return inode;
@@ -4860,18 +4879,23 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
path->slots[0]++;
continue;
}
- if (!dropped_extents) {
- /*
- * Avoid logging extent items logged in past fsync calls
- * and leading to duplicate keys in the log tree.
- */
+ /*
+ * Avoid overlapping items in the log tree. The first time we
+ * get here, get rid of everything from a past fsync. After
+ * that, if the current extent starts before the end of the last
+ * extent we copied, truncate the last one. This can happen if
+ * an ordered extent completion modifies the subvolume tree
+ * while btrfs_next_leaf() has the tree unlocked.
+ */
+ if (!dropped_extents || key.offset < truncate_offset) {
ret = truncate_inode_items(trans, root->log_root, inode,
- truncate_offset,
+ min(key.offset, truncate_offset),
BTRFS_EXTENT_DATA_KEY);
if (ret)
goto out;
dropped_extents = true;
}
+ truncate_offset = btrfs_file_extent_end(path);
if (ins_nr == 0)
start_slot = slot;
ins_nr++;
@@ -5433,7 +5457,6 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
struct btrfs_log_ctx *ctx)
{
struct btrfs_root *root = start_inode->root;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
LIST_HEAD(dir_list);
struct btrfs_dir_list *dir_elem;
@@ -5494,7 +5517,7 @@ again:
continue;
btrfs_release_path(path);
- di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
+ di_inode = btrfs_iget_logging(di_key.objectid, root);
if (IS_ERR(di_inode)) {
ret = PTR_ERR(di_inode);
goto out;
@@ -5554,7 +5577,7 @@ again:
btrfs_add_delayed_iput(curr_inode);
curr_inode = NULL;
- vfs_inode = btrfs_iget(fs_info->sb, ino, root);
+ vfs_inode = btrfs_iget_logging(ino, root);
if (IS_ERR(vfs_inode)) {
ret = PTR_ERR(vfs_inode);
break;
@@ -5649,7 +5672,7 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans,
if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES)
return BTRFS_LOG_FORCE_COMMIT;
- inode = btrfs_iget(root->fs_info->sb, ino, root);
+ inode = btrfs_iget_logging(ino, root);
/*
* If the other inode that had a conflicting dir entry was deleted in
* the current transaction then we either:
@@ -5750,7 +5773,6 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_log_ctx *ctx)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
/*
@@ -5781,7 +5803,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
list_del(&curr->list);
kfree(curr);
- inode = btrfs_iget(fs_info->sb, ino, root);
+ inode = btrfs_iget_logging(ino, root);
/*
* If the other inode that had a conflicting dir entry was
* deleted in the current transaction, we need to log its parent
@@ -5792,7 +5814,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
if (ret != -ENOENT)
break;
- inode = btrfs_iget(fs_info->sb, parent, root);
+ inode = btrfs_iget_logging(parent, root);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
break;
@@ -6314,7 +6336,6 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
struct btrfs_log_ctx *ctx)
{
const bool orig_log_new_dentries = ctx->log_new_dentries;
- struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_item *item;
int ret = 0;
@@ -6340,7 +6361,7 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
if (key.type == BTRFS_ROOT_ITEM_KEY)
continue;
- di_inode = btrfs_iget(fs_info->sb, key.objectid, inode->root);
+ di_inode = btrfs_iget_logging(key.objectid, inode->root);
if (IS_ERR(di_inode)) {
ret = PTR_ERR(di_inode);
break;
@@ -6724,7 +6745,6 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
struct btrfs_log_ctx *ctx)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
struct btrfs_path *path;
struct btrfs_key key;
@@ -6789,8 +6809,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
cur_offset = item_size;
}
- dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
- root);
+ dir_inode = btrfs_iget_logging(inode_key.objectid, root);
/*
* If the parent inode was deleted, return an error to
* fallback to a transaction commit. This is to prevent
@@ -6852,7 +6871,6 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
while (true) {
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
int slot;
struct btrfs_key search_key;
@@ -6867,7 +6885,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
search_key.objectid = found_key.offset;
search_key.type = BTRFS_INODE_ITEM_KEY;
search_key.offset = 0;
- inode = btrfs_iget(fs_info->sb, ino, root);
+ inode = btrfs_iget_logging(ino, root);
if (IS_ERR(inode))
return PTR_ERR(inode);
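
btrfs_iget_logging() above is a thin wrapper whose only job is to make the inode lookup run under NOFS semantics while a transaction handle is held. The same scoped-allocation pattern, sketched with placeholder names (struct object and do_lookup() are not btrfs API):

#include <linux/sched/mm.h>

/*
 * Sketch of scoped GFP_NOFS; do_lookup() stands in for any callee that may
 * allocate with GFP_KERNEL while a transaction handle is held.
 */
struct object;
struct object *do_lookup(u64 id);

static struct object *lookup_under_transaction(u64 id)
{
	unsigned int nofs_flag;
	struct object *obj;

	nofs_flag = memalloc_nofs_save();	/* GFP_KERNEL now behaves as GFP_NOFS */
	obj = do_lookup(id);
	memalloc_nofs_restore(nofs_flag);

	return obj;
}

The scope is saved and restored rather than passing GFP_NOFS explicitly because the allocations happen in callees that take no gfp argument.
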
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 6465e2574230..06cdf1a8a16f 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -133,7 +133,7 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file)
return 0;
}
-static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
+void cachefiles_flush_reqs(struct cachefiles_cache *cache)
{
struct xarray *xa = &cache->reqs;
struct cachefiles_req *req;
@@ -159,6 +159,7 @@ static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
xa_for_each(xa, index, req) {
req->error = -EIO;
complete(&req->done);
+ __xa_erase(xa, index);
}
xa_unlock(xa);
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index d33169f0018b..6845a90cdfcc 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -55,6 +55,7 @@ struct cachefiles_ondemand_info {
int ondemand_id;
enum cachefiles_object_state state;
struct cachefiles_object *object;
+ spinlock_t lock;
};
/*
@@ -138,6 +139,7 @@ static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
struct cachefiles_req {
struct cachefiles_object *object;
struct completion done;
+ refcount_t ref;
int error;
struct cachefiles_msg msg;
};
@@ -186,6 +188,7 @@ extern int cachefiles_has_space(struct cachefiles_cache *cache,
* daemon.c
*/
extern const struct file_operations cachefiles_daemon_fops;
+extern void cachefiles_flush_reqs(struct cachefiles_cache *cache);
extern void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache);
extern void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache);
@@ -424,6 +427,8 @@ do { \
pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__); \
fscache_io_error((___cache)->cache); \
set_bit(CACHEFILES_DEAD, &(___cache)->flags); \
+ if (cachefiles_in_ondemand_mode(___cache)) \
+ cachefiles_flush_reqs(___cache); \
} while (0)
#define cachefiles_io_error_obj(object, FMT, ...) \
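
The refcount_t added to struct cachefiles_req (and the per-object spinlock) support the lifetime changes in ondemand.c below: the submitter holds one reference, the daemon takes another while it copies the request out, and the last put frees the request. A generic sketch of that lifetime with made-up names (my_req, my_req_alloc and my_req_put are illustrative, not the cachefiles API):

#include <linux/refcount.h>
#include <linux/slab.h>

/* Sketch only: a refcounted request object. */
struct my_req {
	refcount_t	ref;
	/* ... request payload ... */
};

static struct my_req *my_req_alloc(void)
{
	struct my_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (req)
		refcount_set(&req->ref, 1);	/* submitter's reference */
	return req;
}

/* The reader does refcount_inc(&req->ref) before dropping its lock, and both
 * sides release through the same put: */
static void my_req_put(struct my_req *req)
{
	if (refcount_dec_and_test(&req->ref))
		kfree(req);
}
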
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index 4ba42f1fa3b4..bce005f2b456 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -1,22 +1,42 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-#include <linux/fdtable.h>
#include <linux/anon_inodes.h>
#include <linux/uio.h>
#include "internal.h"
+struct ondemand_anon_file {
+ struct file *file;
+ int fd;
+};
+
+static inline void cachefiles_req_put(struct cachefiles_req *req)
+{
+ if (refcount_dec_and_test(&req->ref))
+ kfree(req);
+}
+
static int cachefiles_ondemand_fd_release(struct inode *inode,
struct file *file)
{
struct cachefiles_object *object = file->private_data;
- struct cachefiles_cache *cache = object->volume->cache;
- struct cachefiles_ondemand_info *info = object->ondemand;
- int object_id = info->ondemand_id;
+ struct cachefiles_cache *cache;
+ struct cachefiles_ondemand_info *info;
+ int object_id;
struct cachefiles_req *req;
- XA_STATE(xas, &cache->reqs, 0);
+ XA_STATE(xas, NULL, 0);
+
+ if (!object)
+ return 0;
+
+ info = object->ondemand;
+ cache = object->volume->cache;
+ xas.xa = &cache->reqs;
xa_lock(&cache->reqs);
+ spin_lock(&info->lock);
+ object_id = info->ondemand_id;
info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
cachefiles_ondemand_set_object_close(object);
+ spin_unlock(&info->lock);
/* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */
xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
@@ -76,12 +96,12 @@ static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
}
static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
- unsigned long arg)
+ unsigned long id)
{
struct cachefiles_object *object = filp->private_data;
struct cachefiles_cache *cache = object->volume->cache;
struct cachefiles_req *req;
- unsigned long id;
+ XA_STATE(xas, &cache->reqs, id);
if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
return -EINVAL;
@@ -89,10 +109,15 @@ static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
return -EOPNOTSUPP;
- id = arg;
- req = xa_erase(&cache->reqs, id);
- if (!req)
+ xa_lock(&cache->reqs);
+ req = xas_load(&xas);
+ if (!req || req->msg.opcode != CACHEFILES_OP_READ ||
+ req->object != object) {
+ xa_unlock(&cache->reqs);
return -EINVAL;
+ }
+ xas_store(&xas, NULL);
+ xa_unlock(&cache->reqs);
trace_cachefiles_ondemand_cread(object, id);
complete(&req->done);
@@ -116,10 +141,12 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
{
struct cachefiles_req *req;
struct fscache_cookie *cookie;
+ struct cachefiles_ondemand_info *info;
char *pid, *psize;
unsigned long id;
long size;
int ret;
+ XA_STATE(xas, &cache->reqs, 0);
if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
return -EOPNOTSUPP;
@@ -143,10 +170,18 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
if (ret)
return ret;
- req = xa_erase(&cache->reqs, id);
- if (!req)
+ xa_lock(&cache->reqs);
+ xas.xa_index = id;
+ req = xas_load(&xas);
+ if (!req || req->msg.opcode != CACHEFILES_OP_OPEN ||
+ !req->object->ondemand->ondemand_id) {
+ xa_unlock(&cache->reqs);
return -EINVAL;
+ }
+ xas_store(&xas, NULL);
+ xa_unlock(&cache->reqs);
+ info = req->object->ondemand;
/* fail OPEN request if copen format is invalid */
ret = kstrtol(psize, 0, &size);
if (ret) {
@@ -166,6 +201,32 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
goto out;
}
+ spin_lock(&info->lock);
+ /*
+ * The anonymous fd was closed before copen? Fail the request.
+ *
+ * t1 | t2
+ * ---------------------------------------------------------
+ * cachefiles_ondemand_copen
+ * req = xa_erase(&cache->reqs, id)
+ * // Anon fd is maliciously closed.
+ * cachefiles_ondemand_fd_release
+ * xa_lock(&cache->reqs)
+ * cachefiles_ondemand_set_object_close(object)
+ * xa_unlock(&cache->reqs)
+ * cachefiles_ondemand_set_object_open
+ * // No one will ever close it again.
+ * cachefiles_ondemand_daemon_read
+ * cachefiles_ondemand_select_req
+ *
+ * Get a read req but its fd is already closed. The daemon can't
+ * issue a cread ioctl with a closed fd, and then it hangs.
+ */
+ if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
+ spin_unlock(&info->lock);
+ req->error = -EBADFD;
+ goto out;
+ }
cookie = req->object->cookie;
cookie->object_size = size;
if (size)
@@ -175,9 +236,15 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
trace_cachefiles_ondemand_copen(req->object, id, size);
cachefiles_ondemand_set_object_open(req->object);
+ spin_unlock(&info->lock);
wake_up_all(&cache->daemon_pollwq);
out:
+ spin_lock(&info->lock);
+ /* Mark the object closed so it does not stay stuck in reopening state */
+ if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED)
+ cachefiles_ondemand_set_object_close(req->object);
+ spin_unlock(&info->lock);
complete(&req->done);
return ret;
}
@@ -205,14 +272,14 @@ int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
return 0;
}
-static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
+static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
+ struct ondemand_anon_file *anon_file)
{
struct cachefiles_object *object;
struct cachefiles_cache *cache;
struct cachefiles_open *load;
- struct file *file;
u32 object_id;
- int ret, fd;
+ int ret;
object = cachefiles_grab_object(req->object,
cachefiles_obj_get_ondemand_fd);
@@ -224,35 +291,53 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
if (ret < 0)
goto err;
- fd = get_unused_fd_flags(O_WRONLY);
- if (fd < 0) {
- ret = fd;
+ anon_file->fd = get_unused_fd_flags(O_WRONLY);
+ if (anon_file->fd < 0) {
+ ret = anon_file->fd;
goto err_free_id;
}
- file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
- object, O_WRONLY);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
+ anon_file->file = anon_inode_getfile("[cachefiles]",
+ &cachefiles_ondemand_fd_fops, object, O_WRONLY);
+ if (IS_ERR(anon_file->file)) {
+ ret = PTR_ERR(anon_file->file);
goto err_put_fd;
}
- file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
- fd_install(fd, file);
+ spin_lock(&object->ondemand->lock);
+ if (object->ondemand->ondemand_id > 0) {
+ spin_unlock(&object->ondemand->lock);
+ /* Pair with check in cachefiles_ondemand_fd_release(). */
+ anon_file->file->private_data = NULL;
+ ret = -EEXIST;
+ goto err_put_file;
+ }
+
+ anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
load = (void *)req->msg.data;
- load->fd = fd;
+ load->fd = anon_file->fd;
object->ondemand->ondemand_id = object_id;
+ spin_unlock(&object->ondemand->lock);
cachefiles_get_unbind_pincount(cache);
trace_cachefiles_ondemand_open(object, &req->msg, load);
return 0;
+err_put_file:
+ fput(anon_file->file);
+ anon_file->file = NULL;
err_put_fd:
- put_unused_fd(fd);
+ put_unused_fd(anon_file->fd);
+ anon_file->fd = ret;
err_free_id:
xa_erase(&cache->ondemand_ids, object_id);
err:
+ spin_lock(&object->ondemand->lock);
+ /* Avoid marking an opened object as closed. */
+ if (object->ondemand->ondemand_id <= 0)
+ cachefiles_ondemand_set_object_close(object);
+ spin_unlock(&object->ondemand->lock);
cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
return ret;
}
@@ -294,14 +379,28 @@ static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xa
return NULL;
}
+static inline bool cachefiles_ondemand_finish_req(struct cachefiles_req *req,
+ struct xa_state *xas, int err)
+{
+ if (unlikely(!xas || !req))
+ return false;
+
+ if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)
+ return false;
+
+ req->error = err;
+ complete(&req->done);
+ return true;
+}
+
ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
char __user *_buffer, size_t buflen)
{
struct cachefiles_req *req;
struct cachefiles_msg *msg;
- unsigned long id = 0;
size_t n;
int ret = 0;
+ struct ondemand_anon_file anon_file;
XA_STATE(xas, &cache->reqs, cache->req_id_next);
xa_lock(&cache->reqs);
@@ -330,42 +429,37 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
cache->req_id_next = xas.xa_index + 1;
+ refcount_inc(&req->ref);
+ cachefiles_grab_object(req->object, cachefiles_obj_get_read_req);
xa_unlock(&cache->reqs);
- id = xas.xa_index;
-
if (msg->opcode == CACHEFILES_OP_OPEN) {
- ret = cachefiles_ondemand_get_fd(req);
- if (ret) {
- cachefiles_ondemand_set_object_close(req->object);
- goto error;
- }
+ ret = cachefiles_ondemand_get_fd(req, &anon_file);
+ if (ret)
+ goto out;
}
- msg->msg_id = id;
+ msg->msg_id = xas.xa_index;
msg->object_id = req->object->ondemand->ondemand_id;
- if (copy_to_user(_buffer, msg, n) != 0) {
+ if (copy_to_user(_buffer, msg, n) != 0)
ret = -EFAULT;
- goto err_put_fd;
- }
- /* CLOSE request has no reply */
- if (msg->opcode == CACHEFILES_OP_CLOSE) {
- xa_erase(&cache->reqs, id);
- complete(&req->done);
+ if (msg->opcode == CACHEFILES_OP_OPEN) {
+ if (ret < 0) {
+ fput(anon_file.file);
+ put_unused_fd(anon_file.fd);
+ goto out;
+ }
+ fd_install(anon_file.fd, anon_file.file);
}
-
- return n;
-
-err_put_fd:
- if (msg->opcode == CACHEFILES_OP_OPEN)
- close_fd(((struct cachefiles_open *)msg->data)->fd);
-error:
- xa_erase(&cache->reqs, id);
- req->error = ret;
- complete(&req->done);
- return ret;
+out:
+ cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
+	/* Remove the request on error; CLOSE requests have no reply */
+ if (ret || msg->opcode == CACHEFILES_OP_CLOSE)
+ cachefiles_ondemand_finish_req(req, &xas, ret);
+ cachefiles_req_put(req);
+ return ret ? ret : n;
}
typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);
@@ -395,6 +489,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
goto out;
}
+ refcount_set(&req->ref, 1);
req->object = object;
init_completion(&req->done);
req->msg.opcode = opcode;
@@ -454,9 +549,19 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
goto out;
wake_up_all(&cache->daemon_pollwq);
- wait_for_completion(&req->done);
- ret = req->error;
- kfree(req);
+wait:
+ ret = wait_for_completion_killable(&req->done);
+ if (!ret) {
+ ret = req->error;
+ } else {
+ ret = -EINTR;
+ if (!cachefiles_ondemand_finish_req(req, &xas, ret)) {
+ /* Someone will complete it soon. */
+ cpu_relax();
+ goto wait;
+ }
+ }
+ cachefiles_req_put(req);
return ret;
out:
/* Reset the object to close state in error handling path.
@@ -578,6 +683,7 @@ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
return -ENOMEM;
object->ondemand->object = object;
+ spin_lock_init(&object->ondemand->lock);
INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
return 0;
}
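The cachefiles_ondemand_finish_req() helper added above turns request completion into a claim-once operation: whichever path wins the xa_cmpxchg() on the request's slot is the only one allowed to record the error and complete the waiter, which is what lets the new killable wait in cachefiles_ondemand_send_req() race safely with the daemon. A minimal userspace sketch of that claim-once pattern, with a C11 atomic pointer standing in for the XArray slot and a plain flag standing in for the completion (illustrative only, not the kernel code):

/*
 * Sketch of the "claim a request exactly once" idea: only the caller
 * that swaps the slot from req to NULL may complete the request.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct request {
	int error;
	bool done;
};

static bool finish_req(_Atomic(struct request *) *slot,
		       struct request *req, int err)
{
	struct request *expected = req;

	if (!atomic_compare_exchange_strong(slot, &expected, NULL))
		return false;		/* somebody else claimed it */

	req->error = err;
	req->done = true;		/* stands in for complete(&req->done) */
	return true;
}

int main(void)
{
	struct request req = { 0 };
	_Atomic(struct request *) slot = &req;

	printf("first claim:  %d\n", finish_req(&slot, &req, -5));	/* 1 */
	printf("second claim: %d\n", finish_req(&slot, &req, 0));	/* 0 */
	return 0;
}

The second caller finds the slot already cleared and backs off, mirroring the "Someone will complete it soon" retry in the interrupted-wait path above.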
diff --git a/fs/dcache.c b/fs/dcache.c
index 1ee6404b430b..407095188f83 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2360,17 +2360,19 @@ EXPORT_SYMBOL(d_hash_and_lookup);
* - unhash this dentry and free it.
*
* Usually, we want to just turn this into
- * a negative dentry, but certain workloads can
- * generate a large number of negative dentries.
- * Therefore, it would be better to simply
- * unhash it.
+ * a negative dentry, but if anybody else is
+ * currently using the dentry or the inode,
+ * we can't do that; instead we fall back on
+ * removing it from the hash queues and waiting
+ * for it to be deleted later, once it has no users.
*/
/**
* d_delete - delete a dentry
* @dentry: The dentry to delete
*
- * Remove the dentry from the hash queues so it can be deleted later.
+ * Turn the dentry into a negative dentry if possible, otherwise
+ * remove it from the hash queues so it can be deleted later
*/
void d_delete(struct dentry * dentry)
@@ -2379,8 +2381,6 @@ void d_delete(struct dentry * dentry)
spin_lock(&inode->i_lock);
spin_lock(&dentry->d_lock);
- __d_drop(dentry);
-
/*
* Are we the only user?
*/
@@ -2388,6 +2388,7 @@ void d_delete(struct dentry * dentry)
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
dentry_unlink_inode(dentry);
} else {
+ __d_drop(dentry);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
}
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index dc51df0b118d..8fd928899a59 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -107,8 +107,16 @@ static int debugfs_parse_param(struct fs_context *fc, struct fs_parameter *param
int opt;
opt = fs_parse(fc, debugfs_param_specs, param, &result);
- if (opt < 0)
+ if (opt < 0) {
+ /*
+ * We might like to report bad mount options here; but
+ * traditionally debugfs has ignored all mount options
+ */
+ if (opt == -ENOPARAM)
+ return 0;
+
return opt;
+ }
switch (opt) {
case Opt_uid:
diff --git a/fs/file.c b/fs/file.c
index 8076aef9c210..a3b72aa64f11 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -486,12 +486,12 @@ struct files_struct init_files = {
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
- unsigned int maxfd = fdt->max_fds;
+ unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
unsigned int maxbit = maxfd / BITS_PER_LONG;
unsigned int bitbit = start / BITS_PER_LONG;
bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
- if (bitbit > maxfd)
+ if (bitbit >= maxfd)
return maxfd;
if (bitbit > start)
start = bitbit;
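The find_next_fd() hunk is a one-character boundary fix: when every word of full_fds_bits is set, find_next_zero_bit() returns its size argument (maxbit), so bitbit works out to exactly maxfd and the old "bitbit > maxfd" test let it through. A standalone illustration of that boundary, with made-up sizes and plain C arithmetic:

#include <stdio.h>

#define BITS_PER_LONG 64

int main(void)
{
	unsigned int maxfd  = 256;			/* always a multiple of BITS_PER_LONG */
	unsigned int maxbit = maxfd / BITS_PER_LONG;	/* 4 words of full_fds_bits */
	unsigned int not_found = maxbit;		/* find_next_zero_bit() with no hit */
	unsigned int bitbit = not_found * BITS_PER_LONG;

	printf("bitbit = %u, maxfd = %u\n", bitbit, maxfd);	/* 256, 256 */
	printf("old check (>):  %d\n", bitbit > maxfd);		/* 0 - missed */
	printf("new check (>=): %d\n", bitbit >= maxfd);	/* 1 - return maxfd */
	return 0;
}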
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 41c8f0c68ef5..d46558990279 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -241,6 +241,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
unsigned block_size = (1 << block_bits);
size_t poff = offset_in_folio(folio, *pos);
size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
+ size_t orig_plen = plen;
unsigned first = poff >> block_bits;
unsigned last = (poff + plen - 1) >> block_bits;
@@ -277,7 +278,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
* handle both halves separately so that we properly zero data in the
* page cache for blocks that are entirely outside of i_size.
*/
- if (orig_pos <= isize && orig_pos + length > isize) {
+ if (orig_pos <= isize && orig_pos + orig_plen > isize) {
unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
if (first <= end && last > end)
@@ -877,37 +878,51 @@ static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
size_t copied, struct folio *folio)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ loff_t old_size = iter->inode->i_size;
+ size_t written;
if (srcmap->type == IOMAP_INLINE) {
iomap_write_end_inline(iter, folio, pos, copied);
- return true;
+ written = copied;
+ } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
+ written = block_write_end(NULL, iter->inode->i_mapping, pos,
+ len, copied, &folio->page, NULL);
+ WARN_ON_ONCE(written != copied && written != 0);
+ } else {
+ written = __iomap_write_end(iter->inode, pos, len, copied,
+ folio) ? copied : 0;
}
- if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
- size_t bh_written;
-
- bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
- len, copied, &folio->page, NULL);
- WARN_ON_ONCE(bh_written != copied && bh_written != 0);
- return bh_written == copied;
+ /*
+ * Update the in-memory inode size after copying the data into the page
+ * cache. It's up to the file system to write the updated size to disk,
+ * preferably after I/O completion so that no stale data is exposed.
+ * Only once that's done can we unlock and release the folio.
+ */
+ if (pos + written > old_size) {
+ i_size_write(iter->inode, pos + written);
+ iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
}
+ __iomap_put_folio(iter, pos, written, folio);
- return __iomap_write_end(iter->inode, pos, len, copied, folio);
+ if (old_size < pos)
+ pagecache_isize_extended(iter->inode, old_size, pos);
+
+ return written == copied;
}
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
loff_t length = iomap_length(iter);
- size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
loff_t pos = iter->pos;
ssize_t total_written = 0;
long status = 0;
struct address_space *mapping = iter->inode->i_mapping;
+ size_t chunk = mapping_max_folio_size(mapping);
unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
do {
struct folio *folio;
- loff_t old_size;
size_t offset; /* Offset into folio */
size_t bytes; /* Bytes to write to folio */
size_t copied; /* Bytes copied from user */
@@ -959,23 +974,6 @@ retry:
written = iomap_write_end(iter, pos, bytes, copied, folio) ?
copied : 0;
- /*
- * Update the in-memory inode size after copying the data into
- * the page cache. It's up to the file system to write the
- * updated size to disk, preferably after I/O completion so that
- * no stale data is exposed. Only once that's done can we
- * unlock and release the folio.
- */
- old_size = iter->inode->i_size;
- if (pos + written > old_size) {
- i_size_write(iter->inode, pos + written);
- iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
- }
- __iomap_put_folio(iter, pos, written, folio);
-
- if (old_size < pos)
- pagecache_isize_extended(iter->inode, old_size, pos);
-
cond_resched();
if (unlikely(written == 0)) {
/*
@@ -1346,7 +1344,6 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
bytes = folio_size(folio) - offset;
ret = iomap_write_end(iter, pos, bytes, bytes, folio);
- __iomap_put_folio(iter, pos, bytes, folio);
if (WARN_ON_ONCE(!ret))
return -EIO;
@@ -1412,7 +1409,6 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
folio_mark_accessed(folio);
ret = iomap_write_end(iter, pos, bytes, bytes, folio);
- __iomap_put_folio(iter, pos, bytes, folio);
if (WARN_ON_ONCE(!ret))
return -EIO;
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 0fb7afac298e..9987055293b3 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -557,9 +557,11 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
size_check:
if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
+ int size = min_t(int, EALIST_SIZE(ea_buf->xattr), ea_size);
+
printk(KERN_ERR "ea_get: invalid extended attribute\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
- ea_buf->xattr, ea_size, 1);
+ ea_buf->xattr, size, 1);
ea_release(inode, ea_buf);
rc = -EIO;
goto clean_up;
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 1121601536d1..07bc1fd43530 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -181,7 +181,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
struct folio *folio, *writethrough = NULL;
enum netfs_how_to_modify howto;
enum netfs_folio_trace trace;
- unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
+ unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
ssize_t written = 0, ret, ret2;
loff_t i_size, pos = iocb->ki_pos, from, to;
size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
index f516460e994e..e14cd53ac9fd 100644
--- a/fs/netfs/direct_write.c
+++ b/fs/netfs/direct_write.c
@@ -12,7 +12,7 @@
static void netfs_cleanup_dio_write(struct netfs_io_request *wreq)
{
struct inode *inode = wreq->inode;
- unsigned long long end = wreq->start + wreq->len;
+ unsigned long long end = wreq->start + wreq->transferred;
if (!wreq->error &&
i_size_read(inode) < end) {
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index c90d482b1650..f4a642727479 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -72,6 +72,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
}
}
+ atomic_inc(&ctx->io_count);
trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
netfs_proc_add_rreq(rreq);
netfs_stat(&netfs_n_rh_rreq);
@@ -124,6 +125,7 @@ static void netfs_free_request(struct work_struct *work)
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, work);
+ struct netfs_inode *ictx = netfs_inode(rreq->inode);
unsigned int i;
trace_netfs_rreq(rreq, netfs_rreq_trace_free);
@@ -142,6 +144,9 @@ static void netfs_free_request(struct work_struct *work)
}
kvfree(rreq->direct_bv);
}
+
+ if (atomic_dec_and_test(&ictx->io_count))
+ wake_up_var(&ictx->io_count);
call_rcu(&rreq->rcu, netfs_free_request_rcu);
}
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 60112e4b2c5e..426cf87aaf2e 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -510,7 +510,7 @@ reassess_streams:
* stream has a gap that can be jumped.
*/
if (notes & SOME_EMPTY) {
- unsigned long long jump_to = wreq->start + wreq->len;
+ unsigned long long jump_to = wreq->start + READ_ONCE(wreq->submitted);
for (s = 0; s < NR_IO_STREAMS; s++) {
stream = &wreq->io_streams[s];
@@ -690,10 +690,11 @@ void netfs_write_collection_worker(struct work_struct *work)
wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
if (wreq->iocb) {
- wreq->iocb->ki_pos += wreq->transferred;
+ size_t written = min(wreq->transferred, wreq->len);
+ wreq->iocb->ki_pos += written;
if (wreq->iocb->ki_complete)
wreq->iocb->ki_complete(
- wreq->iocb, wreq->error ? wreq->error : wreq->transferred);
+ wreq->iocb, wreq->error ? wreq->error : written);
wreq->iocb = VFS_PTR_POISON;
}
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index e190043bc0da..3aa86e268f40 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -254,7 +254,7 @@ static void netfs_issue_write(struct netfs_io_request *wreq,
stream->construct = NULL;
if (subreq->start + subreq->len > wreq->start + wreq->submitted)
- wreq->len = wreq->submitted = subreq->start + subreq->len - wreq->start;
+ WRITE_ONCE(wreq->submitted, subreq->start + subreq->len - wreq->start);
netfs_do_issue_write(stream, subreq);
}
@@ -636,7 +636,12 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
mutex_unlock(&ictx->wb_lock);
- ret = wreq->error;
+ if (wreq->iocb) {
+ ret = -EIOCBQUEUED;
+ } else {
+ wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
+ ret = wreq->error;
+ }
netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
return ret;
}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 342930996226..07a7be27182e 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1627,7 +1627,16 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
switch (error) {
case 1:
break;
- case 0:
+ case -ETIMEDOUT:
+ if (inode && (IS_ROOT(dentry) ||
+ NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL))
+ error = 1;
+ break;
+ case -ESTALE:
+ case -ENOENT:
+ error = 0;
+ fallthrough;
+ default:
/*
* We can't d_drop the root of a disconnected tree:
* its d_hash is on the s_anon list and d_drop() would hide
@@ -1682,18 +1691,8 @@ static int nfs_lookup_revalidate_dentry(struct inode *dir,
dir_verifier = nfs_save_change_attribute(dir);
ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
- if (ret < 0) {
- switch (ret) {
- case -ESTALE:
- case -ENOENT:
- ret = 0;
- break;
- case -ETIMEDOUT:
- if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
- ret = 1;
- }
+ if (ret < 0)
goto out;
- }
/* Request help from readdirplus */
nfs_lookup_advise_force_readdirplus(dir, flags);
@@ -1737,7 +1736,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct inode *inode;
- int error;
+ int error = 0;
nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
inode = d_inode(dentry);
@@ -1782,7 +1781,7 @@ out_valid:
out_bad:
if (flags & LOOKUP_RCU)
return -ECHILD;
- return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
+ return nfs_lookup_revalidate_done(dir, dentry, inode, error);
}
static int
@@ -1804,9 +1803,10 @@ __nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
if (parent != READ_ONCE(dentry->d_parent))
return -ECHILD;
} else {
- /* Wait for unlink to complete */
+ /* Wait for unlink to complete - see unblock_revalidate() */
wait_var_event(&dentry->d_fsdata,
- dentry->d_fsdata != NFS_FSDATA_BLOCKED);
+ smp_load_acquire(&dentry->d_fsdata)
+ != NFS_FSDATA_BLOCKED);
parent = dget_parent(dentry);
ret = reval(d_inode(parent), dentry, flags);
dput(parent);
@@ -1819,6 +1819,29 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
}
+static void block_revalidate(struct dentry *dentry)
+{
+ /* old devname - just in case */
+ kfree(dentry->d_fsdata);
+
+ /* Any new reference that could lead to an open
+ * will take ->d_lock in lookup_open() -> d_lookup().
+ * Holding this lock ensures we cannot race with
+	 * __nfs_lookup_revalidate() and removes any need
+ * for further barriers.
+ */
+ lockdep_assert_held(&dentry->d_lock);
+
+ dentry->d_fsdata = NFS_FSDATA_BLOCKED;
+}
+
+static void unblock_revalidate(struct dentry *dentry)
+{
+ /* store_release ensures wait_var_event() sees the update */
+ smp_store_release(&dentry->d_fsdata, NULL);
+ wake_up_var(&dentry->d_fsdata);
+}
+
/*
* A weaker form of d_revalidate for revalidating just the d_inode(dentry)
* when we don't really care about the dentry name. This is called when a
@@ -2255,6 +2278,9 @@ int nfs_atomic_open_v23(struct inode *dir, struct dentry *dentry,
*/
int error = 0;
+ if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
+ return -ENAMETOOLONG;
+
if (open_flags & O_CREAT) {
file->f_mode |= FMODE_CREATED;
error = nfs_do_create(dir, dentry, mode, open_flags);
@@ -2549,15 +2575,12 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry)
spin_unlock(&dentry->d_lock);
goto out;
}
- /* old devname */
- kfree(dentry->d_fsdata);
- dentry->d_fsdata = NFS_FSDATA_BLOCKED;
+ block_revalidate(dentry);
spin_unlock(&dentry->d_lock);
error = nfs_safe_remove(dentry);
nfs_dentry_remove_handle_error(dir, dentry, error);
- dentry->d_fsdata = NULL;
- wake_up_var(&dentry->d_fsdata);
+ unblock_revalidate(dentry);
out:
trace_nfs_unlink_exit(dir, dentry, error);
return error;
@@ -2664,8 +2687,7 @@ nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data)
{
struct dentry *new_dentry = data->new_dentry;
- new_dentry->d_fsdata = NULL;
- wake_up_var(&new_dentry->d_fsdata);
+ unblock_revalidate(new_dentry);
}
/*
@@ -2727,11 +2749,6 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (WARN_ON(new_dentry->d_flags & DCACHE_NFSFS_RENAMED) ||
WARN_ON(new_dentry->d_fsdata == NFS_FSDATA_BLOCKED))
goto out;
- if (new_dentry->d_fsdata) {
- /* old devname */
- kfree(new_dentry->d_fsdata);
- new_dentry->d_fsdata = NULL;
- }
spin_lock(&new_dentry->d_lock);
if (d_count(new_dentry) > 2) {
@@ -2753,7 +2770,7 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
new_dentry = dentry;
new_inode = NULL;
} else {
- new_dentry->d_fsdata = NFS_FSDATA_BLOCKED;
+ block_revalidate(new_dentry);
must_unblock = true;
spin_unlock(&new_dentry->d_lock);
}
@@ -2765,6 +2782,8 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
must_unblock ? nfs_unblock_rename : NULL);
if (IS_ERR(task)) {
+ if (must_unblock)
+ unblock_revalidate(new_dentry);
error = PTR_ERR(task);
goto out;
}
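block_revalidate() and unblock_revalidate() above pair a store of NFS_FSDATA_BLOCKED under ->d_lock with an smp_store_release()/smp_load_acquire() handshake, so a waiter in __nfs_lookup_revalidate() cannot observe the cleared marker without also observing everything written before it. A userspace analogue of that release/acquire wake-up, using C11 atomics and a pthread condition variable in place of wake_up_var()/wait_var_event() (a sketch under those assumptions, not the NFS implementation):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int blocked_token;
#define BLOCKED ((void *)&blocked_token)	/* stands in for NFS_FSDATA_BLOCKED */

static _Atomic(void *) fsdata = BLOCKED;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

static void unblock(void)
{
	/* release store: publishes all earlier writes before clearing the marker */
	atomic_store_explicit(&fsdata, NULL, memory_order_release);
	pthread_mutex_lock(&lock);
	pthread_cond_broadcast(&cond);			/* wake_up_var() */
	pthread_mutex_unlock(&lock);
}

static void *waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	while (atomic_load_explicit(&fsdata, memory_order_acquire) == BLOCKED)
		pthread_cond_wait(&cond, &lock);	/* wait_var_event() */
	pthread_mutex_unlock(&lock);
	puts("revalidate may proceed");
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	unblock();
	pthread_join(t, NULL);
	return 0;
}

Broadcasting while holding the mutex mirrors the way wake_up_var() and wait_var_event() serialize on the waitqueue lock: the waiter cannot miss the wake-up between its check and its sleep.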
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index bb2f583eb28b..90079ca134dd 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -141,8 +141,6 @@ int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
ssize_t ret;
- VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
-
if (iov_iter_rw(iter) == READ)
ret = nfs_file_direct_read(iocb, iter, true);
else
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c93c12063b3a..a691fa10b3e9 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4023,6 +4023,23 @@ static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
}
}
+static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1,
+ struct nfs4_pathname *path2)
+{
+ int i;
+
+ if (path1->ncomponents != path2->ncomponents)
+ return false;
+ for (i = 0; i < path1->ncomponents; i++) {
+ if (path1->components[i].len != path2->components[i].len)
+ return false;
+ if (memcmp(path1->components[i].data, path2->components[i].data,
+ path1->components[i].len))
+ return false;
+ }
+ return true;
+}
+
static int _nfs4_discover_trunking(struct nfs_server *server,
struct nfs_fh *fhandle)
{
@@ -4056,9 +4073,13 @@ static int _nfs4_discover_trunking(struct nfs_server *server,
if (status)
goto out_free_3;
- for (i = 0; i < locations->nlocations; i++)
+ for (i = 0; i < locations->nlocations; i++) {
+ if (!_is_same_nfs4_pathname(&locations->fs_path,
+ &locations->locations[i].rootpath))
+ continue;
test_fs_location_for_trunking(&locations->locations[i], clp,
server);
+ }
out_free_3:
kfree(locations->fattr);
out_free_2:
@@ -6268,6 +6289,7 @@ nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
if (status == 0)
nfs_setsecurity(inode, fattr);
+ nfs_free_fattr(fattr);
return status;
}
#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 6efb5068c116..040b6b79c75e 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -1545,6 +1545,11 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
continue;
} else if (index == prev->wb_index + 1)
continue;
+ /*
+ * We will submit more requests after these. Indicate
+ * this to the underlying layers.
+ */
+ desc->pg_moreio = 1;
nfs_pageio_complete(desc);
break;
}
diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
index 0e27a2e4e68b..13818129d268 100644
--- a/fs/nfs/symlink.c
+++ b/fs/nfs/symlink.c
@@ -41,7 +41,7 @@ static int nfs_symlink_filler(struct file *file, struct folio *folio)
error:
folio_set_error(folio);
folio_unlock(folio);
- return -EIO;
+ return error;
}
static const char *nfs_get_link(struct dentry *dentry,
diff --git a/fs/nfsd/netlink.c b/fs/nfsd/netlink.c
index 62d2586d9902..529a75ecf22e 100644
--- a/fs/nfsd/netlink.c
+++ b/fs/nfsd/netlink.c
@@ -44,9 +44,7 @@ static const struct nla_policy nfsd_listener_set_nl_policy[NFSD_A_SERVER_SOCK_AD
static const struct genl_split_ops nfsd_nl_ops[] = {
{
.cmd = NFSD_CMD_RPC_STATUS_GET,
- .start = nfsd_nl_rpc_status_get_start,
.dumpit = nfsd_nl_rpc_status_get_dumpit,
- .done = nfsd_nl_rpc_status_get_done,
.flags = GENL_CMD_CAP_DUMP,
},
{
diff --git a/fs/nfsd/netlink.h b/fs/nfsd/netlink.h
index e3724637d64d..2e132ef328f8 100644
--- a/fs/nfsd/netlink.h
+++ b/fs/nfsd/netlink.h
@@ -15,9 +15,6 @@
extern const struct nla_policy nfsd_sock_nl_policy[NFSD_A_SOCK_TRANSPORT_NAME + 1];
extern const struct nla_policy nfsd_version_nl_policy[NFSD_A_VERSION_ENABLED + 1];
-int nfsd_nl_rpc_status_get_start(struct netlink_callback *cb);
-int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb);
-
int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 202140df8f82..c848ebe5d08f 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1460,28 +1460,6 @@ static int create_proc_exports_entry(void)
unsigned int nfsd_net_id;
-/**
- * nfsd_nl_rpc_status_get_start - Prepare rpc_status_get dumpit
- * @cb: netlink metadata and command arguments
- *
- * Return values:
- * %0: The rpc_status_get command may proceed
- * %-ENODEV: There is no NFSD running in this namespace
- */
-int nfsd_nl_rpc_status_get_start(struct netlink_callback *cb)
-{
- struct nfsd_net *nn = net_generic(sock_net(cb->skb->sk), nfsd_net_id);
- int ret = -ENODEV;
-
- mutex_lock(&nfsd_mutex);
- if (nn->nfsd_serv)
- ret = 0;
- else
- mutex_unlock(&nfsd_mutex);
-
- return ret;
-}
-
static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb,
struct netlink_callback *cb,
struct nfsd_genl_rqstp *rqstp)
@@ -1558,8 +1536,16 @@ static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb,
int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb)
{
- struct nfsd_net *nn = net_generic(sock_net(skb->sk), nfsd_net_id);
int i, ret, rqstp_index = 0;
+ struct nfsd_net *nn;
+
+ mutex_lock(&nfsd_mutex);
+
+ nn = net_generic(sock_net(skb->sk), nfsd_net_id);
+ if (!nn->nfsd_serv) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
rcu_read_lock();
@@ -1636,22 +1622,10 @@ int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
ret = skb->len;
out:
rcu_read_unlock();
-
- return ret;
-}
-
-/**
- * nfsd_nl_rpc_status_get_done - rpc_status_get dumpit post-processing
- * @cb: netlink metadata and command arguments
- *
- * Return values:
- * %0: Success
- */
-int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb)
-{
+out_unlock:
mutex_unlock(&nfsd_mutex);
- return 0;
+ return ret;
}
/**
@@ -2195,6 +2169,8 @@ static __net_init int nfsd_net_init(struct net *net)
nn->nfsd_svcstats.program = &nfsd_program;
nn->nfsd_versions = NULL;
nn->nfsd4_minorversions = NULL;
+ nn->nfsd_info.mutex = &nfsd_mutex;
+ nn->nfsd_serv = NULL;
nfsd4_init_leases_net(nn);
get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
seqlock_init(&nn->writeverf_lock);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index cd9a6a1a9fc8..89d7918de7b1 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -672,7 +672,6 @@ int nfsd_create_serv(struct net *net)
return error;
}
spin_lock(&nfsd_notifier_lock);
- nn->nfsd_info.mutex = &nfsd_mutex;
nn->nfsd_serv = serv;
spin_unlock(&nfsd_notifier_lock);
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index a002a44ff161..52e50b1b7f22 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -607,7 +607,7 @@ int nilfs_empty_dir(struct inode *inode)
kaddr = nilfs_get_folio(inode, i, &folio);
if (IS_ERR(kaddr))
- continue;
+ return 0;
de = (struct nilfs_dir_entry *)kaddr;
kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 60d4f59f7665..6ea81f1d5094 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1652,6 +1652,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
if (bh->b_folio != bd_folio) {
if (bd_folio) {
folio_lock(bd_folio);
+ folio_wait_writeback(bd_folio);
folio_clear_dirty_for_io(bd_folio);
folio_start_writeback(bd_folio);
folio_unlock(bd_folio);
@@ -1665,6 +1666,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
if (bh == segbuf->sb_super_root) {
if (bh->b_folio != bd_folio) {
folio_lock(bd_folio);
+ folio_wait_writeback(bd_folio);
folio_clear_dirty_for_io(bd_folio);
folio_start_writeback(bd_folio);
folio_unlock(bd_folio);
@@ -1681,6 +1683,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
}
if (bd_folio) {
folio_lock(bd_folio);
+ folio_wait_writeback(bd_folio);
folio_clear_dirty_for_io(bd_folio);
folio_start_writeback(bd_folio);
folio_unlock(bd_folio);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index f0467d3b3c88..6be175a1ab3c 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2366,6 +2366,11 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
}
list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
+ ret = ocfs2_assure_trans_credits(handle, credits);
+ if (ret < 0) {
+ mlog_errno(ret);
+ break;
+ }
ret = ocfs2_mark_extent_written(inode, &et, handle,
ue->ue_cpos, 1,
ue->ue_phys,
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 604fea3a26ff..530fba34f6d3 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -446,6 +446,23 @@ bail:
}
/*
+ * Make sure the handle has at least 'nblocks' credits available. If it does
+ * not, we try to extend the handle to have enough credits; if that fails, we
+ * restart the transaction to get enough credits. The same notes regarding
+ * data consistency and locking implications as for ocfs2_extend_trans()
+ * apply here.
+ */
+int ocfs2_assure_trans_credits(handle_t *handle, int nblocks)
+{
+ int old_nblks = jbd2_handle_buffer_credits(handle);
+
+ trace_ocfs2_assure_trans_credits(old_nblks);
+ if (old_nblks >= nblocks)
+ return 0;
+ return ocfs2_extend_trans(handle, nblocks - old_nblks);
+}
+
+/*
* If we have fewer than thresh credits, extend by OCFS2_MAX_TRANS_DATA.
* If that fails, restart the transaction & regain write access for the
* buffer head which is used for metadata modifications.
@@ -479,12 +496,6 @@ bail:
return status;
}
-
-struct ocfs2_triggers {
- struct jbd2_buffer_trigger_type ot_triggers;
- int ot_offset;
-};
-
static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers)
{
return container_of(triggers, struct ocfs2_triggers, ot_triggers);
@@ -548,85 +559,76 @@ static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
struct buffer_head *bh)
{
+ struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers);
+
mlog(ML_ERROR,
"ocfs2_abort_trigger called by JBD2. bh = 0x%lx, "
"bh->b_blocknr = %llu\n",
(unsigned long)bh,
(unsigned long long)bh->b_blocknr);
- ocfs2_error(bh->b_assoc_map->host->i_sb,
+ ocfs2_error(ot->sb,
"JBD2 has aborted our journal, ocfs2 cannot continue\n");
}
-static struct ocfs2_triggers di_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_dinode, i_check),
-};
-
-static struct ocfs2_triggers eb_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_extent_block, h_check),
-};
-
-static struct ocfs2_triggers rb_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_refcount_block, rf_check),
-};
-
-static struct ocfs2_triggers gd_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_group_desc, bg_check),
-};
-
-static struct ocfs2_triggers db_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_db_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
-};
+static void ocfs2_setup_csum_triggers(struct super_block *sb,
+ enum ocfs2_journal_trigger_type type,
+ struct ocfs2_triggers *ot)
+{
+ BUG_ON(type >= OCFS2_JOURNAL_TRIGGER_COUNT);
-static struct ocfs2_triggers xb_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check),
-};
+ switch (type) {
+ case OCFS2_JTR_DI:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_dinode, i_check);
+ break;
+ case OCFS2_JTR_EB:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_extent_block, h_check);
+ break;
+ case OCFS2_JTR_RB:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_refcount_block, rf_check);
+ break;
+ case OCFS2_JTR_GD:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_group_desc, bg_check);
+ break;
+ case OCFS2_JTR_DB:
+ ot->ot_triggers.t_frozen = ocfs2_db_frozen_trigger;
+ break;
+ case OCFS2_JTR_XB:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_xattr_block, xb_check);
+ break;
+ case OCFS2_JTR_DQ:
+ ot->ot_triggers.t_frozen = ocfs2_dq_frozen_trigger;
+ break;
+ case OCFS2_JTR_DR:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check);
+ break;
+ case OCFS2_JTR_DL:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check);
+ break;
+ case OCFS2_JTR_NONE:
+ /* To make compiler happy... */
+ return;
+ }
-static struct ocfs2_triggers dq_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_dq_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
-};
+ ot->ot_triggers.t_abort = ocfs2_abort_trigger;
+ ot->sb = sb;
+}
-static struct ocfs2_triggers dr_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check),
-};
+void ocfs2_initialize_journal_triggers(struct super_block *sb,
+ struct ocfs2_triggers triggers[])
+{
+ enum ocfs2_journal_trigger_type type;
-static struct ocfs2_triggers dl_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check),
-};
+ for (type = OCFS2_JTR_DI; type < OCFS2_JOURNAL_TRIGGER_COUNT; type++)
+ ocfs2_setup_csum_triggers(sb, type, &triggers[type]);
+}
static int __ocfs2_journal_access(handle_t *handle,
struct ocfs2_caching_info *ci,
@@ -708,56 +710,91 @@ static int __ocfs2_journal_access(handle_t *handle,
int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DI],
+ type);
}
int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_EB],
+ type);
}
int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &rb_triggers,
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_RB],
type);
}
int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_GD],
+ type);
}
int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DB],
+ type);
}
int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_XB],
+ type);
}
int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &dq_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DQ],
+ type);
}
int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DR],
+ type);
}
int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DL],
+ type);
}
int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
@@ -778,13 +815,15 @@ void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
if (!is_handle_aborted(handle)) {
journal_t *journal = handle->h_transaction->t_journal;
- mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed. "
- "Aborting transaction and journal.\n");
+ mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed: "
+ "handle type %u started at line %u, credits %u/%u "
+ "errcode %d. Aborting transaction and journal.\n",
+ handle->h_type, handle->h_line_no,
+ handle->h_requested_credits,
+ jbd2_handle_buffer_credits(handle), status);
handle->h_err = status;
jbd2_journal_abort_handle(handle);
jbd2_journal_abort(journal, status);
- ocfs2_abort(bh->b_assoc_map->host->i_sb,
- "Journal already aborted.\n");
}
}
}
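ocfs2_assure_trans_credits() above only extends the handle by the shortfall, and only when the current credit count is below what the caller needs; ocfs2_dio_end_io_write() then calls it once per extent in the zero list so a long list cannot exhaust the transaction. A compressed standalone sketch of that top-up logic, with a plain integer in place of the jbd2 handle (assumed names, not the ocfs2 code):

#include <stdio.h>

struct handle { int credits; };

static int extend(struct handle *h, int more)
{
	h->credits += more;		/* stands in for ocfs2_extend_trans() */
	return 0;
}

static int assure_credits(struct handle *h, int nblocks)
{
	if (h->credits >= nblocks)
		return 0;		/* already enough, nothing to do */
	return extend(h, nblocks - h->credits);
}

int main(void)
{
	struct handle h = { .credits = 3 };

	assure_credits(&h, 8);
	printf("credits now %d\n", h.credits);	/* 8 */
	return 0;
}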
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 41c9fe7e62f9..e3c3a35dc5e0 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -243,6 +243,8 @@ handle_t *ocfs2_start_trans(struct ocfs2_super *osb,
int ocfs2_commit_trans(struct ocfs2_super *osb,
handle_t *handle);
int ocfs2_extend_trans(handle_t *handle, int nblocks);
+int ocfs2_assure_trans_credits(handle_t *handle,
+ int nblocks);
int ocfs2_allocate_extend_trans(handle_t *handle,
int thresh);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index a503c553bab2..8fe826143d7b 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -284,6 +284,30 @@ enum ocfs2_mount_options
#define OCFS2_OSB_ERROR_FS 0x0004
#define OCFS2_DEFAULT_ATIME_QUANTUM 60
+struct ocfs2_triggers {
+ struct jbd2_buffer_trigger_type ot_triggers;
+ int ot_offset;
+ struct super_block *sb;
+};
+
+enum ocfs2_journal_trigger_type {
+ OCFS2_JTR_DI,
+ OCFS2_JTR_EB,
+ OCFS2_JTR_RB,
+ OCFS2_JTR_GD,
+ OCFS2_JTR_DB,
+ OCFS2_JTR_XB,
+ OCFS2_JTR_DQ,
+ OCFS2_JTR_DR,
+ OCFS2_JTR_DL,
+ OCFS2_JTR_NONE /* This must be the last entry */
+};
+
+#define OCFS2_JOURNAL_TRIGGER_COUNT OCFS2_JTR_NONE
+
+void ocfs2_initialize_journal_triggers(struct super_block *sb,
+ struct ocfs2_triggers triggers[]);
+
struct ocfs2_journal;
struct ocfs2_slot_info;
struct ocfs2_recovery_map;
@@ -351,6 +375,9 @@ struct ocfs2_super
struct ocfs2_journal *journal;
unsigned long osb_commit_interval;
+ /* Journal triggers for checksum */
+ struct ocfs2_triggers s_journal_triggers[OCFS2_JOURNAL_TRIGGER_COUNT];
+
struct delayed_work la_enable_wq;
/*
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
index 60e208b01c8d..0511c69c9fde 100644
--- a/fs/ocfs2/ocfs2_trace.h
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -2577,6 +2577,8 @@ DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_commit_cache_end);
DEFINE_OCFS2_INT_INT_EVENT(ocfs2_extend_trans);
+DEFINE_OCFS2_INT_EVENT(ocfs2_assure_trans_credits);
+
DEFINE_OCFS2_INT_EVENT(ocfs2_extend_trans_restart);
DEFINE_OCFS2_INT_INT_EVENT(ocfs2_allocate_extend_trans);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 8aabaed2c1cb..afee70125ae3 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1075,9 +1075,11 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
debugfs_create_file("fs_state", S_IFREG|S_IRUSR, osb->osb_debug_root,
osb, &ocfs2_osb_debug_fops);
- if (ocfs2_meta_ecc(osb))
+ if (ocfs2_meta_ecc(osb)) {
+ ocfs2_initialize_journal_triggers(sb, osb->s_journal_triggers);
ocfs2_blockcheck_stats_debugfs_install( &osb->osb_ecc_stats,
osb->osb_debug_root);
+ }
status = ocfs2_mount_volume(sb);
if (status < 0)
diff --git a/fs/open.c b/fs/open.c
index 89cafb572061..50e45bc7c4d8 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -202,13 +202,13 @@ long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
return error;
}
-SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length)
+SYSCALL_DEFINE2(ftruncate, unsigned int, fd, off_t, length)
{
return do_sys_ftruncate(fd, length, 1);
}
#ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_ulong_t, length)
+COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_off_t, length)
{
return do_sys_ftruncate(fd, length, 1);
}
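The ftruncate() prototype change above matters most for the compat entry point: a negative 32-bit length coming through compat_ulong_t is zero-extended into a large positive value and slips past the kernel's negative-length rejection, whereas compat_off_t (and off_t natively) sign-extends it so the check fires as intended. A small demonstration of the two extensions (standalone, made-up value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t compat_len = -1;			/* what 32-bit userspace passed */
	uint64_t zero_ext = (uint32_t)compat_len;	/* old path: via compat_ulong_t */
	int64_t  sign_ext = compat_len;			/* new path: via compat_off_t */

	printf("zero-extended: %llu (looks like a huge valid size)\n",
	       (unsigned long long)zero_ext);		/* 4294967295 */
	printf("sign-extended: %lld (correctly negative, rejected)\n",
	       (long long)sign_ext);			/* -1 */
	return 0;
}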
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 116f542442dd..ab65e98a1def 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -1314,10 +1314,6 @@ static int ovl_create_tmpfile(struct file *file, struct dentry *dentry,
int flags = file->f_flags | OVL_OPEN_FLAGS;
int err;
- err = ovl_copy_up(dentry->d_parent);
- if (err)
- return err;
-
old_cred = ovl_override_creds(dentry->d_sb);
err = ovl_setup_cred_for_create(dentry, inode, mode, old_cred);
if (err)
@@ -1360,6 +1356,10 @@ static int ovl_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
if (!OVL_FS(dentry->d_sb)->tmpfile)
return -EOPNOTSUPP;
+ err = ovl_copy_up(dentry->d_parent);
+ if (err)
+ return err;
+
err = ovl_want_write(dentry);
if (err)
return err;
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 063409069f56..5868cb222955 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -181,6 +181,10 @@ static int ovl_check_encode_origin(struct dentry *dentry)
struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
bool decodable = ofs->config.nfs_export;
+ /* No upper layer? */
+ if (!ovl_upper_mnt(ofs))
+ return 1;
+
/* Lower file handle for non-upper non-decodable */
if (!ovl_dentry_upper(dentry) && !decodable)
return 1;
@@ -209,7 +213,7 @@ static int ovl_check_encode_origin(struct dentry *dentry)
* ovl_connect_layer() will try to make origin's layer "connected" by
* copying up a "connectable" ancestor.
*/
- if (d_is_dir(dentry) && ovl_upper_mnt(ofs) && decodable)
+ if (d_is_dir(dentry) && decodable)
return ovl_connect_layer(dentry);
/* Lower file handle for indexed and non-upper dir/non-dir */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 18550c071d71..72a1acd03675 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3214,7 +3214,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
mm = get_task_mm(task);
if (mm) {
seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
- seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
+ seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
mmput(mm);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f8d35f993fe5..71e5039d940d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -707,6 +707,9 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
#ifdef CONFIG_X86_USER_SHADOW_STACK
[ilog2(VM_SHADOW_STACK)] = "ss",
#endif
+#ifdef CONFIG_64BIT
+ [ilog2(VM_SEALED)] = "sl",
+#endif
};
size_t i;
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 4a5614442dbf..ec7b2da2477a 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -282,14 +282,10 @@ static int do_signalfd4(int ufd, sigset_t *mask, int flags)
if (IS_ERR(file)) {
put_unused_fd(ufd);
kfree(ctx);
- return ufd;
+ return PTR_ERR(file);
}
file->f_mode |= FMODE_NOWAIT;
- /*
- * When we call this, the initialization must be complete, since
- * anon_inode_getfd() will install the fd.
- */
fd_install(ufd, file);
} else {
struct fd f = fdget(ufd);
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index a665aac9be9f..6397fdefd876 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -134,7 +134,7 @@ module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
module_param(enable_gcm_256, bool, 0644);
-MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
+MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
@@ -431,6 +431,7 @@ cifs_free_inode(struct inode *inode)
static void
cifs_evict_inode(struct inode *inode)
{
+ netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
if (inode->i_state & I_PINNING_NETFS_WB)
cifs_fscache_unuse_inode_cookie(inode, true);
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 73482734a8d8..557b68e99d0a 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -1494,6 +1494,8 @@ struct cifs_aio_ctx {
struct cifs_io_request {
struct netfs_io_request rreq;
struct cifsFileInfo *cfile;
+ struct TCP_Server_Info *server;
+ pid_t pid;
};
/* asynchronous read support */
@@ -1504,7 +1506,6 @@ struct cifs_io_subrequest {
struct cifs_io_request *req;
};
ssize_t got_bytes;
- pid_t pid;
unsigned int xid;
int result;
bool have_xid;
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index c46d418c1c0c..a2072ab9e586 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -2574,7 +2574,7 @@ typedef struct {
struct win_dev {
- unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO*/
+ unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO or LnxSOCK */
__le64 major;
__le64 minor;
} __attribute__((packed));
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 25e9ab947c17..595c4b673707 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -1345,8 +1345,8 @@ cifs_async_readv(struct cifs_io_subrequest *rdata)
if (rc)
return rc;
- smb->hdr.Pid = cpu_to_le16((__u16)rdata->pid);
- smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->pid >> 16));
+ smb->hdr.Pid = cpu_to_le16((__u16)rdata->req->pid);
+ smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->req->pid >> 16));
smb->AndXCommand = 0xFF; /* none */
smb->Fid = rdata->req->cfile->fid.netfid;
@@ -1689,8 +1689,8 @@ cifs_async_writev(struct cifs_io_subrequest *wdata)
if (rc)
goto async_writev_out;
- smb->hdr.Pid = cpu_to_le16((__u16)wdata->pid);
- smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->pid >> 16));
+ smb->hdr.Pid = cpu_to_le16((__u16)wdata->req->pid);
+ smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->req->pid >> 16));
smb->AndXCommand = 0xFF; /* none */
smb->Fid = wdata->req->cfile->fid.netfid;
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 9d5c2440abfc..f1f2573bb18d 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -134,17 +134,15 @@ fail:
static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
- struct TCP_Server_Info *server;
struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
+ struct TCP_Server_Info *server = req->server;
struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
size_t rsize = 0;
int rc;
rdata->xid = get_xid();
rdata->have_xid = true;
-
- server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
rdata->server = server;
if (cifs_sb->ctx->rsize == 0)
@@ -179,15 +177,8 @@ static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
struct netfs_io_request *rreq = subreq->rreq;
struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
- struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
- pid_t pid;
int rc = 0;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
- pid = req->cfile->pid;
- else
- pid = current->tgid; // Ummm... This may be a workqueue
-
cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
__func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
subreq->transferred, subreq->len);
@@ -201,16 +192,8 @@ static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
}
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
- rdata->pid = pid;
-
- rc = adjust_credits(rdata->server, &rdata->credits, rdata->subreq.len);
- if (!rc) {
- if (rdata->req->cfile->invalidHandle)
- rc = -EAGAIN;
- else
- rc = rdata->server->ops->async_readv(rdata);
- }
+ rc = rdata->server->ops->async_readv(rdata);
out:
if (rc)
netfs_subreq_terminated(subreq, rc, false);
@@ -245,11 +228,15 @@ static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
rreq->rsize = cifs_sb->ctx->rsize;
rreq->wsize = cifs_sb->ctx->wsize;
+ req->pid = current->tgid; // Ummm... This may be a workqueue
if (file) {
open_file = file->private_data;
rreq->netfs_priv = file->private_data;
req->cfile = cifsFileInfo_get(open_file);
+ req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+ req->pid = req->cfile->pid;
} else if (rreq->origin != NETFS_WRITEBACK) {
WARN_ON_ONCE(1);
return -EIO;
@@ -3200,8 +3187,6 @@ static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
ssize_t ret;
- WARN_ON_ONCE(iov_iter_count(iter) != PAGE_SIZE);
-
if (iov_iter_rw(iter) == READ)
ret = netfs_unbuffered_read_iter_locked(iocb, iter);
else
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 262576573eb5..4a8aa1de9522 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -606,6 +606,10 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
fattr->cf_rdev = MKDEV(mjr, mnr);
}
+ } else if (memcmp("LnxSOCK", pbuf, 8) == 0) {
+ cifs_dbg(FYI, "Socket\n");
+ fattr->cf_mode |= S_IFSOCK;
+ fattr->cf_dtype = DT_SOCK;
} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
cifs_dbg(FYI, "Symlink\n");
fattr->cf_mode |= S_IFLNK;
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 4ce6c3121a7e..c8e536540895 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -4997,6 +4997,9 @@ static int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
pdev.major = cpu_to_le64(MAJOR(dev));
pdev.minor = cpu_to_le64(MINOR(dev));
break;
+ case S_IFSOCK:
+ strscpy(pdev.type, "LnxSOCK");
+ break;
case S_IFIFO:
strscpy(pdev.type, "LnxFIFO");
break;
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 993ac36c3d58..2ae2dbb6202b 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -4484,6 +4484,16 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
return rc;
}
+static void smb2_readv_worker(struct work_struct *work)
+{
+ struct cifs_io_subrequest *rdata =
+ container_of(work, struct cifs_io_subrequest, subreq.work);
+
+ netfs_subreq_terminated(&rdata->subreq,
+ (rdata->result == 0 || rdata->result == -EAGAIN) ?
+ rdata->got_bytes : rdata->result, true);
+}
+
static void
smb2_readv_callback(struct mid_q_entry *mid)
{
@@ -4577,12 +4587,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
if (rdata->subreq.start < rdata->subreq.rreq->i_size)
rdata->result = 0;
}
- if (rdata->result == 0 || rdata->result == -EAGAIN)
- iov_iter_advance(&rdata->subreq.io_iter, rdata->got_bytes);
rdata->credits.value = 0;
- netfs_subreq_terminated(&rdata->subreq,
- (rdata->result == 0 || rdata->result == -EAGAIN) ?
- rdata->got_bytes : rdata->result, true);
+ INIT_WORK(&rdata->subreq.work, smb2_readv_worker);
+ queue_work(cifsiod_wq, &rdata->subreq.work);
release_mid(mid);
add_credits(server, &credits, 0);
}
@@ -4614,7 +4621,7 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
io_parms.length = rdata->subreq.len;
io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid;
io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid;
- io_parms.pid = rdata->pid;
+ io_parms.pid = rdata->req->pid;
rc = smb2_new_read_req(
(void **) &buf, &total_len, &io_parms, rdata, 0, 0);
@@ -4789,7 +4796,6 @@ smb2_writev_callback(struct mid_q_entry *mid)
wdata->result = -ENOSPC;
else
wdata->subreq.len = written;
- iov_iter_advance(&wdata->subreq.io_iter, written);
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
@@ -4867,7 +4873,7 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
.length = wdata->subreq.len,
.persistent_fid = wdata->req->cfile->fid.persistent_fid,
.volatile_fid = wdata->req->cfile->fid.volatile_fid,
- .pid = wdata->pid,
+ .pid = wdata->req->pid,
};
io_parms = &_io_parms;
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index 02135a605305..1476c445cadc 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -216,8 +216,8 @@ smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
}
tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
if (!tcon) {
- cifs_put_smb_ses(ses);
spin_unlock(&cifs_tcp_ses_lock);
+ cifs_put_smb_ses(ses);
return NULL;
}
spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/smb/common/cifs_arc4.c b/fs/smb/common/cifs_arc4.c
index 043e4cb839fa..df360ca47826 100644
--- a/fs/smb/common/cifs_arc4.c
+++ b/fs/smb/common/cifs_arc4.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include "arc4.h"
+MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
MODULE_LICENSE("GPL");
int cifs_arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len)
diff --git a/fs/smb/common/cifs_md4.c b/fs/smb/common/cifs_md4.c
index 50f78cfc6ce9..7ee7f4dad90c 100644
--- a/fs/smb/common/cifs_md4.c
+++ b/fs/smb/common/cifs_md4.c
@@ -24,6 +24,7 @@
#include <asm/byteorder.h>
#include "md4.h"
+MODULE_DESCRIPTION("MD4 Message Digest Algorithm (RFC1320)");
MODULE_LICENSE("GPL");
static inline u32 lshift(u32 x, unsigned int s)
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index b6c5a8ea3887..e7e07891781b 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -630,6 +630,12 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
return name;
}
+ if (*name == '\\') {
+	pr_err("directory name with a leading slash is not allowed\n");
+ kfree(name);
+ return ERR_PTR(-EINVAL);
+ }
+
ksmbd_conv_path_to_unix(name);
ksmbd_strip_last_slash(name);
return name;
@@ -2361,7 +2367,8 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
if (rc > 0) {
rc = ksmbd_vfs_remove_xattr(idmap,
path,
- attr_name);
+ attr_name,
+ get_write);
if (rc < 0) {
ksmbd_debug(SMB,
@@ -2376,7 +2383,7 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
} else {
rc = ksmbd_vfs_setxattr(idmap, path, attr_name, value,
le16_to_cpu(eabuf->EaValueLength),
- 0, true);
+ 0, get_write);
if (rc < 0) {
ksmbd_debug(SMB,
"ksmbd_vfs_setxattr is failed(%d)\n",
@@ -2468,7 +2475,7 @@ static int smb2_remove_smb_xattrs(const struct path *path)
!strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX,
STREAM_PREFIX_LEN)) {
err = ksmbd_vfs_remove_xattr(idmap, path,
- name);
+ name, true);
if (err)
ksmbd_debug(SMB, "remove xattr failed : %s\n",
name);
@@ -2842,20 +2849,11 @@ int smb2_open(struct ksmbd_work *work)
}
if (req->NameLength) {
- if ((req->CreateOptions & FILE_DIRECTORY_FILE_LE) &&
- *(char *)req->Buffer == '\\') {
- pr_err("not allow directory name included leading slash\n");
- rc = -EINVAL;
- goto err_out2;
- }
-
name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset),
le16_to_cpu(req->NameLength),
work->conn->local_nls);
if (IS_ERR(name)) {
rc = PTR_ERR(name);
- if (rc != -ENOMEM)
- rc = -ENOENT;
name = NULL;
goto err_out2;
}
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 51b1b0bed616..9e859ba010cf 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -1058,16 +1058,21 @@ int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
}
int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
- const struct path *path, char *attr_name)
+ const struct path *path, char *attr_name,
+ bool get_write)
{
int err;
- err = mnt_want_write(path->mnt);
- if (err)
- return err;
+	if (get_write) {
+ err = mnt_want_write(path->mnt);
+ if (err)
+ return err;
+ }
err = vfs_removexattr(idmap, path->dentry, attr_name);
- mnt_drop_write(path->mnt);
+
+	if (get_write)
+ mnt_drop_write(path->mnt);
return err;
}
@@ -1380,7 +1385,7 @@ int ksmbd_vfs_remove_sd_xattrs(struct mnt_idmap *idmap, const struct path *path)
ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
if (!strncmp(name, XATTR_NAME_SD, XATTR_NAME_SD_LEN)) {
- err = ksmbd_vfs_remove_xattr(idmap, path, name);
+ err = ksmbd_vfs_remove_xattr(idmap, path, name, true);
if (err)
ksmbd_debug(SMB, "remove xattr failed : %s\n", name);
}
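Callers that have already obtained write access on the mount pass get_write == false so the helper skips its own mnt_want_write()/mnt_drop_write() pair; a hedged sketch of that calling convention (the wrapper function is hypothetical):

#include <linux/mount.h>

#include "vfs.h"	/* ksmbd's fs/smb/server/vfs.h */

/* Sketch: remove one xattr while the caller already holds write access. */
static int demo_remove_xattr_locked(struct mnt_idmap *idmap,
				    const struct path *path, char *attr_name)
{
	int err;

	err = mnt_want_write(path->mnt);
	if (err)
		return err;

	/* false: write access is already held, the helper must not take it again */
	err = ksmbd_vfs_remove_xattr(idmap, path, attr_name, false);

	mnt_drop_write(path->mnt);
	return err;
}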
diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
index cfe1c8092f23..cb76f4b5bafe 100644
--- a/fs/smb/server/vfs.h
+++ b/fs/smb/server/vfs.h
@@ -114,7 +114,8 @@ int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
size_t *xattr_stream_name_size, int s_type);
int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
- const struct path *path, char *attr_name);
+ const struct path *path, char *attr_name,
+ bool get_write);
int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
unsigned int flags, struct path *parent_path,
struct path *path, bool caseless);
diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
index 6cb599cd287e..8b2e37c8716e 100644
--- a/fs/smb/server/vfs_cache.c
+++ b/fs/smb/server/vfs_cache.c
@@ -254,7 +254,8 @@ static void __ksmbd_inode_close(struct ksmbd_file *fp)
ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp),
&filp->f_path,
- fp->stream.name);
+ fp->stream.name,
+ true);
if (err)
pr_err("remove xattr failed : %s\n",
fp->stream.name);
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 6cb8b2ddc541..6c55a6e88eba 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1008,13 +1008,12 @@ xfs_alloc_cur_finish(
struct xfs_alloc_arg *args,
struct xfs_alloc_cur *acur)
{
- struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
int error;
ASSERT(acur->cnt && acur->bnolt);
ASSERT(acur->bno >= acur->rec_bno);
ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
- ASSERT(acur->rec_bno + acur->rec_len <= be32_to_cpu(agf->agf_length));
+ ASSERT(xfs_verify_agbext(args->pag, acur->rec_bno, acur->rec_len));
error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
acur->rec_len, acur->bno, acur->len, 0);
@@ -1217,7 +1216,6 @@ STATIC int /* error */
xfs_alloc_ag_vextent_exact(
xfs_alloc_arg_t *args) /* allocation argument structure */
{
- struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
struct xfs_btree_cur *bno_cur;/* by block-number btree cursor */
struct xfs_btree_cur *cnt_cur;/* by count btree cursor */
int error;
@@ -1297,7 +1295,7 @@ xfs_alloc_ag_vextent_exact(
*/
cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, args->agbp,
args->pag);
- ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
+ ASSERT(xfs_verify_agbext(args->pag, args->agbno, args->len));
error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
args->len, XFSA_FIXUP_BNO_OK);
if (error) {
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 430cd3244c14..f30bcc64100d 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -329,26 +329,20 @@ xfs_attr_calc_size(
return nblks;
}
-/* Initialize transaction reservation for attr operations */
-void
-xfs_init_attr_trans(
- struct xfs_da_args *args,
- struct xfs_trans_res *tres,
- unsigned int *total)
+/* Initialize transaction reservation for an xattr set/replace/upsert */
+inline struct xfs_trans_res
+xfs_attr_set_resv(
+ const struct xfs_da_args *args)
{
- struct xfs_mount *mp = args->dp->i_mount;
-
- if (args->value) {
- tres->tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
- M_RES(mp)->tr_attrsetrt.tr_logres *
- args->total;
- tres->tr_logcount = XFS_ATTRSET_LOG_COUNT;
- tres->tr_logflags = XFS_TRANS_PERM_LOG_RES;
- *total = args->total;
- } else {
- *tres = M_RES(mp)->tr_attrrm;
- *total = XFS_ATTRRM_SPACE_RES(mp);
- }
+ struct xfs_mount *mp = args->dp->i_mount;
+ struct xfs_trans_res ret = {
+ .tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
+ M_RES(mp)->tr_attrsetrt.tr_logres * args->total,
+ .tr_logcount = XFS_ATTRSET_LOG_COUNT,
+ .tr_logflags = XFS_TRANS_PERM_LOG_RES,
+ };
+
+ return ret;
}
/*
@@ -1006,7 +1000,7 @@ xfs_attr_set(
struct xfs_trans_res tres;
int error, local;
int rmt_blks = 0;
- unsigned int total;
+ unsigned int total = 0;
ASSERT(!args->trans);
@@ -1033,10 +1027,15 @@ xfs_attr_set(
if (!local)
rmt_blks = xfs_attr3_rmt_blocks(mp, args->valuelen);
+
+ tres = xfs_attr_set_resv(args);
+ total = args->total;
break;
case XFS_ATTRUPDATE_REMOVE:
XFS_STATS_INC(mp, xs_attr_remove);
rmt_blks = xfs_attr3_max_rmt_blocks(mp);
+ tres = M_RES(mp)->tr_attrrm;
+ total = XFS_ATTRRM_SPACE_RES(mp);
break;
}
@@ -1044,7 +1043,6 @@ xfs_attr_set(
* Root fork attributes can use reserved data blocks for this
* operation if necessary
*/
- xfs_init_attr_trans(args, &tres, &total);
error = xfs_trans_alloc_inode(dp, &tres, total, 0, rsvd, &args->trans);
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index 088cb7b30168..0e51d0723f9a 100644
--- a/fs/xfs/libxfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -565,8 +565,7 @@ bool xfs_attr_check_namespace(unsigned int attr_flags);
bool xfs_attr_namecheck(unsigned int attr_flags, const void *name,
size_t length);
int xfs_attr_calc_size(struct xfs_da_args *args, int *local);
-void xfs_init_attr_trans(struct xfs_da_args *args, struct xfs_trans_res *tres,
- unsigned int *total);
+struct xfs_trans_res xfs_attr_set_resv(const struct xfs_da_args *args);
/*
* Check to see if the attr should be upgraded from non-existent or shortform to
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 3b3206d312d6..6af6f744fdd6 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -4058,20 +4058,32 @@ xfs_bmapi_reserve_delalloc(
xfs_extlen_t indlen;
uint64_t fdblocks;
int error;
- xfs_fileoff_t aoff = off;
+ xfs_fileoff_t aoff;
+ bool use_cowextszhint =
+ whichfork == XFS_COW_FORK && !prealloc;
+retry:
/*
* Cap the alloc length. Keep track of prealloc so we know whether to
* tag the inode before we return.
*/
+ aoff = off;
alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
if (!eof)
alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
if (prealloc && alen >= len)
prealloc = alen - len;
- /* Figure out the extent size, adjust alen */
- if (whichfork == XFS_COW_FORK) {
+ /*
+	 * If we're targeting the COW fork but aren't creating a speculative
+ * posteof preallocation, try to expand the reservation to align with
+ * the COW extent size hint if there's sufficient free space.
+ *
+ * Unlike the data fork, the CoW cancellation functions will free all
+ * the reservations at inactivation, so we don't require that every
+ * delalloc reservation have a dirty pagecache.
+ */
+ if (use_cowextszhint) {
struct xfs_bmbt_irec prev;
xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
@@ -4090,7 +4102,7 @@ xfs_bmapi_reserve_delalloc(
*/
error = xfs_quota_reserve_blkres(ip, alen);
if (error)
- return error;
+ goto out;
/*
* Split changing sb for alen and indlen since they could be coming
@@ -4140,6 +4152,17 @@ out_unreserve_frextents:
out_unreserve_quota:
if (XFS_IS_QUOTA_ON(mp))
xfs_quota_unreserve_blkres(ip, alen);
+out:
+ if (error == -ENOSPC || error == -EDQUOT) {
+ trace_xfs_delalloc_enospc(ip, off, len);
+
+ if (prealloc || use_cowextszhint) {
+ /* retry without any preallocation */
+ use_cowextszhint = false;
+ prealloc = 0;
+ goto retry;
+ }
+ }
return error;
}
@@ -6383,6 +6406,7 @@ xfs_bunmapi_range(
error = xfs_defer_finish(tpp);
if (error)
goto out;
+ cond_resched();
}
out:
return error;
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 97996cb79aaa..454b63ef7201 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -996,7 +996,7 @@ struct xfs_getparents_by_handle {
#define XFS_IOC_FSGEOMETRY _IOR ('X', 126, struct xfs_fsop_geom)
#define XFS_IOC_BULKSTAT _IOR ('X', 127, struct xfs_bulkstat_req)
#define XFS_IOC_INUMBERS _IOR ('X', 128, struct xfs_inumbers_req)
-#define XFS_IOC_EXCHANGE_RANGE _IOWR('X', 129, struct xfs_exchange_range)
+#define XFS_IOC_EXCHANGE_RANGE _IOW ('X', 129, struct xfs_exchange_range)
/* XFS_IOC_GETFSUUID ---------- deprecated 140 */
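The change only affects how the command number encodes data flow: _IOW declares that userspace passes the structure into the kernel, while _IOWR would additionally promise that the kernel copies it back, which this interface never does. A hedged userspace sketch of issuing the call (header location assumed from xfsprogs, structure fields elided):

#include <stdio.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>	/* assumed to provide XFS_IOC_EXCHANGE_RANGE */

/* Sketch: the command is issued on one file; the other is named in the struct. */
static int demo_exchange(int fd, struct xfs_exchange_range *fxr)
{
	if (ioctl(fd, XFS_IOC_EXCHANGE_RANGE, fxr) < 0) {
		perror("XFS_IOC_EXCHANGE_RANGE");
		return -1;
	}
	return 0;
}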
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index d79002343d0b..513b50da6215 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -374,17 +374,40 @@ xfs_dinode_verify_fork(
/*
* For fork types that can contain local data, check that the fork
* format matches the size of local data contained within the fork.
- *
- * For all types, check that when the size says the should be in extent
- * or btree format, the inode isn't claiming it is in local format.
*/
if (whichfork == XFS_DATA_FORK) {
- if (S_ISDIR(mode) || S_ISLNK(mode)) {
+ /*
+ * A directory small enough to fit in the inode must be stored
+ * in local format. The directory sf <-> extents conversion
+ * code updates the directory size accordingly. Directories
+ * being truncated have zero size and are not subject to this
+ * check.
+ */
+ if (S_ISDIR(mode)) {
+ if (dip->di_size &&
+ be64_to_cpu(dip->di_size) <= fork_size &&
+ fork_format != XFS_DINODE_FMT_LOCAL)
+ return __this_address;
+ }
+
+ /*
+ * A symlink with a target small enough to fit in the inode can
+ * be stored in extents format if xattrs were added (thus
+ * converting the data fork from shortform to remote format)
+ * and then removed.
+ */
+ if (S_ISLNK(mode)) {
if (be64_to_cpu(dip->di_size) <= fork_size &&
+ fork_format != XFS_DINODE_FMT_EXTENTS &&
fork_format != XFS_DINODE_FMT_LOCAL)
return __this_address;
}
+ /*
+ * For all types, check that when the size says the fork should
+ * be in extent or btree format, the inode isn't claiming to be
+ * in local format.
+ */
if (be64_to_cpu(dip->di_size) > fork_size &&
fork_format == XFS_DINODE_FMT_LOCAL)
return __this_address;
@@ -508,9 +531,19 @@ xfs_dinode_verify(
if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
return __this_address;
- /* No zero-length symlinks/dirs. */
- if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
- return __this_address;
+ /*
+ * No zero-length symlinks/dirs unless they're unlinked and hence being
+ * inactivated.
+ */
+ if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0) {
+ if (dip->di_version > 1) {
+ if (dip->di_nlink)
+ return __this_address;
+ } else {
+ if (dip->di_onlink)
+ return __this_address;
+ }
+ }
fa = xfs_dinode_verify_nrext64(mp, dip);
if (fa)
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 09e4bf949bf8..6b56f0f6d4c1 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -1038,11 +1038,12 @@ xfs_log_sb(
* and hence we don't need have to update it here.
*/
if (xfs_has_lazysbcount(mp)) {
- mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount);
+ mp->m_sb.sb_icount = percpu_counter_sum_positive(&mp->m_icount);
mp->m_sb.sb_ifree = min_t(uint64_t,
- percpu_counter_sum(&mp->m_ifree),
+ percpu_counter_sum_positive(&mp->m_ifree),
mp->m_sb.sb_icount);
- mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);
+ mp->m_sb.sb_fdblocks =
+ percpu_counter_sum_positive(&mp->m_fdblocks);
}
xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index c013f0ba4f36..4cbcf7a86dbe 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -856,7 +856,7 @@ xfs_ioc_scrubv_metadata(
if (vec_bytes > PAGE_SIZE)
return -ENOMEM;
- uvectors = (void __user *)(uintptr_t)head.svh_vectors;
+ uvectors = u64_to_user_ptr(head.svh_vectors);
vectors = memdup_user(uvectors, vec_bytes);
if (IS_ERR(vectors))
return PTR_ERR(vectors);
diff --git a/fs/xfs/scrub/xfarray.c b/fs/xfs/scrub/xfarray.c
index 9185ae7088d4..cdd13ed9c569 100644
--- a/fs/xfs/scrub/xfarray.c
+++ b/fs/xfs/scrub/xfarray.c
@@ -822,12 +822,14 @@ xfarray_sort_scan(
/* Grab the first folio that backs this array element. */
if (!si->folio) {
+ struct folio *folio;
loff_t next_pos;
- si->folio = xfile_get_folio(si->array->xfile, idx_pos,
+ folio = xfile_get_folio(si->array->xfile, idx_pos,
si->array->obj_size, XFILE_ALLOC);
- if (IS_ERR(si->folio))
- return PTR_ERR(si->folio);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ si->folio = folio;
si->first_folio_idx = xfarray_idx(si->array,
folio_pos(si->folio) + si->array->obj_size - 1);
@@ -1048,6 +1050,7 @@ xfarray_sort(
out_free:
trace_xfarray_sort_stats(si, error);
+ xfarray_sort_scan_done(si);
kvfree(si);
return error;
}
diff --git a/fs/xfs/xfs_attr_item.c b/fs/xfs/xfs_attr_item.c
index 2b10ac4c5fce..f683b7a9323f 100644
--- a/fs/xfs/xfs_attr_item.c
+++ b/fs/xfs/xfs_attr_item.c
@@ -746,7 +746,7 @@ xfs_attr_recover_work(
struct xfs_attri_log_format *attrp;
struct xfs_attri_log_nameval *nv = attrip->attri_nameval;
int error;
- int total;
+ unsigned int total = 0;
/*
* First check the validity of the attr described by the ATTRI. If any
@@ -763,7 +763,20 @@ xfs_attr_recover_work(
return PTR_ERR(attr);
args = attr->xattri_da_args;
- xfs_init_attr_trans(args, &resv, &total);
+ switch (xfs_attr_intent_op(attr)) {
+ case XFS_ATTRI_OP_FLAGS_PPTR_SET:
+ case XFS_ATTRI_OP_FLAGS_PPTR_REPLACE:
+ case XFS_ATTRI_OP_FLAGS_SET:
+ case XFS_ATTRI_OP_FLAGS_REPLACE:
+ resv = xfs_attr_set_resv(args);
+ total = args->total;
+ break;
+ case XFS_ATTRI_OP_FLAGS_PPTR_REMOVE:
+ case XFS_ATTRI_OP_FLAGS_REMOVE:
+ resv = M_RES(mp)->tr_attrrm;
+ total = XFS_ATTRRM_SPACE_RES(mp);
+ break;
+ }
resv = xlog_recover_resv(&resv);
error = xfs_trans_alloc(mp, &resv, total, 0, XFS_TRANS_RESERVE, &tp);
if (error)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index ac2e77ebb54c..a4d9fbc21b83 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -486,13 +486,11 @@ out_unlock:
/*
* Test whether it is appropriate to check an inode for and free post EOF
- * blocks. The 'force' parameter determines whether we should also consider
- * regular files that are marked preallocated or append-only.
+ * blocks.
*/
bool
xfs_can_free_eofblocks(
- struct xfs_inode *ip,
- bool force)
+ struct xfs_inode *ip)
{
struct xfs_bmbt_irec imap;
struct xfs_mount *mp = ip->i_mount;
@@ -526,11 +524,11 @@ xfs_can_free_eofblocks(
return false;
/*
- * Do not free real preallocated or append-only files unless the file
- * has delalloc blocks and we are forced to remove them.
+ * Only free real extents for inodes with persistent preallocations or
+ * the append-only flag.
*/
if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
- if (!force || ip->i_delayed_blks == 0)
+ if (ip->i_delayed_blks == 0)
return false;
/*
@@ -584,6 +582,22 @@ xfs_free_eofblocks(
/* Wait on dio to ensure i_size has settled. */
inode_dio_wait(VFS_I(ip));
+ /*
+ * For preallocated files only free delayed allocations.
+ *
+ * Note that this means we also leave speculative preallocations in
+ * place for preallocated files.
+ */
+ if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) {
+ if (ip->i_delayed_blks) {
+ xfs_bmap_punch_delalloc_range(ip,
+ round_up(XFS_ISIZE(ip), mp->m_sb.sb_blocksize),
+ LLONG_MAX);
+ }
+ xfs_inode_clear_eofblocks_tag(ip);
+ return 0;
+ }
+
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
if (error) {
ASSERT(xfs_is_shutdown(mp));
@@ -891,7 +905,7 @@ xfs_prepare_shift(
* Trim eofblocks to avoid shifting uninitialized post-eof preallocation
* into the accessible region of the file.
*/
- if (xfs_can_free_eofblocks(ip, true)) {
+ if (xfs_can_free_eofblocks(ip)) {
error = xfs_free_eofblocks(ip);
if (error)
return error;
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 51f84d8ff372..eb0895bfb9da 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -63,7 +63,7 @@ int xfs_insert_file_space(struct xfs_inode *, xfs_off_t offset,
xfs_off_t len);
/* EOF block manipulation functions */
-bool xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
+bool xfs_can_free_eofblocks(struct xfs_inode *ip);
int xfs_free_eofblocks(struct xfs_inode *ip);
int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
diff --git a/fs/xfs/xfs_handle.c b/fs/xfs/xfs_handle.c
index c8785ed59543..a3f16e9b6fe5 100644
--- a/fs/xfs/xfs_handle.c
+++ b/fs/xfs/xfs_handle.c
@@ -773,11 +773,6 @@ xfs_getparents_expand_lastrec(
trace_xfs_getparents_expand_lastrec(gpx->ip, gp, &gpx->context, gpr);
}
-static inline void __user *u64_to_uptr(u64 val)
-{
- return (void __user *)(uintptr_t)val;
-}
-
/* Retrieve the parent pointers for a given inode. */
STATIC int
xfs_getparents(
@@ -862,7 +857,7 @@ xfs_getparents(
ASSERT(gpx->context.firstu <= gpx->gph.gph_request.gp_bufsize);
/* Copy the records to userspace. */
- if (copy_to_user(u64_to_uptr(gpx->gph.gph_request.gp_buffer),
+ if (copy_to_user(u64_to_user_ptr(gpx->gph.gph_request.gp_buffer),
gpx->krecords, gpx->context.firstu))
error = -EFAULT;
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 0953163a2d84..9967334ea99f 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1155,7 +1155,7 @@ xfs_inode_free_eofblocks(
}
*lockflags |= XFS_IOLOCK_EXCL;
- if (xfs_can_free_eofblocks(ip, false))
+ if (xfs_can_free_eofblocks(ip))
return xfs_free_eofblocks(ip);
/* inode could be preallocated or append-only */
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 58fb7a5062e1..a4e3cd8971fc 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -42,6 +42,7 @@
#include "xfs_pnfs.h"
#include "xfs_parent.h"
#include "xfs_xattr.h"
+#include "xfs_sb.h"
struct kmem_cache *xfs_inode_cache;
@@ -870,9 +871,16 @@ xfs_init_new_inode(
* this saves us from needing to run a separate transaction to set the
* fork offset in the immediate future.
*/
- if (init_xattrs && xfs_has_attr(mp)) {
+ if (init_xattrs) {
ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
+
+ if (!xfs_has_attr(mp)) {
+ spin_lock(&mp->m_sb_lock);
+ xfs_add_attr(mp);
+ spin_unlock(&mp->m_sb_lock);
+ xfs_log_sb(tp);
+ }
}
/*
@@ -1595,7 +1603,7 @@ xfs_release(
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
return 0;
- if (xfs_can_free_eofblocks(ip, false)) {
+ if (xfs_can_free_eofblocks(ip)) {
/*
* Check if the inode is being opened, written and closed
* frequently and we have delayed allocation blocks outstanding
@@ -1856,15 +1864,13 @@ xfs_inode_needs_inactive(
/*
* This file isn't being freed, so check if there are post-eof blocks
- * to free. @force is true because we are evicting an inode from the
- * cache. Post-eof blocks must be freed, lest we end up with broken
- * free space accounting.
+ * to free.
*
* Note: don't bother with iolock here since lockdep complains about
* acquiring it in reclaim context. We have the only reference to the
* inode at this point anyways.
*/
- return xfs_can_free_eofblocks(ip, true);
+ return xfs_can_free_eofblocks(ip);
}
/*
@@ -1947,15 +1953,11 @@ xfs_inactive(
if (VFS_I(ip)->i_nlink != 0) {
/*
- * force is true because we are evicting an inode from the
- * cache. Post-eof blocks must be freed, lest we end up with
- * broken free space accounting.
- *
* Note: don't bother with iolock here since lockdep complains
* about acquiring it in reclaim context. We have the only
* reference to the inode at this point anyways.
*/
- if (xfs_can_free_eofblocks(ip, true))
+ if (xfs_can_free_eofblocks(ip))
error = xfs_free_eofblocks(ip);
goto out;
@@ -2548,11 +2550,26 @@ xfs_ifree_cluster(
* This buffer may not have been correctly initialised as we
* didn't read it from disk. That's not important because we are
 * only using it to mark the buffer as stale in the log, and to
- * attach stale cached inodes on it. That means it will never be
- * dispatched for IO. If it is, we want to know about it, and we
- * want it to fail. We can acheive this by adding a write
- * verifier to the buffer.
+ * attach stale cached inodes on it.
+ *
+ * For the inode that triggered the cluster freeing, this
+ * attachment may occur in xfs_inode_item_precommit() after we
+ * have marked this buffer stale. If this buffer was not in
+ * memory before xfs_ifree_cluster() started, it will not be
+ * marked XBF_DONE and this will cause problems later in
+ * xfs_inode_item_precommit() when we trip over a (stale, !done)
+	 * buffer attached to the transaction.
+ *
+	 * Hence we have to mark the buffer as XBF_DONE here. This is
+ * safe because we are also marking the buffer as XBF_STALE and
+ * XFS_BLI_STALE. That means it will never be dispatched for
+ * IO and it won't be unlocked until the cluster freeing has
+ * been committed to the journal and the buffer unpinned. If it
+ * is written, we want to know about it, and we want it to
+	 * fail. We can achieve this by adding a write verifier to the
+ * buffer.
*/
+ bp->b_flags |= XBF_DONE;
bp->b_ops = &xfs_inode_buf_ops;
/*
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 378342673925..414903885ab9 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1148,33 +1148,23 @@ xfs_buffered_write_iomap_begin(
}
}
-retry:
- error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
- end_fsb - offset_fsb, prealloc_blocks,
- allocfork == XFS_DATA_FORK ? &imap : &cmap,
- allocfork == XFS_DATA_FORK ? &icur : &ccur,
- allocfork == XFS_DATA_FORK ? eof : cow_eof);
- switch (error) {
- case 0:
- break;
- case -ENOSPC:
- case -EDQUOT:
- /* retry without any preallocation */
- trace_xfs_delalloc_enospc(ip, offset, count);
- if (prealloc_blocks) {
- prealloc_blocks = 0;
- goto retry;
- }
- fallthrough;
- default:
- goto out_unlock;
- }
-
if (allocfork == XFS_COW_FORK) {
+ error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
+ end_fsb - offset_fsb, prealloc_blocks, &cmap,
+ &ccur, cow_eof);
+ if (error)
+ goto out_unlock;
+
trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
goto found_cow;
}
+ error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
+ end_fsb - offset_fsb, prealloc_blocks, &imap, &icur,
+ eof);
+ if (error)
+ goto out_unlock;
+
/*
* Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
* them out if the write happens to fail.
diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
index 730c8d48da28..86f14ec7c31f 100644
--- a/fs/xfs/xfs_iwalk.c
+++ b/fs/xfs/xfs_iwalk.c
@@ -351,7 +351,6 @@ xfs_iwalk_run_callbacks(
int *has_more)
{
struct xfs_mount *mp = iwag->mp;
- struct xfs_inobt_rec_incore *irec;
xfs_agino_t next_agino;
int error;
@@ -361,8 +360,8 @@ xfs_iwalk_run_callbacks(
/* Delete cursor but remember the last record we cached... */
xfs_iwalk_del_inobt(iwag->tp, curpp, agi_bpp, 0);
- irec = &iwag->recs[iwag->nr_recs - 1];
- ASSERT(next_agino >= irec->ir_startino + XFS_INODES_PER_CHUNK);
+ ASSERT(next_agino >= iwag->recs[iwag->nr_recs - 1].ir_startino +
+ XFS_INODES_PER_CHUNK);
if (iwag->drop_trans) {
xfs_trans_cancel(iwag->tp);
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 063a2e00d169..265a2a418bc7 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1387,6 +1387,7 @@ xfs_reflink_remap_blocks(
destoff += imap.br_blockcount;
len -= imap.br_blockcount;
remapped_len += imap.br_blockcount;
+ cond_resched();
}
if (error)
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 94d0fc3bd412..80dc36f9d527 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -663,6 +663,10 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_adr_space_type
space_id))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_execute_orphan_reg_method(acpi_handle device,
+ acpi_adr_space_type
+ space_id))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_remove_address_space_handler(acpi_handle
device,
acpi_adr_space_type
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
index 933ca6581aba..fabcefe8a80a 100644
--- a/include/asm-generic/syscalls.h
+++ b/include/asm-generic/syscalls.h
@@ -19,7 +19,7 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
#ifndef sys_mmap
asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
- unsigned long fd, off_t pgoff);
+ unsigned long fd, unsigned long off);
#endif
#ifndef sys_rt_sigreturn
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 82570f77e817..2a74fa9d0ce5 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -56,8 +56,8 @@ struct drm_buddy_block {
struct list_head tmp_link;
};
-/* Order-zero must be at least PAGE_SIZE */
-#define DRM_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
+/* Order-zero must be at least SZ_4K */
+#define DRM_BUDDY_MAX_ORDER (63 - 12)
/*
* Binary Buddy System.
@@ -85,7 +85,7 @@ struct drm_buddy {
unsigned int n_roots;
unsigned int max_order;
- /* Must be at least PAGE_SIZE */
+ /* Must be at least SZ_4K */
u64 chunk_size;
u64 size;
u64 avail;
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6357-auxadc.h b/include/dt-bindings/iio/adc/mediatek,mt6357-auxadc.h
new file mode 100644
index 000000000000..03ebb1d23953
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6357-auxadc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_MEDIATEK_MT6357_AUXADC_H
+#define _DT_BINDINGS_MEDIATEK_MT6357_AUXADC_H
+
+/* ADC Channel Index */
+#define MT6357_AUXADC_BATADC 0
+#define MT6357_AUXADC_ISENSE 1
+#define MT6357_AUXADC_VCDT 2
+#define MT6357_AUXADC_BAT_TEMP 3
+#define MT6357_AUXADC_CHIP_TEMP 4
+#define MT6357_AUXADC_ACCDET 5
+#define MT6357_AUXADC_VDCXO 6
+#define MT6357_AUXADC_TSX_TEMP 7
+#define MT6357_AUXADC_HPOFS_CAL 8
+#define MT6357_AUXADC_DCXO_TEMP 9
+#define MT6357_AUXADC_VCORE_TEMP 10
+#define MT6357_AUXADC_VPROC_TEMP 11
+#define MT6357_AUXADC_VBAT 12
+
+#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6358-auxadc.h b/include/dt-bindings/iio/adc/mediatek,mt6358-auxadc.h
new file mode 100644
index 000000000000..efa08398fafd
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6358-auxadc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_MEDIATEK_MT6358_AUXADC_H
+#define _DT_BINDINGS_MEDIATEK_MT6358_AUXADC_H
+
+/* ADC Channel Index */
+#define MT6358_AUXADC_BATADC 0
+#define MT6358_AUXADC_VCDT 1
+#define MT6358_AUXADC_BAT_TEMP 2
+#define MT6358_AUXADC_CHIP_TEMP 3
+#define MT6358_AUXADC_ACCDET 4
+#define MT6358_AUXADC_VDCXO 5
+#define MT6358_AUXADC_TSX_TEMP 6
+#define MT6358_AUXADC_HPOFS_CAL 7
+#define MT6358_AUXADC_DCXO_TEMP 8
+#define MT6358_AUXADC_VBIF 9
+#define MT6358_AUXADC_VCORE_TEMP 10
+#define MT6358_AUXADC_VPROC_TEMP 11
+#define MT6358_AUXADC_VGPU_TEMP 12
+#define MT6358_AUXADC_VBAT 13
+
+#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6359-auxadc.h b/include/dt-bindings/iio/adc/mediatek,mt6359-auxadc.h
new file mode 100644
index 000000000000..59826393ee7e
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6359-auxadc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_MEDIATEK_MT6359_AUXADC_H
+#define _DT_BINDINGS_MEDIATEK_MT6359_AUXADC_H
+
+/* ADC Channel Index */
+#define MT6359_AUXADC_BATADC 0
+#define MT6359_AUXADC_BAT_TEMP 1
+#define MT6359_AUXADC_CHIP_TEMP 2
+#define MT6359_AUXADC_ACCDET 3
+#define MT6359_AUXADC_VDCXO 4
+#define MT6359_AUXADC_TSX_TEMP 5
+#define MT6359_AUXADC_HPOFS_CAL 6
+#define MT6359_AUXADC_DCXO_TEMP 7
+#define MT6359_AUXADC_VBIF 8
+#define MT6359_AUXADC_VCORE_TEMP 9
+#define MT6359_AUXADC_VPROC_TEMP 10
+#define MT6359_AUXADC_VGPU_TEMP 11
+#define MT6359_AUXADC_VBAT 12
+#define MT6359_AUXADC_IBAT 13
+
+#endif
diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h
index 6fc4b445d3a1..b8a4f3ff4a3b 100644
--- a/include/dt-bindings/net/ti-dp83867.h
+++ b/include/dt-bindings/net/ti-dp83867.h
@@ -1,10 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
* Device Tree constants for the Texas Instruments DP83867 PHY
*
* Author: Dan Murphy <dmurphy@ti.com>
*
- * Copyright: (C) 2015 Texas Instruments, Inc.
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef _DT_BINDINGS_TI_DP83867_H
diff --git a/include/dt-bindings/net/ti-dp83869.h b/include/dt-bindings/net/ti-dp83869.h
index 218b1a64e975..917114aad7d0 100644
--- a/include/dt-bindings/net/ti-dp83869.h
+++ b/include/dt-bindings/net/ti-dp83869.h
@@ -1,10 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
* Device Tree constants for the Texas Instruments DP83869 PHY
*
* Author: Dan Murphy <dmurphy@ti.com>
*
- * Copyright: (C) 2019 Texas Instruments, Inc.
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef _DT_BINDINGS_TI_DP83869_H
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 956bcba5dbf2..2f9d36b72bd8 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -2242,7 +2242,7 @@ raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
/**
* raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: int value to add
+ * @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -4368,7 +4368,7 @@ raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
/**
* raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: s64 value to add
+ * @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -4690,4 +4690,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 14850c0b0db20c62fdc78ccd1d42b98b88d76331
+// b565db590afeeff0d7c9485ccbca5bb6e155749f
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
index debd487fe971..9409a6ddf3e0 100644
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -1349,7 +1349,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
/**
* atomic_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: int value to add
+ * @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -2927,7 +2927,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
/**
* atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: s64 value to add
+ * @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -4505,7 +4505,7 @@ atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
/**
* atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: long value to add
+ * @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -5050,4 +5050,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// ce5b65e0f1f8a276268b667194581d24bed219d4
+// 8829b337928e9508259079d32581775ececd415b
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
index 3ef844b3ab8a..f86b29d90877 100644
--- a/include/linux/atomic/atomic-long.h
+++ b/include/linux/atomic/atomic-long.h
@@ -1535,7 +1535,7 @@ raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
/**
* raw_atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: long value to add
+ * @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -1809,4 +1809,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
}
#endif /* _LINUX_ATOMIC_LONG_H */
-// 1c4a26fc77f345342953770ebe3c4d08e7ce2f9a
+// eadf183c3600b8b92b91839dd3be6bcc560c752d
diff --git a/include/linux/bio.h b/include/linux/bio.h
index d5379548d684..818e93612947 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -731,6 +731,7 @@ static inline bool bioset_initialized(struct bio_set *bs)
bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed);
+void bio_integrity_unmap_free_user(struct bio *bio);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
@@ -807,6 +808,9 @@ static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf,
{
return -EINVAL;
}
+static inline void bio_integrity_unmap_free_user(struct bio *bio)
+{
+}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index e253e7bd0d17..7428cb43952d 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -66,12 +66,6 @@ blk_integrity_queue_supports_integrity(struct request_queue *q)
return q->integrity.profile;
}
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
-{
- q->limits.max_integrity_segments = segs;
-}
-
static inline unsigned short
queue_max_integrity_segments(const struct request_queue *q)
{
@@ -151,10 +145,6 @@ static inline void blk_integrity_register(struct gendisk *d,
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
-{
-}
static inline unsigned short
queue_max_integrity_segments(const struct request_queue *q)
{
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aefdda9f4ec7..24c36929920b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -186,6 +186,7 @@ struct gendisk {
*/
unsigned int nr_zones;
unsigned int zone_capacity;
+ unsigned int last_zone_capacity;
unsigned long *conv_zones_bitmap;
unsigned int zone_wplugs_hash_bits;
spinlock_t zone_wplugs_lock;
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 50aa87f8d77f..e4070fb02b11 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -746,6 +746,8 @@ struct bpf_verifier_env {
/* Same as scratched_regs but for stack slots */
u64 scratched_stack_slots;
u64 prev_log_pos, prev_insn_print_pos;
+	/* buffer used to temporarily hold constants as scalar registers */
+ struct bpf_reg_state fake_reg[2];
/* buffer used to generate temporary string representations,
* e.g., in reg_type_str() to generate reg_type string
*/
diff --git a/include/linux/btf.h b/include/linux/btf.h
index f9e56fd12a9f..7c3e40c3295e 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -82,7 +82,7 @@
* as to avoid issues such as the compiler inlining or eliding either a static
* kfunc, or a global kfunc in an LTO build.
*/
-#define __bpf_kfunc __used noinline
+#define __bpf_kfunc __used __retain noinline
#define __bpf_kfunc_start_defs() \
__diag_push(); \
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 98c6fd0b39b6..fdfb61ccf55a 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -77,7 +77,7 @@ struct cdrom_device_ops {
unsigned int clearing, int slot);
int (*tray_move) (struct cdrom_device_info *, int);
int (*lock_door) (struct cdrom_device_info *, int);
- int (*select_speed) (struct cdrom_device_info *, int);
+ int (*select_speed) (struct cdrom_device_info *, unsigned long);
int (*get_last_session) (struct cdrom_device_info *,
struct cdrom_multisession *);
int (*get_mcn) (struct cdrom_device_info *,
diff --git a/include/linux/closure.h b/include/linux/closure.h
index 99155df162d0..59b8c06b11ff 100644
--- a/include/linux/closure.h
+++ b/include/linux/closure.h
@@ -285,6 +285,21 @@ static inline void closure_get(struct closure *cl)
}
/**
+ * closure_get_not_zero - take a reference on a closure unless its refcount
+ * has already reached zero
+ * @cl: closure to take a reference on
+ *
+ * Returns: true if a reference was taken, false if @cl is already going away.
+ */
+static inline bool closure_get_not_zero(struct closure *cl)
+{
+ unsigned old = atomic_read(&cl->remaining);
+ do {
+ if (!(old & CLOSURE_REMAINING_MASK))
+ return false;
+
+ } while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));
+
+ return true;
+}
+
+/**
* closure_init - Initialize a closure, setting the refcount to 1
* @cl: closure to initialize
* @parent: parent of the new closure. cl will take a refcount on it for its
@@ -310,6 +325,12 @@ static inline void closure_init_stack(struct closure *cl)
atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
}
+static inline void closure_init_stack_release(struct closure *cl)
+{
+ memset(cl, 0, sizeof(struct closure));
+ atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+}
+
/**
* closure_wake_up - wake up all closures on a wait list,
* with memory barrier
@@ -355,6 +376,8 @@ do { \
*/
#define closure_return(_cl) continue_at((_cl), NULL, NULL)
+void closure_return_sync(struct closure *cl);
+
/**
* continue_at_nobarrier - jump to another function without barrier
*
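closure_get_not_zero() is a try-get primitive: it takes a reference only while the remaining count is still non-zero, so lockless lookup paths can back off cleanly instead of resurrecting a closure that is already finishing. A hedged usage sketch with a hypothetical object:

#include <linux/closure.h>

struct demo_op {
	struct closure cl;
};

/* Sketch: pin an object found by a lockless lookup, or report it as dying. */
static struct demo_op *demo_op_tryget(struct demo_op *op)
{
	if (!closure_get_not_zero(&op->cl))
		return NULL;	/* already on its way through closure_return() */

	return op;
}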
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 233f61ec8afc..56cebaff0c91 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -608,7 +608,7 @@ asmlinkage long compat_sys_fstatfs(unsigned int fd,
asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 __user *buf);
asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
-asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
+asmlinkage long compat_sys_ftruncate(unsigned int, compat_off_t);
/* No generic prototype for truncate64, ftruncate64, fallocate */
asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
int flags, umode_t mode);
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 93600de3800b..f14c275950b5 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -143,6 +143,29 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __preserve_most
#endif
+/*
+ * Annotating a function/variable with __retain tells the compiler to place
+ * the object in its own section and set the flag SHF_GNU_RETAIN. This flag
+ * instructs the linker to retain the object during garbage-cleanup or LTO
+ * phases.
+ *
+ * Note that the __used macro is also used to prevent functions or data
+ * being optimized out, but operates at the compiler/IR-level and may still
+ * allow unintended removal of objects during linking.
+ *
+ * Optional: only supported since gcc >= 11, clang >= 13
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-retain-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#retain
+ */
+#if __has_attribute(__retain__) && \
+ (defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || \
+ defined(CONFIG_LTO_CLANG))
+# define __retain __attribute__((__retain__))
+#else
+# define __retain
+#endif
+
/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
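The annotation matters mostly for symbols that have no in-tree callers yet must survive section garbage collection or LTO; a minimal hypothetical example combining it with __used:

#include <linux/compiler_types.h>

/*
 * Illustrative only: a hook referenced solely from generated or BPF code.
 * __used stops the compiler from discarding it; __retain tags its section
 * SHF_GNU_RETAIN so the linker also keeps it under gc-sections or LTO.
 */
__used __retain noinline int demo_external_hook(int arg)
{
	return arg;
}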
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
index ae80a303c216..ca32b5bb28eb 100644
--- a/include/linux/dev_printk.h
+++ b/include/linux/dev_printk.h
@@ -277,4 +277,12 @@ do { \
__printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
+/* Simple helper for dev_err_probe() when ERR_PTR() is to be returned. */
+#define dev_err_ptr_probe(dev, ___err, fmt, ...) \
+ ERR_PTR(dev_err_probe(dev, ___err, fmt, ##__VA_ARGS__))
+
+/* Simple helper for dev_err_probe() when ERR_CAST() is to be returned. */
+#define dev_err_cast_probe(dev, ___err_ptr, fmt, ...) \
+ ERR_PTR(dev_err_probe(dev, PTR_ERR(___err_ptr), fmt, ##__VA_ARGS__))
+
#endif /* _DEVICE_PRINTK_H_ */
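These wrappers condense the common probe-time pattern of logging an error and returning an error pointer in a single statement; a hedged sketch using dev_err_cast_probe() (the clock name is illustrative):

#include <linux/clk.h>
#include <linux/dev_printk.h>
#include <linux/err.h>

/* Sketch: resolve a resource and return ERR_PTR() with a probe-time message. */
static struct clk *demo_get_ref_clk(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "ref");

	if (IS_ERR(clk))
		return dev_err_cast_probe(dev, clk,
					  "failed to get the reference clock\n");

	return clk;
}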
diff --git a/include/linux/device.h b/include/linux/device.h
index fc3bd7116ab9..ace039151cb8 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1220,8 +1220,6 @@ static inline void device_remove_group(struct device *dev,
return device_remove_groups(dev, groups);
}
-int __must_check devm_device_add_groups(struct device *dev,
- const struct attribute_group **groups);
int __must_check devm_device_add_group(struct device *dev,
const struct attribute_group *grp);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 752dbde4cec1..9fc03068cabc 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -161,6 +161,16 @@ struct dma_interleaved_template {
};
/**
+ * struct dma_vec - DMA vector
+ * @addr: Bus address of the start of the vector
+ * @len: Length in bytes of the DMA vector
+ */
+struct dma_vec {
+ dma_addr_t addr;
+ size_t len;
+};
+
+/**
* enum dma_ctrl_flags - DMA flags to augment operation preparation,
* control completion, and communicate status.
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
@@ -910,6 +920,10 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_peripheral_dma_vec)(
+ struct dma_chan *chan, const struct dma_vec *vecs,
+ size_t nents, enum dma_transfer_direction direction,
+ unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -973,6 +987,25 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
dir, flags, NULL);
}
+/**
+ * dmaengine_prep_peripheral_dma_vec() - Prepare a DMA scatter-gather descriptor
+ * @chan: The channel to be used for this descriptor
+ * @vecs: The array of DMA vectors that should be transferred
+ * @nents: The number of DMA vectors in the array
+ * @dir: Specifies the direction of the data transfer
+ * @flags: DMA engine flags
+ */
+static inline struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec(
+ struct dma_chan *chan, const struct dma_vec *vecs, size_t nents,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_peripheral_dma_vec)
+ return NULL;
+
+ return chan->device->device_prep_peripheral_dma_vec(chan, vecs, nents,
+ dir, flags);
+}
+
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction dir, unsigned long flags)
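A client that already holds a list of mapped bus addresses can now submit them as one descriptor without building a scatterlist; a hedged sketch assuming the channel and DMA addresses were set up elsewhere:

#include <linux/dmaengine.h>
#include <linux/kernel.h>

/* Sketch: push two pre-mapped buffers to a peripheral in a single descriptor. */
static int demo_submit_vec(struct dma_chan *chan, dma_addr_t a, size_t a_len,
			   dma_addr_t b, size_t b_len)
{
	struct dma_vec vecs[] = {
		{ .addr = a, .len = a_len },
		{ .addr = b, .len = b_len },
	};
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
	if (!desc)	/* provider lacks device_prep_peripheral_dma_vec */
		return -EOPNOTSUPP;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}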
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
deleted file mode 100644
index 34c2175e6a1e..000000000000
--- a/include/linux/eeprom_93xx46.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Module: eeprom_93xx46
- * platform description for 93xx46 EEPROMs.
- */
-#include <linux/gpio/consumer.h>
-
-struct eeprom_93xx46_platform_data {
- unsigned char flags;
-#define EE_ADDR8 0x01 /* 8 bit addr. cfg */
-#define EE_ADDR16 0x02 /* 16 bit addr. cfg */
-#define EE_READONLY 0x08 /* forbid writing */
-#define EE_SIZE1K 0x10 /* 1 kb of data, that is a 93xx46 */
-#define EE_SIZE2K 0x20 /* 2 kb of data, that is a 93xx56 */
-#define EE_SIZE4K 0x40 /* 4 kb of data, that is a 93xx66 */
-
- unsigned int quirks;
-/* Single word read transfers only; no sequential read. */
-#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
-/* Instructions such as EWEN are (addrlen + 2) in length. */
-#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
-/* Add extra cycle after address during a read */
-#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2)
-
- /*
- * optional hooks to control additional logic
- * before and after spi transfer.
- */
- void (*prepare)(void *);
- void (*finish)(void *);
- struct gpio_desc *select;
-};
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 2ad1ffa4ccb9..0ed47d00549b 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -636,6 +636,14 @@ static inline void eth_skb_pkt_type(struct sk_buff *skb,
}
}
+static inline struct ethhdr *eth_skb_pull_mac(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+
+ skb_pull_inline(skb, ETH_HLEN);
+ return eth;
+}
+
/**
 * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 0f12cf01070e..5669da513cd7 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1208,18 +1208,18 @@ static inline bool bpf_jit_kallsyms_enabled(void)
return false;
}
-const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+int __bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym);
struct bpf_prog *bpf_prog_ksym_find(unsigned long addr);
-static inline const char *
+static inline int
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
- const char *ret = __bpf_address_lookup(addr, size, off, sym);
+ int ret = __bpf_address_lookup(addr, size, off, sym);
if (ret && modname)
*modname = NULL;
@@ -1263,11 +1263,11 @@ static inline bool bpf_jit_kallsyms_enabled(void)
return false;
}
-static inline const char *
+static inline int
__bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym)
{
- return NULL;
+ return 0;
}
static inline bool is_bpf_text_address(unsigned long addr)
@@ -1286,11 +1286,11 @@ static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
return NULL;
}
-static inline const char *
+static inline int
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
- return NULL;
+ return 0;
}
static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 800995c425e0..b792274189a3 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -86,15 +86,15 @@ struct ftrace_hash;
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
defined(CONFIG_DYNAMIC_FTRACE)
-const char *
+int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym);
#else
-static inline const char *
+static inline int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
- return NULL;
+ return 0;
}
#endif
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index c8d3ec116e29..2aa986a5cd1b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -269,8 +269,8 @@ enum mthp_stat_item {
MTHP_STAT_ANON_FAULT_ALLOC,
MTHP_STAT_ANON_FAULT_FALLBACK,
MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
- MTHP_STAT_ANON_SWPOUT,
- MTHP_STAT_ANON_SWPOUT_FALLBACK,
+ MTHP_STAT_SWPOUT,
+ MTHP_STAT_SWPOUT_FALLBACK,
__MTHP_STAT_COUNT
};
@@ -278,6 +278,7 @@ struct mthp_stat {
unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
};
+#ifdef CONFIG_SYSFS
DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
static inline void count_mthp_stat(int order, enum mthp_stat_item item)
@@ -287,6 +288,11 @@ static inline void count_mthp_stat(int order, enum mthp_stat_item item)
this_cpu_inc(mthp_stats.stats[order][item]);
}
+#else
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+}
+#endif
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 5e6cd43a6dbd..424acb98c7c2 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -852,7 +852,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
/* i2c adapter classes (bitmask) */
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
-#define I2C_CLASS_SPD (1<<7) /* Memory modules */
/* Warn users that the adapter doesn't support classes anymore */
#define I2C_CLASS_DEPRECATED (1<<8)
@@ -961,8 +960,6 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
#define builtin_i2c_driver(__i2c_driver) \
builtin_driver(__i2c_driver, i2c_add_driver)
-#endif /* I2C */
-
/* must call put_device() when done with returned i2c_client device */
struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode);
@@ -972,6 +969,28 @@ struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode);
/* must call i2c_put_adapter() when done with returned i2c_adapter device */
struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode);
+#else /* I2C */
+
+static inline struct i2c_client *
+i2c_find_device_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct i2c_adapter *
+i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct i2c_adapter *
+i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+#endif /* !I2C */
+
#if IS_ENABLED(CONFIG_OF)
/* must call put_device() when done with returned i2c_client device */
static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 383614ebd760..f8c1d2505940 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -37,6 +37,10 @@ struct iio_dev;
* @append_status: Will be called to enable status append at the end of the sample, may be NULL.
* @set_mode: Will be called to select the current mode, may be NULL.
* @disable_all: Will be called to disable all channels, may be NULL.
+ * @disable_one: Will be called to disable a single channel after
+ * ad_sigma_delta_single_conversion(), may be NULL.
+ * Usage of this callback expects iio_chan_spec.address to contain
+ * the value required for the driver to identify the channel.
* @postprocess_sample: Is called for each sampled data word, can be used to
 * modify or drop the sample data, it may be NULL.
* @has_registers: true if the device has writable and readable registers, false
@@ -55,6 +59,7 @@ struct ad_sigma_delta_info {
int (*append_status)(struct ad_sigma_delta *, bool append);
int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode);
int (*disable_all)(struct ad_sigma_delta *);
+ int (*disable_one)(struct ad_sigma_delta *, unsigned int chan);
int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample);
bool has_registers;
unsigned int addr_shift;
@@ -140,6 +145,15 @@ static inline int ad_sigma_delta_disable_all(struct ad_sigma_delta *sd)
return 0;
}
+static inline int ad_sigma_delta_disable_one(struct ad_sigma_delta *sd,
+ unsigned int chan)
+{
+ if (sd->info->disable_one)
+ return sd->info->disable_one(sd, chan);
+
+ return 0;
+}
+
static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd,
unsigned int mode)
{
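A driver opts in by implementing @disable_one and encoding whatever that routine needs into iio_chan_spec.address; a hedged sketch with an invented register layout:

#include <linux/iio/adc/ad_sigma_delta.h>

/* Illustrative register map: one configuration register per channel. */
#define DEMO_REG_CHANNEL(x)	(0x09 + (x))

/*
 * Sketch of a @disable_one callback: @chan carries the value the driver
 * stored in iio_chan_spec.address, here the index of the channel register
 * whose enable bits are cleared by writing zero.
 */
static int demo_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
{
	return ad_sd_write_reg(sd, DEMO_REG_CHANNEL(chan), 2, 0);
}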
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 6e27e47077d5..5eb66a399002 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -7,6 +7,7 @@
#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
#define __INDUSTRIALIO_DMA_BUFFER_H__
+#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
@@ -16,6 +17,9 @@
struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;
+struct dma_buf_attachment;
+struct dma_fence;
+struct sg_table;
/**
* enum iio_block_state - State of a struct iio_dma_buffer_block
@@ -41,6 +45,10 @@ enum iio_block_state {
* @queue: Parent DMA buffer queue
* @kref: kref used to manage the lifetime of block
* @state: Current state of the block
+ * @cyclic: True if this is a cyclic buffer
+ * @fileio: True if this buffer is used for fileio mode
+ * @sg_table: DMA table for the transfer when transferring a DMABUF
+ * @fence: DMA fence to be signaled when a DMABUF transfer is complete
*/
struct iio_dma_buffer_block {
/* May only be accessed by the owner of the block */
@@ -63,6 +71,12 @@ struct iio_dma_buffer_block {
* queue->list_lock if the block is not owned by the core.
*/
enum iio_block_state state;
+
+ bool cyclic;
+ bool fileio;
+
+ struct sg_table *sg_table;
+ struct dma_fence *fence;
};
/**
@@ -72,6 +86,7 @@ struct iio_dma_buffer_block {
* @pos: Read offset in the active block
* @block_size: Size of each block
* @next_dequeue: index of next block that will be dequeued
+ * @enabled: Whether the buffer is operating in fileio mode
*/
struct iio_dma_buffer_queue_fileio {
struct iio_dma_buffer_block *blocks[2];
@@ -80,6 +95,7 @@ struct iio_dma_buffer_queue_fileio {
size_t block_size;
unsigned int next_dequeue;
+ bool enabled;
};
/**
@@ -95,6 +111,7 @@ struct iio_dma_buffer_queue_fileio {
* the DMA controller
* @incoming: List of buffers on the incoming queue
* @active: Whether the buffer is currently active
+ * @num_dmabufs: Total number of DMABUFs attached to this queue
* @fileio: FileIO state
*/
struct iio_dma_buffer_queue {
@@ -107,6 +124,7 @@ struct iio_dma_buffer_queue {
struct list_head incoming;
bool active;
+ atomic_t num_dmabufs;
struct iio_dma_buffer_queue_fileio fileio;
};
@@ -144,4 +162,17 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
+struct iio_dma_buffer_block *
+iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
+ struct dma_buf_attachment *attach);
+void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block);
+int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block,
+ struct dma_fence *fence,
+ struct sg_table *sgt,
+ size_t size, bool cyclic);
+void iio_dma_buffer_lock_queue(struct iio_buffer *buffer);
+void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer);
+
#endif
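A DMA buffer implementation exports the new DMABUF path by pointing the matching iio_buffer_access_funcs callbacks at these helpers; a hedged sketch of such an ops table (read/write, enable/disable, modes and the provider's own submit hook are elided):

#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer_impl.h>

static const struct iio_buffer_access_funcs demo_dma_buffer_ops = {
	/* ... data_available, read, write, enable, disable, release ... */
	.attach_dmabuf	= iio_dma_buffer_attach_dmabuf,
	.detach_dmabuf	= iio_dma_buffer_detach_dmabuf,
	/* often a thin wrapper around this helper that also kicks the hardware */
	.enqueue_dmabuf	= iio_dma_buffer_enqueue_dmabuf,
	.lock_queue	= iio_dma_buffer_lock_queue,
	.unlock_queue	= iio_dma_buffer_unlock_queue,
};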
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index 89c3fd7c29ca..e72552e026f3 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -9,8 +9,12 @@
#include <uapi/linux/iio/buffer.h>
#include <linux/iio/buffer.h>
+struct dma_buf_attachment;
+struct dma_fence;
struct iio_dev;
+struct iio_dma_buffer_block;
struct iio_buffer;
+struct sg_table;
/**
* INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be
@@ -39,6 +43,16 @@ struct iio_buffer;
 * device stops sampling. Calls are balanced with @enable.
* @release: called when the last reference to the buffer is dropped,
* should free all resources allocated by the buffer.
+ * @attach_dmabuf: called from userspace via ioctl to attach one external
+ * DMABUF.
+ * @detach_dmabuf: called from userspace via ioctl to detach one previously
+ * attached DMABUF.
+ * @enqueue_dmabuf: called from userspace via ioctl to queue this DMABUF
+ *			object to this buffer. Requires a valid DMABUF fd that
+ *			was previously attached to this buffer.
+ * @lock_queue: called when the core needs to lock the buffer queue;
+ * it is used when enqueueing DMABUF objects.
+ * @unlock_queue: used to unlock a previously locked buffer queue
* @modes: Supported operating modes by this buffer type
* @flags: A bitmask combination of INDIO_BUFFER_FLAG_*
*
@@ -68,6 +82,17 @@ struct iio_buffer_access_funcs {
void (*release)(struct iio_buffer *buffer);
+ struct iio_dma_buffer_block * (*attach_dmabuf)(struct iio_buffer *buffer,
+ struct dma_buf_attachment *attach);
+ void (*detach_dmabuf)(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block);
+ int (*enqueue_dmabuf)(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block,
+ struct dma_fence *fence, struct sg_table *sgt,
+ size_t size, bool cyclic);
+ void (*lock_queue)(struct iio_buffer *buffer);
+ void (*unlock_queue)(struct iio_buffer *buffer);
+
unsigned int modes;
unsigned int flags;
};
@@ -136,6 +161,12 @@ struct iio_buffer {
/* @ref: Reference count of the buffer. */
struct kref ref;
+
+ /* @dmabufs: List of DMABUF attachments */
+ struct list_head dmabufs; /* P: dmabufs_mutex */
+
+ /* @dmabufs_mutex: Protects dmabufs */
+ struct mutex dmabufs_mutex;
};
/**
@@ -159,6 +190,8 @@ void iio_buffer_init(struct iio_buffer *buffer);
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
void iio_buffer_put(struct iio_buffer *buffer);
+void iio_buffer_signal_dmabuf_done(struct dma_fence *fence, int ret);
+
#else /* CONFIG_IIO_BUFFER */
static inline void iio_buffer_get(struct iio_buffer *buffer) {}
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index e9910b41d48e..333d1d8ccb37 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -441,4 +441,14 @@ ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
const char *buf, size_t len);
+/**
+ * iio_read_channel_label() - read label for a given channel
+ * @chan: The channel being queried.
+ * @buf: Where to store the attribute value. Assumed to hold
+ * at least PAGE_SIZE bytes.
+ *
+ * Returns the number of bytes written to buf, or an error code.
+ */
+ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf);
+
#endif
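A hedged usage sketch for the new consumer helper, with a hypothetical consumer driver exposing the channel label as a device attribute:

#include <linux/device.h>
#include <linux/iio/consumer.h>

struct example_consumer {		/* hypothetical consumer state */
	struct iio_channel *chan;
};

/* sketch: expose the consumed channel's label through sysfs */
static ssize_t channel_label_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct example_consumer *st = dev_get_drvdata(dev);

	return iio_read_channel_label(st->chan, buf);
}
static DEVICE_ATTR_RO(channel_label);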
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 55e2b22086a1..894309294182 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -174,6 +174,27 @@ struct iio_event_spec {
};
/**
+ * struct iio_scan_type - specification for channel data format in buffer
+ * @sign: 's' or 'u' to specify signed or unsigned
+ * @realbits: Number of valid bits of data
+ * @storagebits: Realbits + padding
+ * @shift: Shift right by this before masking out realbits.
+ * @repeat: Number of times real/storage bits repeat. When the
+ * repeat element is more than 1, then the type element in
+ * sysfs will show a repeat value. Otherwise, the number
+ * of repetitions is omitted.
+ * @endianness: little or big endian
+ */
+struct iio_scan_type {
+ char sign;
+ u8 realbits;
+ u8 storagebits;
+ u8 shift;
+ u8 repeat;
+ enum iio_endian endianness;
+};
+
+/**
* struct iio_chan_spec - specification of a single channel
* @type: What type of measurement is the channel making.
* @channel: What number do we wish to assign the channel.
@@ -183,18 +204,13 @@ struct iio_event_spec {
* @address: Driver specific identifier.
* @scan_index: Monotonic index to give ordering in scans when read
* from a buffer.
- * @scan_type: struct describing the scan type
- * @scan_type.sign: 's' or 'u' to specify signed or unsigned
- * @scan_type.realbits: Number of valid bits of data
- * @scan_type.storagebits: Realbits + padding
- * @scan_type.shift: Shift right by this before masking out
- * realbits.
- * @scan_type.repeat: Number of times real/storage bits repeats.
- * When the repeat element is more than 1, then
- * the type element in sysfs will show a repeat
- * value. Otherwise, the number of repetitions
- * is omitted.
- * @scan_type.endianness: little or big endian
+ * @scan_type: struct describing the scan type - mutually exclusive
+ * with ext_scan_type.
+ * @ext_scan_type: Used in rare cases where there is more than one scan
+ * format for a channel. When this is used, the flag
+ * has_ext_scan_type must be set and the driver must
+ * implement get_current_scan_type in struct iio_info.
+ * @num_ext_scan_type: Number of elements in ext_scan_type.
* @info_mask_separate: What information is to be exported that is specific to
* this channel.
* @info_mask_separate_available: What availability information is to be
@@ -238,6 +254,7 @@ struct iio_event_spec {
* attributes but not for event codes.
* @output: Channel is output.
* @differential: Channel is differential.
+ * @has_ext_scan_type: True if ext_scan_type is used instead of scan_type.
*/
struct iio_chan_spec {
enum iio_chan_type type;
@@ -245,14 +262,13 @@ struct iio_chan_spec {
int channel2;
unsigned long address;
int scan_index;
- struct {
- char sign;
- u8 realbits;
- u8 storagebits;
- u8 shift;
- u8 repeat;
- enum iio_endian endianness;
- } scan_type;
+ union {
+ struct iio_scan_type scan_type;
+ struct {
+ const struct iio_scan_type *ext_scan_type;
+ unsigned int num_ext_scan_type;
+ };
+ };
long info_mask_separate;
long info_mask_separate_available;
long info_mask_shared_by_type;
@@ -270,6 +286,7 @@ struct iio_chan_spec {
unsigned indexed:1;
unsigned output:1;
unsigned differential:1;
+ unsigned has_ext_scan_type:1;
};
@@ -432,6 +449,9 @@ struct iio_trigger; /* forward declaration */
* for better event identification.
* @validate_trigger: function to validate the trigger when the
* current trigger gets changed.
+ * @get_current_scan_type: must be implemented by drivers that use ext_scan_type
+ * in the channel spec to return the index of the currently
+ * active ext_scan_type for a channel.
* @update_scan_mode: function to configure device and scan buffer when
* channels have changed
* @debugfs_reg_access: function to read or write register value of device
@@ -516,6 +536,8 @@ struct iio_info {
int (*validate_trigger)(struct iio_dev *indio_dev,
struct iio_trigger *trig);
+ int (*get_current_scan_type)(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan);
int (*update_scan_mode)(struct iio_dev *indio_dev,
const unsigned long *scan_mask);
int (*debugfs_reg_access)(struct iio_dev *indio_dev,
@@ -801,6 +823,38 @@ static inline bool iio_read_acpi_mount_matrix(struct device *dev,
}
#endif
+/**
+ * iio_get_current_scan_type - Get the current scan type for a channel
+ * @indio_dev: the IIO device to get the scan type for
+ * @chan: the channel to get the scan type for
+ *
+ * Most devices only have one scan type per channel and can just access it
+ * directly without calling this function. Core IIO code and drivers that
+ * implement ext_scan_type in the channel spec should use this function to
+ * get the current scan type for a channel.
+ *
+ * Returns: the current scan type for the channel or error.
+ */
+static inline const struct iio_scan_type
+*iio_get_current_scan_type(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ int ret;
+
+ if (chan->has_ext_scan_type) {
+ ret = indio_dev->info->get_current_scan_type(indio_dev, chan);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (ret >= chan->num_ext_scan_type)
+ return ERR_PTR(-EINVAL);
+
+ return &chan->ext_scan_type[ret];
+ }
+
+ return &chan->scan_type;
+}
+
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
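A hedged sketch of how a driver with two on-wire formats might use the new union and callback; the driver names and state layout are illustrative, not from this patch:

#include <linux/iio/iio.h>

struct example_state {			/* hypothetical driver state */
	bool high_res;
};

enum { EXAMPLE_SCAN_16BIT, EXAMPLE_SCAN_24BIT };

static const struct iio_scan_type example_scan_types[] = {
	[EXAMPLE_SCAN_16BIT] = {
		.sign = 's',
		.realbits = 16,
		.storagebits = 16,
		.endianness = IIO_BE,
	},
	[EXAMPLE_SCAN_24BIT] = {
		.sign = 's',
		.realbits = 24,
		.storagebits = 32,
		.endianness = IIO_BE,
	},
};

static const struct iio_chan_spec example_channel = {
	.type = IIO_VOLTAGE,
	.indexed = 1,
	.channel = 0,
	.scan_index = 0,
	.has_ext_scan_type = 1,
	.ext_scan_type = example_scan_types,
	.num_ext_scan_type = ARRAY_SIZE(example_scan_types),
};

/* returns the index into ext_scan_type of the currently active format */
static int example_get_current_scan_type(const struct iio_dev *indio_dev,
					 const struct iio_chan_spec *chan)
{
	struct example_state *st = iio_priv(indio_dev);

	return st->high_res ? EXAMPLE_SCAN_24BIT : EXAMPLE_SCAN_16BIT;
}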
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index 8898966bc0f0..e6a75356567a 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -9,6 +9,7 @@
#ifndef __IIO_ADIS_H__
#define __IIO_ADIS_H__
+#include <linux/cleanup.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/iio/iio.h>
@@ -21,6 +22,7 @@
#define ADIS_REG_PAGE_ID 0x00
struct adis;
+struct iio_dev_attr;
/**
* struct adis_timeouts - ADIS chip variant timeouts
@@ -84,6 +86,7 @@ struct adis_data {
bool unmasked_drdy;
bool has_paging;
+ bool has_fifo;
unsigned int burst_reg_cmd;
unsigned int burst_len;
@@ -148,13 +151,8 @@ int __adis_reset(struct adis *adis);
*/
static inline int adis_reset(struct adis *adis)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_reset(adis);
- mutex_unlock(&adis->state_lock);
-
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return __adis_reset(adis);
}
int __adis_write_reg(struct adis *adis, unsigned int reg,
@@ -246,13 +244,8 @@ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
static inline int adis_write_reg(struct adis *adis, unsigned int reg,
unsigned int val, unsigned int size)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_write_reg(adis, reg, val, size);
- mutex_unlock(&adis->state_lock);
-
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return __adis_write_reg(adis, reg, val, size);
}
/**
@@ -265,13 +258,8 @@ static inline int adis_write_reg(struct adis *adis, unsigned int reg,
static int adis_read_reg(struct adis *adis, unsigned int reg,
unsigned int *val, unsigned int size)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_read_reg(adis, reg, val, size);
- mutex_unlock(&adis->state_lock);
-
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return __adis_read_reg(adis, reg, val, size);
}
/**
@@ -363,12 +351,8 @@ int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask,
static inline int adis_update_bits_base(struct adis *adis, unsigned int reg,
const u32 mask, const u32 val, u8 size)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_update_bits_base(adis, reg, mask, val, size);
- mutex_unlock(&adis->state_lock);
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return __adis_update_bits_base(adis, reg, mask, val, size);
}
/**
@@ -409,35 +393,19 @@ int __adis_enable_irq(struct adis *adis, bool enable);
static inline int adis_enable_irq(struct adis *adis, bool enable)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_enable_irq(adis, enable);
- mutex_unlock(&adis->state_lock);
-
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return __adis_enable_irq(adis, enable);
}
static inline int adis_check_status(struct adis *adis)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_check_status(adis);
- mutex_unlock(&adis->state_lock);
-
- return ret;
-}
-
-static inline void adis_dev_lock(struct adis *adis)
-{
- mutex_lock(&adis->state_lock);
+ guard(mutex)(&adis->state_lock);
+ return __adis_check_status(adis);
}
-static inline void adis_dev_unlock(struct adis *adis)
-{
- mutex_unlock(&adis->state_lock);
-}
+#define adis_dev_auto_lock(adis) guard(mutex)(&(adis)->state_lock)
+#define adis_dev_auto_scoped_lock(adis) \
+ scoped_guard(mutex, &(adis)->state_lock)
int adis_single_conversion(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
@@ -515,11 +483,19 @@ int adis_single_conversion(struct iio_dev *indio_dev,
#define ADIS_ROT_CHAN(mod, addr, si, info_sep, info_all, bits) \
ADIS_MOD_CHAN(IIO_ROT, mod, addr, si, info_sep, info_all, bits)
+#define devm_adis_setup_buffer_and_trigger(adis, indio_dev, trigger_handler) \
+ devm_adis_setup_buffer_and_trigger_with_attrs((adis), (indio_dev), \
+ (trigger_handler), NULL, \
+ NULL)
+
#ifdef CONFIG_IIO_ADIS_LIB_BUFFER
int
-devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
- irq_handler_t trigger_handler);
+devm_adis_setup_buffer_and_trigger_with_attrs(struct adis *adis,
+ struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler,
+ const struct iio_buffer_setup_ops *ops,
+ const struct iio_dev_attr **buffer_attrs);
int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
@@ -529,8 +505,11 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
#else /* CONFIG_IIO_BUFFER */
static inline int
-devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
- irq_handler_t trigger_handler)
+devm_adis_setup_buffer_and_trigger_with_attrs(struct adis *adis,
+ struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler,
+ const struct iio_buffer_setup_ops *ops,
+ const struct iio_dev_attr **buffer_attrs)
{
return 0;
}
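A hedged sketch of what the cleanup-based helpers look like on the driver side, replacing the removed adis_dev_lock()/adis_dev_unlock() pair (driver names are hypothetical):

#include <linux/iio/imu/adis.h>

struct example_adis_state {		/* hypothetical driver state */
	struct adis adis;
};

/* sketch: the guard drops state_lock automatically on every return path */
static int example_read_sample(struct example_adis_state *st, unsigned int reg,
			       u16 *val)
{
	adis_dev_auto_lock(&st->adis);

	return __adis_read_reg_16(&st->adis, reg, val);
}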
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 7a6b190c7da7..7abdc0927124 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -207,7 +207,6 @@ struct io_submit_state {
bool need_plug;
bool cq_flush;
unsigned short submit_nr;
- unsigned int cqes_count;
struct blk_plug plug;
};
@@ -648,7 +647,7 @@ struct io_kiocb {
struct io_rsrc_node *rsrc_node;
atomic_t refs;
- atomic_t poll_refs;
+ bool cancel_seq_set;
struct io_task_work io_task_work;
/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
struct hlist_node hash_node;
@@ -657,6 +656,7 @@ struct io_kiocb {
/* opcode allocated if it needs to store data for async defer */
void *async_data;
/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
+ atomic_t poll_refs;
struct io_kiocb *link;
/* custom credentials, valid IFF REQ_F_CREDS is set */
const struct cred *creds;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7bc8dff7cf6d..17b3f36ad843 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -1533,7 +1533,7 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index b851ba415e03..75a2fb8b16c3 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -21,6 +21,8 @@ enum kcov_mode {
KCOV_MODE_TRACE_PC = 2,
/* Collecting comparison operands mode. */
KCOV_MODE_TRACE_CMP = 3,
+ /* The process owns a KCOV remote reference. */
+ KCOV_MODE_REMOTE = 4,
};
#define KCOV_IN_CTXSW (1 << 30)
@@ -55,21 +57,47 @@ static inline void kcov_remote_start_usb(u64 id)
/*
* The softirq flavor of kcov_remote_*() functions is introduced as a temporary
- * work around for kcov's lack of nested remote coverage sections support in
- * task context. Adding support for nested sections is tracked in:
- * https://bugzilla.kernel.org/show_bug.cgi?id=210337
+ * workaround for KCOV's lack of nested remote coverage sections support.
+ *
+ * Adding support is tracked in https://bugzilla.kernel.org/show_bug.cgi?id=210337.
+ *
+ * kcov_remote_start_usb_softirq():
+ *
+ * 1. Only collects coverage when called in the softirq context. This allows
+ * avoiding nested remote coverage collection sections in the task context.
+ * For example, USB/IP calls usb_hcd_giveback_urb() in the task context
+ * within an existing remote coverage collection section. Thus, KCOV should
+ * not attempt to start collecting coverage within the coverage collection
+ * section in __usb_hcd_giveback_urb() in this case.
+ *
+ * 2. Disables interrupts for the duration of the coverage collection section.
+ * This allows avoiding nested remote coverage collection sections in the
+ * softirq context (a softirq might occur during the execution of a work in
+ * the BH workqueue, which runs with in_serving_softirq() > 0).
+ * For example, usb_giveback_urb_bh() runs in the BH workqueue with
+ * interrupts enabled, so __usb_hcd_giveback_urb() might be interrupted in
+ * the middle of its remote coverage collection section, and the interrupt
+ * handler might invoke __usb_hcd_giveback_urb() again.
*/
-static inline void kcov_remote_start_usb_softirq(u64 id)
+static inline unsigned long kcov_remote_start_usb_softirq(u64 id)
{
- if (in_serving_softirq())
+ unsigned long flags = 0;
+
+ if (in_serving_softirq()) {
+ local_irq_save(flags);
kcov_remote_start_usb(id);
+ }
+
+ return flags;
}
-static inline void kcov_remote_stop_softirq(void)
+static inline void kcov_remote_stop_softirq(unsigned long flags)
{
- if (in_serving_softirq())
+ if (in_serving_softirq()) {
kcov_remote_stop();
+ local_irq_restore(flags);
+ }
}
#ifdef CONFIG_64BIT
@@ -103,8 +131,11 @@ static inline u64 kcov_common_handle(void)
}
static inline void kcov_remote_start_common(u64 id) {}
static inline void kcov_remote_start_usb(u64 id) {}
-static inline void kcov_remote_start_usb_softirq(u64 id) {}
-static inline void kcov_remote_stop_softirq(void) {}
+static inline unsigned long kcov_remote_start_usb_softirq(u64 id)
+{
+ return 0;
+}
+static inline void kcov_remote_stop_softirq(unsigned long flags) {}
#endif /* CONFIG_KCOV */
#endif /* _LINUX_KCOV_H */
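The softirq helpers now hand the saved interrupt flags back to the caller; a hedged sketch of the expected call pattern in an HCD giveback path (illustrative, not a copy of the USB core):

#include <linux/kcov.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

static void example_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;

	flags = kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
	/* ... run the URB completion handler with coverage attributed to this bus ... */
	kcov_remote_stop_softirq(flags);
}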
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 52c63a9c5a9c..11690dacd986 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -33,16 +33,27 @@ void __ksm_exit(struct mm_struct *mm);
*/
#define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
-extern unsigned long ksm_zero_pages;
+extern atomic_long_t ksm_zero_pages;
+
+static inline void ksm_map_zero_page(struct mm_struct *mm)
+{
+ atomic_long_inc(&ksm_zero_pages);
+ atomic_long_inc(&mm->ksm_zero_pages);
+}
static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
if (is_ksm_zero_pte(pte)) {
- ksm_zero_pages--;
- mm->ksm_zero_pages--;
+ atomic_long_dec(&ksm_zero_pages);
+ atomic_long_dec(&mm->ksm_zero_pages);
}
}
+static inline long mm_ksm_zero_pages(struct mm_struct *mm)
+{
+ return atomic_long_read(&mm->ksm_zero_pages);
+}
+
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 13fb41d25da6..7d3bd7c9664a 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1249,6 +1249,7 @@ extern int ata_slave_link_init(struct ata_port *ap);
extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
struct ata_port_info *, struct Scsi_Host *);
extern void ata_port_probe(struct ata_port *ap);
+extern void ata_port_free(struct ata_port *ap);
extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
extern void ata_sas_tport_delete(struct ata_port *ap);
int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 5e51b0de4c4b..08b0d1d9d78b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -297,9 +297,6 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
.wait_type_inner = _wait_type, \
.lock_type = LD_LOCK_WAIT_OVERRIDE, }
-#define lock_map_assert_held(l) \
- lockdep_assert(lock_is_held(l) != LOCK_STATE_NOT_HELD)
-
#else /* !CONFIG_LOCKDEP */
static inline void lockdep_init_task(struct task_struct *task)
@@ -391,8 +388,6 @@ extern int lockdep_is_held(const void *);
#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \
struct lockdep_map __maybe_unused _name = {}
-#define lock_map_assert_held(l) do { (void)(l); } while (0)
-
#endif /* !LOCKDEP */
#ifdef CONFIG_PROVE_LOCKING
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index f804b76cde44..44488b1ab9a9 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -413,7 +413,7 @@ LSM_HOOK(void, LSM_RET_VOID, key_post_create_or_update, struct key *keyring,
#ifdef CONFIG_AUDIT
LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
- void **lsmrule)
+ void **lsmrule, gfp_t gfp)
LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule)
LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
diff --git a/include/linux/math.h b/include/linux/math.h
index dd4152711de7..f5f18dc3616b 100644
--- a/include/linux/math.h
+++ b/include/linux/math.h
@@ -112,6 +112,8 @@ struct type##_fract { \
__##type numerator; \
__##type denominator; \
};
+__STRUCT_FRACT(s8)
+__STRUCT_FRACT(u8)
__STRUCT_FRACT(s16)
__STRUCT_FRACT(u16)
__STRUCT_FRACT(s32)
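A small hedged example of the new 8-bit variants; the scaling helper is illustrative:

#include <linux/math.h>

/* e.g. a gain of 3/5 carried as an 8-bit fraction */
static const struct u8_fract example_gain = {
	.numerator = 3,
	.denominator = 5,
};

static u32 example_apply_gain(u32 raw)
{
	return mult_frac(raw, example_gain.numerator, example_gain.denominator);
}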
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index b573f15762f8..fabd6ed8d258 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -290,6 +290,7 @@ struct mhi_controller_config {
/**
* struct mhi_controller - Master MHI controller structure
+ * @name: Device name of the MHI controller
* @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
* controller (required)
* @mhi_dev: MHI device instance for the controller
@@ -367,6 +368,7 @@ struct mhi_controller_config {
* they can be populated depending on the usecase.
*/
struct mhi_controller {
+ const char *name;
struct device *cntrl_dev;
struct mhi_device *mhi_dev;
struct dentry *debugfs_dentry;
diff --git a/include/linux/misc/keba.h b/include/linux/misc/keba.h
new file mode 100644
index 000000000000..323b31a847c5
--- /dev/null
+++ b/include/linux/misc/keba.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2024, KEBA Industrial Automation Gmbh */
+
+#ifndef _LINUX_MISC_KEBA_H
+#define _LINUX_MISC_KEBA_H
+
+#include <linux/auxiliary_bus.h>
+
+struct i2c_board_info;
+
+/**
+ * struct keba_i2c_auxdev - KEBA I2C auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of I2C controller IO memory
+ * @info_size: number of I2C devices to be probed
+ * @info: I2C devices to be probed
+ */
+struct keba_i2c_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+ int info_size;
+ struct i2c_board_info *info;
+};
+
+#endif /* _LINUX_MISC_KEBA_H */
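A hedged sketch of how a parent driver might populate and register this auxiliary device; the board-info table and function names are illustrative:

#include <linux/i2c.h>
#include <linux/misc/keba.h>
#include <linux/slab.h>

/* hypothetical on-board I2C devices to be probed behind the auxiliary device */
static struct i2c_board_info example_keba_i2c_info[] = {
	{ I2C_BOARD_INFO("24c32", 0x50) },
};

static void example_keba_i2c_release(struct device *dev)
{
	struct keba_i2c_auxdev *i2c =
		container_of(dev, struct keba_i2c_auxdev, auxdev.dev);

	kfree(i2c);
}

/* sketch: hand an on-chip I2C controller to the auxiliary bus */
static int example_keba_register_i2c(struct device *parent, struct resource *io)
{
	struct keba_i2c_auxdev *i2c;
	int ret;

	i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
	if (!i2c)
		return -ENOMEM;

	i2c->io = *io;
	i2c->info_size = ARRAY_SIZE(example_keba_i2c_info);
	i2c->info = example_keba_i2c_info;

	i2c->auxdev.name = "i2c";
	i2c->auxdev.id = 0;
	i2c->auxdev.dev.parent = parent;
	i2c->auxdev.dev.release = example_keba_i2c_release;

	ret = auxiliary_device_init(&i2c->auxdev);
	if (ret) {
		kfree(i2c);
		return ret;
	}

	ret = auxiliary_device_add(&i2c->auxdev);
	if (ret)
		auxiliary_device_uninit(&i2c->auxdev);

	return ret;
}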
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index f468763478ae..5df52e15f7d6 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -10308,9 +10308,9 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 mfrl[0x1];
u8 regs_39_to_32[0x8];
- u8 regs_31_to_10[0x16];
+ u8 regs_31_to_11[0x15];
u8 mtmp[0x1];
- u8 regs_8_to_0[0x9];
+ u8 regs_9_to_0[0xa];
};
struct mlx5_ifc_mcam_access_reg_bits1 {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9849dfda44d4..eb7c96d24ac0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -406,6 +406,11 @@ extern unsigned int kobjsize(const void *objp);
#define VM_ALLOW_ANY_UNCACHED VM_NONE
#endif
+#ifdef CONFIG_64BIT
+/* VM is sealed, in vm_flags */
+#define VM_SEALED _BITUL(63)
+#endif
+
/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
@@ -3776,14 +3781,7 @@ DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
static inline bool want_init_on_free(void)
{
return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
- &init_on_free);
-}
-
-DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON, init_mlocked_on_free);
-static inline bool want_init_mlocked_on_free(void)
-{
- return static_branch_maybe(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON,
- &init_mlocked_on_free);
+ &init_on_free);
}
extern bool _debug_pagealloc_enabled_early;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 24323c7d0bd4..af3a0256fa93 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -985,7 +985,7 @@ struct mm_struct {
* Represent how many empty pages are merged with kernel zero
* pages when enabling KSM use_zero_pages.
*/
- unsigned long ksm_zero_pages;
+ atomic_long_t ksm_zero_pages;
#endif /* CONFIG_KSM */
#ifdef CONFIG_LRU_GEN_WALKS_MMU
struct {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8f9c9590a42c..586a8f0104d7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -654,13 +654,12 @@ enum zone_watermarks {
};
/*
- * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list
- * for THP which will usually be GFP_MOVABLE. Even if it is another type,
- * it should not contribute to serious fragmentation causing THP allocation
- * failures.
+ * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists
+ * are added for THP. One PCP list is used by GFP_MOVABLE allocations, and the
+ * other PCP list is used by unmovable and reclaimable allocations.
*/
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define NR_PCP_THP 1
+#define NR_PCP_THP 2
#else
#define NR_PCP_THP 0
#endif
diff --git a/include/linux/module.h b/include/linux/module.h
index ffa1c603163c..330ffb59efe5 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -931,11 +931,11 @@ int module_kallsyms_on_each_symbol(const char *modname,
* least KSYM_NAME_LEN long: a pointer to namebuf is returned if
* found, otherwise NULL.
*/
-const char *module_address_lookup(unsigned long addr,
- unsigned long *symbolsize,
- unsigned long *offset,
- char **modname, const unsigned char **modbuildid,
- char *namebuf);
+int module_address_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname, const unsigned char **modbuildid,
+ char *namebuf);
int lookup_module_symbol_name(unsigned long addr, char *symname);
int lookup_module_symbol_attrs(unsigned long addr,
unsigned long *size,
@@ -964,14 +964,14 @@ static inline int module_kallsyms_on_each_symbol(const char *modname,
}
/* For kallsyms to ask for address resolution. 0 means not found. */
-static inline const char *module_address_lookup(unsigned long addr,
+static inline int module_address_lookup(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset,
char **modname,
const unsigned char **modbuildid,
char *namebuf)
{
- return NULL;
+ return 0;
}
static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index d2d291a9cdad..5d0288938cc2 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -68,6 +68,7 @@ struct netfs_inode {
loff_t remote_i_size; /* Size of the remote file */
loff_t zero_point; /* Size after which we assume there's no data
* on the server */
+ atomic_t io_count; /* Number of outstanding reqs */
unsigned long flags;
#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
@@ -474,6 +475,7 @@ static inline void netfs_inode_init(struct netfs_inode *ctx,
ctx->remote_i_size = i_size_read(&ctx->inode);
ctx->zero_point = LLONG_MAX;
ctx->flags = 0;
+ atomic_set(&ctx->io_count, 0);
#if IS_ENABLED(CONFIG_FSCACHE)
ctx->cache = NULL;
#endif
@@ -517,4 +519,20 @@ static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
#endif
}
+/**
+ * netfs_wait_for_outstanding_io - Wait for outstanding I/O to complete
+ * @inode: The netfs inode to wait on
+ *
+ * Wait for outstanding I/O requests of any type to complete. This is intended
+ * to be called from inode eviction routines. This makes sure that any
+ * resources held by those requests are cleaned up before we let the inode get
+ * cleaned up.
+ */
+static inline void netfs_wait_for_outstanding_io(struct inode *inode)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+
+ wait_var_event(&ictx->io_count, atomic_read(&ictx->io_count) == 0);
+}
+
#endif /* _LINUX_NETFS_H */
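A hedged sketch of the intended call site, a hypothetical netfs-backed filesystem's evict_inode:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/netfs.h>

static void examplefs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	netfs_wait_for_outstanding_io(inode);	/* no request may still reference the inode */
	clear_inode(inode);
}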
diff --git a/include/linux/numa.h b/include/linux/numa.h
index 1d43371fafd2..eb19503604fe 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -15,6 +15,11 @@
#define NUMA_NO_NODE (-1)
#define NUMA_NO_MEMBLK (-1)
+static inline bool numa_valid_node(int nid)
+{
+ return nid >= 0 && nid < MAX_NUMNODES;
+}
+
/* optionally keep NUMA memory info available post init */
#ifdef CONFIG_NUMA_KEEP_MEMINFO
#define __initdata_or_meminfo
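A trivial hedged example of the new helper replacing the usual open-coded range check (the device structure is hypothetical):

#include <linux/numa.h>

struct example_dev {
	int node;
};

/* sketch: replaces "nid != NUMA_NO_NODE && nid < MAX_NUMNODES" */
static void example_set_node(struct example_dev *edev, int nid)
{
	edev->node = numa_valid_node(nid) ? nid : NUMA_NO_NODE;
}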
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 425573202295..c693ac344ec0 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -85,10 +85,11 @@ enum {
enum {
NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
+ NVMF_RDMA_QPTYPE_INVALID = 0xff,
};
-/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
- * RDMA_QPTYPE field
+/* RDMA Provider Type codes for Discovery Log Page entry TSAS
+ * RDMA_PRTYPE field
*/
enum {
NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
@@ -110,6 +111,7 @@ enum {
NVMF_TCP_SECTYPE_NONE = 0, /* No Security */
NVMF_TCP_SECTYPE_TLS12 = 1, /* TLSv1.2, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
NVMF_TCP_SECTYPE_TLS13 = 2, /* TLSv1.3, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
+ NVMF_TCP_SECTYPE_INVALID = 0xff,
};
#define NVME_AQ_DEPTH 32
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 104078afe0b1..b9e914e1face 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -944,15 +944,18 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
* mistaken for a page type value.
*/
-#define PAGE_TYPE_BASE 0xf0000000
-/* Reserve 0x0000007f to catch underflows of _mapcount */
-#define PAGE_MAPCOUNT_RESERVE -128
-#define PG_buddy 0x00000080
-#define PG_offline 0x00000100
-#define PG_table 0x00000200
-#define PG_guard 0x00000400
-#define PG_hugetlb 0x00000800
-#define PG_slab 0x00001000
+enum pagetype {
+ PG_buddy = 0x00000080,
+ PG_offline = 0x00000100,
+ PG_table = 0x00000200,
+ PG_guard = 0x00000400,
+ PG_hugetlb = 0x00000800,
+ PG_slab = 0x00001000,
+
+ PAGE_TYPE_BASE = 0xf0000000,
+ /* Reserve 0x0000007f to catch underflows of _mapcount */
+ PAGE_MAPCOUNT_RESERVE = -128,
+};
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 3d69589c00a4..59f1df0cde5a 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -346,6 +346,19 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
m->gfp_mask = mask;
}
+/*
+ * There are some parts of the kernel which assume that PMD entries
+ * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
+ * limit the maximum allocation order to PMD size. I'm not aware of any
+ * assumptions about maximum order if THP are disabled, but 8 seems like
+ * a good order (that's 1MB if you're using 4kB pages)
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
+#else
+#define MAX_PAGECACHE_ORDER 8
+#endif
+
/**
* mapping_set_large_folios() - Indicate the file supports large folios.
* @mapping: The file.
@@ -368,10 +381,22 @@ static inline void mapping_set_large_folios(struct address_space *mapping)
*/
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
+ /* AS_LARGE_FOLIO_SUPPORT is only reasonable for pagecache folios */
+ VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
+ "Anonymous mapping always supports large folio");
+
return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}
+/* Return the maximum folio size for this pagecache mapping, in bytes. */
+static inline size_t mapping_max_folio_size(struct address_space *mapping)
+{
+ if (mapping_large_folio_support(mapping))
+ return PAGE_SIZE << MAX_PAGECACHE_ORDER;
+ return PAGE_SIZE;
+}
+
static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
@@ -530,19 +555,6 @@ static inline void *detach_page_private(struct page *page)
return folio_detach_private(page_folio(page));
}
-/*
- * There are some parts of the kernel which assume that PMD entries
- * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
- * limit the maximum allocation order to PMD size. I'm not aware of any
- * assumptions about maximum order if THP are disabled, but 8 seems like
- * a good order (that's 1MB if you're using 4kB pages)
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
-#else
-#define MAX_PAGECACHE_ORDER 8
-#endif
-
#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
#else
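A hedged sketch of how mapping_max_folio_size() can cap a per-iteration copy in a write path (helper name is illustrative):

#include <linux/minmax.h>
#include <linux/pagemap.h>

static size_t example_write_chunk(struct address_space *mapping, loff_t pos,
				  size_t remaining)
{
	size_t max = mapping_max_folio_size(mapping);
	size_t offset = pos & (max - 1);	/* max is always a power of two */

	return min(remaining, max - offset);
}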
diff --git a/include/linux/parport.h b/include/linux/parport.h
index fff39bc30629..464c2ad28039 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -252,13 +252,10 @@ struct parport {
struct parport_driver {
const char *name;
- void (*attach) (struct parport *);
void (*detach) (struct parport *);
void (*match_port)(struct parport *);
int (*probe)(struct pardevice *);
struct device_driver driver;
- bool devmodel;
- struct list_head list;
};
#define to_parport_driver(n) container_of(n, struct parport_driver, driver)
@@ -301,9 +298,6 @@ int __must_check __parport_register_driver(struct parport_driver *,
* to receive notifications about ports being found in the
* system, as well as ports no longer available.
*
- * If devmodel is true then the new device model is used
- * for registration.
- *
* The @driver structure is allocated by the caller and must not be
* deallocated until after calling parport_unregister_driver().
*
diff --git a/include/linux/pci.h b/include/linux/pci.h
index fb004fd4e889..cafc5ab1cbcb 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -413,8 +413,6 @@ struct pci_dev {
struct resource driver_exclusive_resource; /* driver exclusive resource ranges */
bool match_driver; /* Skip attaching driver */
- struct lock_class_key cfg_access_key;
- struct lockdep_map cfg_access_lock;
unsigned int transparent:1; /* Subtractive decode bridge */
unsigned int io_window:1; /* Bridge has I/O window */
diff --git a/include/linux/peci-cpu.h b/include/linux/peci-cpu.h
index ff8ae9c26c80..601cdd086bf6 100644
--- a/include/linux/peci-cpu.h
+++ b/include/linux/peci-cpu.h
@@ -6,6 +6,30 @@
#include <linux/types.h>
+/* Copied from x86 <asm/processor.h> */
+#define X86_VENDOR_INTEL 0
+
+/* Copied from x86 <asm/cpu_device_id.h> */
+#define VFM_MODEL_BIT 0
+#define VFM_FAMILY_BIT 8
+#define VFM_VENDOR_BIT 16
+#define VFM_RSVD_BIT 24
+
+#define VFM_MODEL_MASK GENMASK(VFM_FAMILY_BIT - 1, VFM_MODEL_BIT)
+#define VFM_FAMILY_MASK GENMASK(VFM_VENDOR_BIT - 1, VFM_FAMILY_BIT)
+#define VFM_VENDOR_MASK GENMASK(VFM_RSVD_BIT - 1, VFM_VENDOR_BIT)
+
+#define VFM_MODEL(vfm) (((vfm) & VFM_MODEL_MASK) >> VFM_MODEL_BIT)
+#define VFM_FAMILY(vfm) (((vfm) & VFM_FAMILY_MASK) >> VFM_FAMILY_BIT)
+#define VFM_VENDOR(vfm) (((vfm) & VFM_VENDOR_MASK) >> VFM_VENDOR_BIT)
+
+#define VFM_MAKE(_vendor, _family, _model) ( \
+ ((_model) << VFM_MODEL_BIT) | \
+ ((_family) << VFM_FAMILY_BIT) | \
+ ((_vendor) << VFM_VENDOR_BIT) \
+)
+/* End of copied code */
+
#include "../../arch/x86/include/asm/intel-family.h"
#define PECI_PCS_PKG_ID 0 /* Package Identifier Read */
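A hedged example of packing and unpacking the vendor/family/model value that now backs peci_device->info.x86_vfm (the concrete model number is illustrative):

#include <linux/peci-cpu.h>
#include <linux/printk.h>

/* illustrative: Intel family 6, model 0x8f */
static const u32 example_vfm = VFM_MAKE(X86_VENDOR_INTEL, 6, 0x8f);

static void example_show_vfm(u32 x86_vfm)
{
	pr_debug("vendor %lu family %lu model 0x%lx\n",
		 VFM_VENDOR(x86_vfm), VFM_FAMILY(x86_vfm), VFM_MODEL(x86_vfm));
}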
diff --git a/include/linux/peci.h b/include/linux/peci.h
index 90e241458ef6..3e0bc37591d6 100644
--- a/include/linux/peci.h
+++ b/include/linux/peci.h
@@ -59,8 +59,7 @@ static inline struct peci_controller *to_peci_controller(void *d)
* struct peci_device - PECI device
* @dev: device object to register PECI device to the device model
* @info: PECI device characteristics
- * @info.family: device family
- * @info.model: device model
+ * @info.x86_vfm: device vendor-family-model
* @info.peci_revision: PECI revision supported by the PECI device
* @info.socket_id: the socket ID represented by the PECI device
* @addr: address used on the PECI bus connected to the parent controller
@@ -73,8 +72,7 @@ static inline struct peci_controller *to_peci_controller(void *d)
struct peci_device {
struct device dev;
struct {
- u16 family;
- u8 model;
+ u32 x86_vfm;
u8 peci_revision;
u8 socket_id;
} info;
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index 86ba5d33e43b..9cacadbd61f8 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -37,6 +37,9 @@ static inline union codetag_ref *get_page_tag_ref(struct page *page)
static inline void put_page_tag_ref(union codetag_ref *ref)
{
+ if (WARN_ON(!ref))
+ return;
+
page_ext_put(page_ext_from_codetag_ref(ref));
}
@@ -102,9 +105,11 @@ static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
union codetag_ref *ref = get_page_tag_ref(page);
alloc_tag_sub_check(ref);
- if (ref && ref->ct)
- tag = ct_to_alloc_tag(ref->ct);
- put_page_tag_ref(ref);
+ if (ref) {
+ if (ref->ct)
+ tag = ct_to_alloc_tag(ref->ct);
+ put_page_tag_ref(ref);
+ }
}
return tag;
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 82561242cda4..7f2ff95d2deb 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -435,8 +435,6 @@ struct pnp_protocol {
#define protocol_for_each_dev(protocol, dev) \
list_for_each_entry(dev, &(protocol)->devices, protocol_list)
-extern const struct bus_type pnp_bus_type;
-
#if defined(CONFIG_PNP)
/* device management */
@@ -469,7 +467,7 @@ int compare_pnp_id(struct pnp_id *pos, const char *id);
int pnp_register_driver(struct pnp_driver *drv);
void pnp_unregister_driver(struct pnp_driver *drv);
-#define dev_is_pnp(d) ((d)->bus == &pnp_bus_type)
+bool dev_is_pnp(const struct device *dev);
#else
@@ -502,7 +500,7 @@ static inline int compare_pnp_id(struct pnp_id *pos, const char *id) { return -E
static inline int pnp_register_driver(struct pnp_driver *drv) { return -ENODEV; }
static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
-#define dev_is_pnp(d) false
+static inline bool dev_is_pnp(const struct device *dev) { return false; }
#endif /* CONFIG_PNP */
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 40afab23881a..65c5184470f1 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -60,9 +60,6 @@ static inline const char *printk_skip_headers(const char *buffer)
#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT
#define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET
-int add_preferred_console_match(const char *match, const char *name,
- const short idx);
-
extern int console_printk[];
#define console_loglevel (console_printk[0])
diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
index 6d07c95dabb9..6eec24ffa866 100644
--- a/include/linux/pse-pd/pse.h
+++ b/include/linux/pse-pd/pse.h
@@ -167,14 +167,14 @@ static inline int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
struct pse_control_status *status)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int pse_ethtool_set_config(struct pse_control *psec,
struct netlink_ext_ack *extack,
const struct pse_control_config *config)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline bool pse_has_podl(struct pse_control *psec)
diff --git a/include/linux/security.h b/include/linux/security.h
index 21cf70346b33..de3af33e6ff5 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2048,7 +2048,8 @@ static inline void security_key_post_create_or_update(struct key *keyring,
#ifdef CONFIG_AUDIT
#ifdef CONFIG_SECURITY
-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule);
+int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
+ gfp_t gfp);
int security_audit_rule_known(struct audit_krule *krule);
int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule);
void security_audit_rule_free(void *lsmrule);
@@ -2056,7 +2057,7 @@ void security_audit_rule_free(void *lsmrule);
#else
static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr,
- void **lsmrule)
+ void **lsmrule, gfp_t gfp)
{
return 0;
}
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 8cb65f50e830..aea25eef9a1a 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -811,8 +811,7 @@ enum UART_TX_FLAGS {
if (pending < WAKEUP_CHARS) { \
uart_write_wakeup(__port); \
\
- if (!((flags) & UART_TX_NOSTOP) && pending == 0 && \
- __port->ops->tx_empty(__port)) \
+ if (!((flags) & UART_TX_NOSTOP) && pending == 0) \
__port->ops->stop_tx(__port); \
} \
\
@@ -852,6 +851,24 @@ enum UART_TX_FLAGS {
})
/**
+ * uart_port_tx_limited_flags -- transmit helper for uart_port with count limiting and flags
+ * @port: uart port
+ * @ch: variable to store a character to be written to the HW
+ * @flags: %UART_TX_NOSTOP or similar
+ * @count: a limit of characters to send
+ * @tx_ready: can HW accept more data function
+ * @put_char: function to write a character
+ * @tx_done: function to call after the loop is done
+ *
+ * See uart_port_tx_limited() for more details.
+ */
+#define uart_port_tx_limited_flags(port, ch, flags, count, tx_ready, put_char, tx_done) ({ \
+ unsigned int __count = (count); \
+ __uart_port_tx(port, ch, flags, tx_ready, put_char, tx_done, __count, \
+ __count--); \
+})
+
+/**
* uart_port_tx -- transmit helper for uart_port
* @port: uart port
* @ch: variable to store a character to be written to the HW
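A hedged sketch of a driver TX routine built on the new flags variant, keeping the transmitter running with UART_TX_NOSTOP (register offsets and helpers are hypothetical):

#include <linux/io.h>
#include <linux/serial_core.h>

static void example_put_char(struct uart_port *port, unsigned char ch)
{
	writeb(ch, port->membase + 0x10);	/* hypothetical TX FIFO offset */
}

static void example_tx_chars(struct uart_port *port, unsigned int fifo_room)
{
	unsigned char ch;

	uart_port_tx_limited_flags(port, ch, UART_TX_NOSTOP, fifo_room,
				   true,	/* room already accounted for by fifo_room */
				   example_put_char(port, ch),
				   ({}));
}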
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index e8e1e798924f..8f119f355632 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -1085,12 +1085,13 @@ struct spi_transfer {
unsigned dummy_data:1;
unsigned cs_off:1;
unsigned cs_change:1;
- unsigned tx_nbits:3;
- unsigned rx_nbits:3;
+ unsigned tx_nbits:4;
+ unsigned rx_nbits:4;
unsigned timestamped:1;
#define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */
#define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */
#define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */
+#define SPI_NBITS_OCTAL 0x08 /* 8-bit transfer */
u8 bits_per_word;
struct spi_delay delay;
struct spi_delay cs_change_delay;
@@ -1268,6 +1269,8 @@ static inline void spi_message_free(struct spi_message *m)
extern int spi_optimize_message(struct spi_device *spi, struct spi_message *msg);
extern void spi_unoptimize_message(struct spi_message *msg);
+extern int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
+ struct spi_message *msg);
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
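A hedged sketch of an octal (x8) transfer using the widened nbits fields; the controller must advertise SPI_TX_OCTAL for this to be accepted:

#include <linux/spi/spi.h>

static int example_octal_write(struct spi_device *spi, const void *buf, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = buf,
		.len = len,
		.tx_nbits = SPI_NBITS_OCTAL,
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, &xfer, 1);

	return spi_sync(spi, &msg);
}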
diff --git a/include/linux/string.h b/include/linux/string.h
index 60168aa2af07..9edace076ddb 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -289,7 +289,7 @@ extern void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp) __realloc_si
extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
-extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp)
+extern void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
__realloc_size(2, 3);
/* lib/argv_split.c */
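A hedged example of the swapped argument order, which now mirrors kcalloc() with count before element size (the element type is hypothetical):

#include <linux/slab.h>
#include <linux/string.h>

struct example_sample {
	u32 reg;
	u32 val;
};

static struct example_sample *example_copy_samples(const struct example_sample *src,
						   size_t count)
{
	/* was kmemdup_array(src, sizeof(*src), count, GFP_KERNEL) before the swap */
	return kmemdup_array(src, count, sizeof(*src), GFP_KERNEL);
}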
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 9104952d323d..63424af87bba 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -322,13 +322,13 @@ asmlinkage long sys_io_pgetevents(aio_context_t ctx_id,
long nr,
struct io_event __user *events,
struct __kernel_timespec __user *timeout,
- const struct __aio_sigset *sig);
+ const struct __aio_sigset __user *sig);
asmlinkage long sys_io_pgetevents_time32(aio_context_t ctx_id,
long min_nr,
long nr,
struct io_event __user *events,
struct old_timespec32 __user *timeout,
- const struct __aio_sigset *sig);
+ const struct __aio_sigset __user *sig);
asmlinkage long sys_io_uring_setup(u32 entries,
struct io_uring_params __user *p);
asmlinkage long sys_io_uring_enter(unsigned int fd, u32 to_submit,
@@ -418,7 +418,7 @@ asmlinkage long sys_listmount(const struct mnt_id_req __user *req,
u64 __user *mnt_ids, size_t nr_mnt_ids,
unsigned int flags);
asmlinkage long sys_truncate(const char __user *path, long length);
-asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
+asmlinkage long sys_ftruncate(unsigned int fd, off_t length);
#if BITS_PER_LONG == 32
asmlinkage long sys_truncate64(const char __user *path, loff_t length);
asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
@@ -441,7 +441,7 @@ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
umode_t mode);
asmlinkage long sys_openat2(int dfd, const char __user *filename,
- struct open_how *how, size_t size);
+ struct open_how __user *how, size_t size);
asmlinkage long sys_close(unsigned int fd);
asmlinkage long sys_close_range(unsigned int fd, unsigned int max_fd,
unsigned int flags);
@@ -555,7 +555,7 @@ asmlinkage long sys_get_robust_list(int pid,
asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
size_t len);
-asmlinkage long sys_futex_waitv(struct futex_waitv *waiters,
+asmlinkage long sys_futex_waitv(struct futex_waitv __user *waiters,
unsigned int nr_futexes, unsigned int flags,
struct __kernel_timespec __user *timeout, clockid_t clockid);
@@ -907,7 +907,7 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
asmlinkage long sys_getrandom(char __user *buf, size_t count,
unsigned int flags);
asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags);
-asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
+asmlinkage long sys_bpf(int cmd, union bpf_attr __user *attr, unsigned int size);
asmlinkage long sys_execveat(int dfd, const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp, int flags);
@@ -960,11 +960,11 @@ asmlinkage long sys_cachestat(unsigned int fd,
struct cachestat_range __user *cstat_range,
struct cachestat __user *cstat, unsigned int flags);
asmlinkage long sys_map_shadow_stack(unsigned long addr, unsigned long size, unsigned int flags);
-asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx *ctx,
- u32 *size, u32 flags);
-asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx __user *ctx,
+ u32 __user *size, u32 flags);
+asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx __user *ctx,
u32 size, u32 flags);
-asmlinkage long sys_lsm_list_modules(u64 *ids, u32 *size, u32 flags);
+asmlinkage long sys_lsm_list_modules(u64 __user *ids, u32 __user *size, u32 flags);
/*
* Architecture-specific system calls
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index a7d725fbf739..c4e64dc11206 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -750,6 +750,15 @@ static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...)
{
return 0;
}
+
+static inline ssize_t sysfs_bin_attr_simple_read(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return 0;
+}
#endif /* CONFIG_SYSFS */
static inline int __must_check sysfs_create_file(struct kobject *kobj,
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index c17e4efbb2e5..21a67dc9efe8 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -394,21 +394,6 @@ enum tpm2_object_attributes {
TPM2_OA_SIGN = BIT(18),
};
-/*
- * definitions for the canonical template. These are mandated
- * by the TCG key template documents
- */
-
-#define AES_KEY_BYTES AES_KEYSIZE_128
-#define AES_KEY_BITS (AES_KEY_BYTES*8)
-#define TPM2_OA_TMPL (TPM2_OA_NO_DA | \
- TPM2_OA_FIXED_TPM | \
- TPM2_OA_FIXED_PARENT | \
- TPM2_OA_SENSITIVE_DATA_ORIGIN | \
- TPM2_OA_USER_WITH_AUTH | \
- TPM2_OA_DECRYPT | \
- TPM2_OA_RESTRICTED)
-
enum tpm2_session_attributes {
TPM2_SA_CONTINUE_SESSION = BIT(0),
TPM2_SA_AUDIT_EXCLUSIVE = BIT(1),
@@ -437,8 +422,6 @@ u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset);
u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset);
u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset);
-u8 *tpm_buf_parameters(struct tpm_buf *buf);
-
/*
* Check if TPM device is in the firmware upgrade mode.
*/
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 8b1a29820409..000a6cab2d31 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -64,6 +64,7 @@ struct vfio_device {
struct completion comp;
struct iommufd_access *iommufd_access;
void (*put_kvm)(struct kvm *kvm);
+ struct inode *inode;
#if IS_ENABLED(CONFIG_IOMMUFD)
struct iommufd_device *iommufd_device;
u8 iommufd_attached:1;
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
index a2c8b8bba711..f87067438ed4 100644
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -93,8 +93,6 @@ struct vfio_pci_core_device {
struct list_head sriov_pfs_item;
struct vfio_pci_core_device *sriov_pf_core_dev;
struct notifier_block nb;
- struct mutex vma_lock;
- struct list_head vma_list;
struct rw_semaphore memory_lock;
};
diff --git a/include/linux/w1.h b/include/linux/w1.h
index 9a2a0ef39018..064805bfae3f 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -85,7 +85,8 @@ typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
*
* @data: the first parameter in all the functions below
*
- * @read_bit: Sample the line level @return the level read (0 or 1)
+ * @read_bit: Sample the line level
+ * @return the level read (0 or 1)
*
* @write_bit: Sets the line level
*
@@ -95,7 +96,7 @@ typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
* touch_bit(1) = write-1 / read cycle
* @return the bit read (0 or 1)
*
- * @read_byte: Reads a bytes. Same as 8 touch_bit(1) calls.
+ * @read_byte: Reads a byte. Same as 8 touch_bit(1) calls.
* @return the byte read
*
* @write_byte: Writes a byte. Same as 8 touch_bit(x) calls.
@@ -114,7 +115,7 @@ typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
* @set_pullup: Put out a strong pull-up pulse of the specified duration.
* @return -1=Error, 0=completed
*
- * @search: Really nice hardware can handles the different types of ROM search
+ * @search: Really nice hardware can handle the different types of ROM search
* w1_master* is passed to the slave found callback.
* u8 is search_type, W1_SEARCH or W1_ALARM_SEARCH
*
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index fb3993894536..d9968bfc8eac 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -95,7 +95,7 @@ enum wq_misc_consts {
WORK_BUSY_RUNNING = 1 << 1,
/* maximum string length for set_worker_desc() */
- WORKER_DESC_LEN = 24,
+ WORKER_DESC_LEN = 32,
};
/* Convenience constants - of type 'unsigned long', not 'enum'! */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 9231396fe96f..c43716edf205 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -2113,18 +2113,46 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
{
u16 max_latency;
- if (min > max || min < 6 || max > 3200)
+ if (min > max) {
+ BT_WARN("min %d > max %d", min, max);
return -EINVAL;
+ }
+
+ if (min < 6) {
+ BT_WARN("min %d < 6", min);
+ return -EINVAL;
+ }
+
+ if (max > 3200) {
+ BT_WARN("max %d > 3200", max);
+ return -EINVAL;
+ }
+
+ if (to_multiplier < 10) {
+ BT_WARN("to_multiplier %d < 10", to_multiplier);
+ return -EINVAL;
+ }
- if (to_multiplier < 10 || to_multiplier > 3200)
+ if (to_multiplier > 3200) {
+ BT_WARN("to_multiplier %d > 3200", to_multiplier);
return -EINVAL;
+ }
- if (max >= to_multiplier * 8)
+ if (max >= to_multiplier * 8) {
+ BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier);
return -EINVAL;
+ }
max_latency = (to_multiplier * 4 / max) - 1;
- if (latency > 499 || latency > max_latency)
+ if (latency > 499) {
+ BT_WARN("latency %d > 499", latency);
return -EINVAL;
+ }
+
+ if (latency > max_latency) {
+ BT_WARN("latency %d > max_latency %d", latency, max_latency);
+ return -EINVAL;
+ }
return 0;
}
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 6d1c8541183d..3a9001a042a5 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -24,7 +24,7 @@ struct dst_ops {
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
struct net_device *dev);
- struct dst_entry * (*negative_advice)(struct dst_entry *);
+ void (*negative_advice)(struct sock *sk, struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 7d6b1254c92d..c0deaafebfdc 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -263,7 +263,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
struct request_sock *req,
struct sock *child);
-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout);
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
struct request_sock *req,
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 9a6a08ec7713..1db2417b8ff5 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -461,9 +461,10 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
/* Variant of pskb_inet_may_pull().
*/
-static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
+static inline bool skb_vlan_inet_prepare(struct sk_buff *skb,
+ bool inner_proto_inherit)
{
- int nhlen = 0, maclen = ETH_HLEN;
+ int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
__be16 type = skb->protocol;
/* Essentially this is skb_protocol(skb, true)
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 2796153b03da..188d41da1a40 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -619,6 +619,11 @@ static inline void *nft_set_priv(const struct nft_set *set)
return (void *)set->data;
}
+static inline enum nft_data_types nft_set_datatype(const struct nft_set *set)
+{
+ return set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE;
+}
+
static inline bool nft_set_gc_is_pending(const struct nft_set *s)
{
return refcount_read(&s->refs) != 1;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 02bbdc577f8e..a6a0bf4a247e 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -15,6 +15,9 @@ struct netns_nf {
const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
+#ifdef CONFIG_LWTUNNEL
+ struct ctl_table_header *nf_lwtnl_dir_header;
+#endif
#endif
struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS];
struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS];
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index b088d131aeb0..7e8477057f3d 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -45,16 +45,17 @@ struct pp_alloc_cache {
/**
* struct page_pool_params - page pool parameters
+ * @fast: params accessed frequently on hotpath
* @order: 2^order pages on allocation
* @pool_size: size of the ptr_ring
* @nid: NUMA node id to allocate pages from
* @dev: device, for DMA pre-mapping purposes
- * @netdev: netdev this pool will serve (leave as NULL if none or multiple)
* @napi: NAPI which is the sole consumer of pages, otherwise NULL
* @dma_dir: DMA mapping direction
* @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
* @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
- * @netdev: corresponding &net_device for Netlink introspection
+ * @slow: params with slowpath access only (initialization and Netlink)
+ * @netdev: netdev this pool will serve (leave as NULL if none or multiple)
* @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL
*/
struct page_pool_params {
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index d88c0dfc2d46..ebcb8896bffc 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -285,4 +285,16 @@ static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
return atomic_read(&queue->young);
}
+/* RFC 7323 2.3 Using the Window Scale Option
+ * The window field (SEG.WND) of every outgoing segment, with the
+ * exception of <SYN> segments, MUST be right-shifted by
+ * Rcv.Wind.Shift bits.
+ *
+ * This means the SEG.WND carried in SYNACK can not exceed 65535.
+ * We use this property to harden TCP stack while in NEW_SYN_RECV state.
+ */
+static inline u32 tcp_synack_window(const struct request_sock *req)
+{
+ return min(req->rsk_rcv_wnd, 65535U);
+}
#endif /* _REQUEST_SOCK_H */
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 3bfb80bad173..b45d57b5968a 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -13,6 +13,7 @@ enum rtnl_link_flags {
RTNL_FLAG_DOIT_UNLOCKED = BIT(0),
RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1),
RTNL_FLAG_DUMP_UNLOCKED = BIT(2),
+ RTNL_FLAG_DUMP_SPLIT_NLM_DONE = BIT(3), /* legacy behavior */
};
enum rtnl_kinds {
diff --git a/include/net/sock.h b/include/net/sock.h
index 5f4d0629348f..953c8dc4e259 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2063,17 +2063,10 @@ sk_dst_get(const struct sock *sk)
static inline void __dst_negative_advice(struct sock *sk)
{
- struct dst_entry *ndst, *dst = __sk_dst_get(sk);
+ struct dst_entry *dst = __sk_dst_get(sk);
- if (dst && dst->ops->negative_advice) {
- ndst = dst->ops->negative_advice(dst);
-
- if (ndst != dst) {
- rcu_assign_pointer(sk->sk_dst_cache, ndst);
- sk_tx_queue_clear(sk);
- WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
- }
- }
+ if (dst && dst->ops->negative_advice)
+ dst->ops->negative_advice(sk, dst);
}
static inline void dst_negative_advice(struct sock *sk)
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index 471e177362b4..5d8e9ed2c005 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -86,7 +86,8 @@ static inline int tcp_ao_sizeof_key(const struct tcp_ao_key *key)
struct tcp_ao_info {
/* List of tcp_ao_key's */
struct hlist_head head;
- /* current_key and rnext_key aren't maintained on listen sockets.
+ /* current_key and rnext_key are maintained on sockets
+ * in TCP_AO_ESTABLISHED states.
* Their purpose is to cache keys on established connections,
* saving needless lookups. Never dereference any of them from
* listen sockets.
@@ -201,9 +202,9 @@ struct tcp6_ao_context {
};
struct tcp_sigpool;
+/* Established states are fast-path and always have current_key/rnext_key */
#define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | \
- TCPF_CLOSE | TCPF_CLOSE_WAIT | \
- TCPF_LAST_ACK | TCPF_CLOSING)
+ TCPF_CLOSE_WAIT | TCPF_LAST_ACK | TCPF_CLOSING)
int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
struct tcp_ao_key *key, struct tcphdr *th,
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 6b548dc2c496..1d79a3b536ce 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -69,8 +69,10 @@
#define BLIST_RETRY_ITF ((__force blist_flags_t)(1ULL << 32))
/* Always retry ABORTED_COMMAND with ASC 0xc1 */
#define BLIST_RETRY_ASC_C1 ((__force blist_flags_t)(1ULL << 33))
+/* Do not query the IO Advice Hints Grouping mode page */
+#define BLIST_SKIP_IO_HINTS ((__force blist_flags_t)(1ULL << 34))
-#define __BLIST_LAST_USED BLIST_RETRY_ASC_C1
+#define __BLIST_LAST_USED BLIST_SKIP_IO_HINTS
#define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
(__force blist_flags_t) \
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 0e75b9277c8c..e3b6ce3cbf88 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -200,6 +200,8 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *);
void sas_disable_tlr(struct scsi_device *);
void sas_enable_tlr(struct scsi_device *);
+bool sas_ata_ncq_prio_supported(struct scsi_device *sdev);
+
extern struct sas_rphy *sas_end_device_alloc(struct sas_port *);
extern struct sas_rphy *sas_expander_alloc(struct sas_port *, enum sas_device_type);
void sas_rphy_free(struct sas_rphy *);
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index c11aaf8079fb..f6baa9a01868 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -36,6 +36,7 @@ snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream
int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
struct dma_chan *chan);
int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
+int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream);
int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
dma_filter_fn filter_fn, void *filter_data);
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 61c6054618c8..3edd7a7346da 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -124,7 +124,7 @@ struct snd_pcm_ops {
#define SNDRV_PCM_RATE_768000 (1U<<16) /* 768000Hz */
#define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous range */
-#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuos rates */
+#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuous rates */
#define SNDRV_PCM_RATE_8000_44100 (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\
SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index cf4b98b9a9ed..7d931db02b93 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -33,6 +33,8 @@ enum cachefiles_obj_ref_trace {
cachefiles_obj_see_withdrawal,
cachefiles_obj_get_ondemand_fd,
cachefiles_obj_put_ondemand_fd,
+ cachefiles_obj_get_read_req,
+ cachefiles_obj_put_read_req,
};
enum fscache_why_object_killed {
@@ -127,7 +129,11 @@ enum cachefiles_error_trace {
EM(cachefiles_obj_see_lookup_cookie, "SEE lookup_cookie") \
EM(cachefiles_obj_see_lookup_failed, "SEE lookup_failed") \
EM(cachefiles_obj_see_withdraw_cookie, "SEE withdraw_cookie") \
- E_(cachefiles_obj_see_withdrawal, "SEE withdrawal")
+ EM(cachefiles_obj_see_withdrawal, "SEE withdrawal") \
+ EM(cachefiles_obj_get_ondemand_fd, "GET ondemand_fd") \
+ EM(cachefiles_obj_put_ondemand_fd, "PUT ondemand_fd") \
+ EM(cachefiles_obj_get_read_req, "GET read_req") \
+ E_(cachefiles_obj_put_read_req, "PUT read_req")
#define cachefiles_coherency_traces \
EM(cachefiles_coherency_check_aux, "BAD aux ") \
diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h
index d695a560673f..5ccc0d91b220 100644
--- a/include/trace/events/firewire.h
+++ b/include/trace/events/firewire.h
@@ -36,10 +36,11 @@
#define QUADLET_SIZE 4
DECLARE_EVENT_CLASS(async_outbound_initiate_template,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, header, data, data_count),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, header, data, data_count),
TP_STRUCT__entry(
__field(u64, transaction)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, scode)
__array(u32, header, ASYNC_HEADER_QUADLET_COUNT)
@@ -47,6 +48,7 @@ DECLARE_EVENT_CLASS(async_outbound_initiate_template,
),
TP_fast_assign(
__entry->transaction = transaction;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->scode = scode;
memcpy(__entry->header, header, QUADLET_SIZE * ASYNC_HEADER_QUADLET_COUNT);
@@ -54,8 +56,9 @@ DECLARE_EVENT_CLASS(async_outbound_initiate_template,
),
// This format is for the request subaction.
TP_printk(
- "transaction=0x%llx generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
ASYNC_HEADER_GET_DESTINATION(__entry->header),
@@ -71,10 +74,11 @@ DECLARE_EVENT_CLASS(async_outbound_initiate_template,
// The value of status is one of ack codes and rcodes specific to Linux FireWire subsystem.
DECLARE_EVENT_CLASS(async_outbound_complete_template,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
- TP_ARGS(transaction, generation, scode, status, timestamp),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp),
TP_STRUCT__entry(
__field(u64, transaction)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, scode)
__field(u8, status)
@@ -82,14 +86,16 @@ DECLARE_EVENT_CLASS(async_outbound_complete_template,
),
TP_fast_assign(
__entry->transaction = transaction;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->scode = scode;
__entry->status = status;
__entry->timestamp = timestamp;
),
TP_printk(
- "transaction=0x%llx generation=%u scode=%u status=%u timestamp=0x%04x",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
__entry->status,
@@ -99,10 +105,11 @@ DECLARE_EVENT_CLASS(async_outbound_complete_template,
// The value of status is one of ack codes and rcodes specific to Linux FireWire subsystem.
DECLARE_EVENT_CLASS(async_inbound_template,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, status, timestamp, header, data, data_count),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count),
TP_STRUCT__entry(
__field(u64, transaction)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, scode)
__field(u8, status)
@@ -112,6 +119,7 @@ DECLARE_EVENT_CLASS(async_inbound_template,
),
TP_fast_assign(
__entry->transaction = transaction;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->scode = scode;
__entry->status = status;
@@ -121,8 +129,9 @@ DECLARE_EVENT_CLASS(async_inbound_template,
),
// This format is for the response subaction.
TP_printk(
- "transaction=0x%llx generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
__entry->status,
@@ -139,26 +148,27 @@ DECLARE_EVENT_CLASS(async_inbound_template,
);
DEFINE_EVENT(async_outbound_initiate_template, async_request_outbound_initiate,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, header, data, data_count)
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, header, data, data_count)
);
DEFINE_EVENT(async_outbound_complete_template, async_request_outbound_complete,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
- TP_ARGS(transaction, generation, scode, status, timestamp)
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp)
);
DEFINE_EVENT(async_inbound_template, async_response_inbound,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, status, timestamp, header, data, data_count)
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count)
);
DEFINE_EVENT_PRINT(async_inbound_template, async_request_inbound,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, status, timestamp, header, data, data_count),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count),
TP_printk(
- "transaction=0x%llx generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
__entry->status,
@@ -175,11 +185,12 @@ DEFINE_EVENT_PRINT(async_inbound_template, async_request_inbound,
);
DEFINE_EVENT_PRINT(async_outbound_initiate_template, async_response_outbound_initiate,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, header, data, data_count),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, header, data, data_count),
TP_printk(
- "transaction=0x%llx generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
ASYNC_HEADER_GET_DESTINATION(__entry->header),
@@ -194,8 +205,8 @@ DEFINE_EVENT_PRINT(async_outbound_initiate_template, async_response_outbound_ini
);
DEFINE_EVENT(async_outbound_complete_template, async_response_outbound_complete,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
- TP_ARGS(transaction, generation, scode, status, timestamp)
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp)
);
#undef ASYNC_HEADER_GET_DESTINATION
@@ -206,23 +217,26 @@ DEFINE_EVENT(async_outbound_complete_template, async_response_outbound_complete,
#undef ASYNC_HEADER_GET_RCODE
TRACE_EVENT(async_phy_outbound_initiate,
- TP_PROTO(u64 packet, unsigned int generation, u32 first_quadlet, u32 second_quadlet),
- TP_ARGS(packet, generation, first_quadlet, second_quadlet),
+ TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, u32 first_quadlet, u32 second_quadlet),
+ TP_ARGS(packet, card_index, generation, first_quadlet, second_quadlet),
TP_STRUCT__entry(
__field(u64, packet)
+ __field(u8, card_index)
__field(u8, generation)
__field(u32, first_quadlet)
__field(u32, second_quadlet)
),
TP_fast_assign(
__entry->packet = packet;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->first_quadlet = first_quadlet;
__entry->second_quadlet = second_quadlet
),
TP_printk(
- "packet=0x%llx generation=%u first_quadlet=0x%08x second_quadlet=0x%08x",
+ "packet=0x%llx card_index=%u generation=%u first_quadlet=0x%08x second_quadlet=0x%08x",
__entry->packet,
+ __entry->card_index,
__entry->generation,
__entry->first_quadlet,
__entry->second_quadlet
@@ -230,23 +244,26 @@ TRACE_EVENT(async_phy_outbound_initiate,
);
TRACE_EVENT(async_phy_outbound_complete,
- TP_PROTO(u64 packet, unsigned int generation, unsigned int status, unsigned int timestamp),
- TP_ARGS(packet, generation, status, timestamp),
+ TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, unsigned int status, unsigned int timestamp),
+ TP_ARGS(packet, card_index, generation, status, timestamp),
TP_STRUCT__entry(
__field(u64, packet)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, status)
__field(u16, timestamp)
),
TP_fast_assign(
__entry->packet = packet;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->status = status;
__entry->timestamp = timestamp;
),
TP_printk(
- "packet=0x%llx generation=%u status=%u timestamp=0x%04x",
+ "packet=0x%llx card_index=%u generation=%u status=%u timestamp=0x%04x",
__entry->packet,
+ __entry->card_index,
__entry->generation,
__entry->status,
__entry->timestamp
@@ -254,10 +271,11 @@ TRACE_EVENT(async_phy_outbound_complete,
);
TRACE_EVENT(async_phy_inbound,
- TP_PROTO(u64 packet, unsigned int generation, unsigned int status, unsigned int timestamp, u32 first_quadlet, u32 second_quadlet),
- TP_ARGS(packet, generation, status, timestamp, first_quadlet, second_quadlet),
+ TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, unsigned int status, unsigned int timestamp, u32 first_quadlet, u32 second_quadlet),
+ TP_ARGS(packet, card_index, generation, status, timestamp, first_quadlet, second_quadlet),
TP_STRUCT__entry(
__field(u64, packet)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, status)
__field(u16, timestamp)
@@ -273,8 +291,9 @@ TRACE_EVENT(async_phy_inbound,
__entry->second_quadlet = second_quadlet
),
TP_printk(
- "packet=0x%llx generation=%u status=%u timestamp=0x%04x first_quadlet=0x%08x second_quadlet=0x%08x",
+ "packet=0x%llx card_index=%u generation=%u status=%u timestamp=0x%04x first_quadlet=0x%08x second_quadlet=0x%08x",
__entry->packet,
+ __entry->card_index,
__entry->generation,
__entry->status,
__entry->timestamp,
@@ -284,55 +303,61 @@ TRACE_EVENT(async_phy_inbound,
);
DECLARE_EVENT_CLASS(bus_reset_arrange_template,
- TP_PROTO(unsigned int generation, bool short_reset),
- TP_ARGS(generation, short_reset),
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset),
TP_STRUCT__entry(
+ __field(u8, card_index)
__field(u8, generation)
__field(bool, short_reset)
),
TP_fast_assign(
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->short_reset = short_reset;
),
TP_printk(
- "generation=%u short_reset=%s",
+ "card_index=%u generation=%u short_reset=%s",
+ __entry->card_index,
__entry->generation,
__entry->short_reset ? "true" : "false"
)
);
DEFINE_EVENT(bus_reset_arrange_template, bus_reset_initiate,
- TP_PROTO(unsigned int generation, bool short_reset),
- TP_ARGS(generation, short_reset)
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset)
);
DEFINE_EVENT(bus_reset_arrange_template, bus_reset_schedule,
- TP_PROTO(unsigned int generation, bool short_reset),
- TP_ARGS(generation, short_reset)
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset)
);
DEFINE_EVENT(bus_reset_arrange_template, bus_reset_postpone,
- TP_PROTO(unsigned int generation, bool short_reset),
- TP_ARGS(generation, short_reset)
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset)
);
TRACE_EVENT(bus_reset_handle,
- TP_PROTO(unsigned int generation, unsigned int node_id, bool bm_abdicate, u32 *self_ids, unsigned int self_id_count),
- TP_ARGS(generation, node_id, bm_abdicate, self_ids, self_id_count),
+ TP_PROTO(unsigned int card_index, unsigned int generation, unsigned int node_id, bool bm_abdicate, u32 *self_ids, unsigned int self_id_count),
+ TP_ARGS(card_index, generation, node_id, bm_abdicate, self_ids, self_id_count),
TP_STRUCT__entry(
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, node_id)
__field(bool, bm_abdicate)
__dynamic_array(u32, self_ids, self_id_count)
),
TP_fast_assign(
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->node_id = node_id;
__entry->bm_abdicate = bm_abdicate;
memcpy(__get_dynamic_array(self_ids), self_ids, __get_dynamic_array_len(self_ids));
),
TP_printk(
- "generation=%u node_id=0x%04x bm_abdicate=%s self_ids=%s",
+ "card_index=%u generation=%u node_id=0x%04x bm_abdicate=%s self_ids=%s",
+ __entry->card_index,
__entry->generation,
__entry->node_id,
__entry->bm_abdicate ? "true" : "false",
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index f1b5e816e7e5..ff33f41a9db7 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -81,7 +81,7 @@ TRACE_EVENT(qdisc_reset,
TP_ARGS(q),
TP_STRUCT__entry(
- __string( dev, qdisc_dev(q)->name )
+ __string( dev, qdisc_dev(q) ? qdisc_dev(q)->name : "(null)" )
__string( kind, q->ops->id )
__field( u32, parent )
__field( u32, handle )
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index d983c48a3b6a..d4cc26932ff4 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -737,7 +737,7 @@ __SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
#define __NR_ppoll_time64 414
__SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
#define __NR_io_pgetevents_time64 416
-__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
+__SC_COMP(__NR_io_pgetevents_time64, sys_io_pgetevents, compat_sys_io_pgetevents_time64)
#define __NR_recvmmsg_time64 417
__SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
#define __NR_mq_timedsend_time64 418
diff --git a/include/uapi/linux/cn_proc.h b/include/uapi/linux/cn_proc.h
index f2afb7cc4926..18e3745b86cd 100644
--- a/include/uapi/linux/cn_proc.h
+++ b/include/uapi/linux/cn_proc.h
@@ -69,8 +69,7 @@ struct proc_input {
static inline enum proc_cn_event valid_event(enum proc_cn_event ev_type)
{
- ev_type &= PROC_EVENT_ALL;
- return ev_type;
+ return (enum proc_cn_event)(ev_type & PROC_EVENT_ALL);
}
/*
diff --git a/include/uapi/linux/iio/buffer.h b/include/uapi/linux/iio/buffer.h
index 13939032b3f6..c666aa95e532 100644
--- a/include/uapi/linux/iio/buffer.h
+++ b/include/uapi/linux/iio/buffer.h
@@ -5,6 +5,28 @@
#ifndef _UAPI_IIO_BUFFER_H_
#define _UAPI_IIO_BUFFER_H_
+#include <linux/types.h>
+
+/* Flags for iio_dmabuf.flags */
+#define IIO_BUFFER_DMABUF_CYCLIC (1 << 0)
+#define IIO_BUFFER_DMABUF_SUPPORTED_FLAGS 0x00000001
+
+/**
+ * struct iio_dmabuf - Descriptor for a single IIO DMABUF object
+ * @fd: file descriptor of the DMABUF object
+ * @flags: one or more IIO_BUFFER_DMABUF_* flags
+ * @bytes_used: number of bytes used in this DMABUF for the data transfer.
+ * Should generally be set to the DMABUF's size.
+ */
+struct iio_dmabuf {
+ __u32 fd;
+ __u32 flags;
+ __u64 bytes_used;
+};
+
#define IIO_BUFFER_GET_FD_IOCTL _IOWR('i', 0x91, int)
+#define IIO_BUFFER_DMABUF_ATTACH_IOCTL _IOW('i', 0x92, int)
+#define IIO_BUFFER_DMABUF_DETACH_IOCTL _IOW('i', 0x93, int)
+#define IIO_BUFFER_DMABUF_ENQUEUE_IOCTL _IOW('i', 0x94, struct iio_dmabuf)
#endif /* _UAPI_IIO_BUFFER_H_ */
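The new ioctls above wire DMABUF objects into the IIO buffer interface. A hedged userspace sketch of the intended flow follows; the device path, the origin of the DMABUF fd, and the exact attach semantics (an fd passed by pointer, mirroring the existing GET_FD ioctl) are assumptions, while the ioctl numbers and struct iio_dmabuf come from this header. On a system with updated UAPI headers one would include <linux/iio/buffer.h> instead of redefining them:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

#define IIO_BUFFER_GET_FD_IOCTL		_IOWR('i', 0x91, int)
#define IIO_BUFFER_DMABUF_ATTACH_IOCTL	_IOW('i', 0x92, int)
#define IIO_BUFFER_DMABUF_ENQUEUE_IOCTL	_IOW('i', 0x94, struct iio_dmabuf)

struct iio_dmabuf {
	__u32 fd;
	__u32 flags;
	__u64 bytes_used;
};

int main(int argc, char **argv)
{
	int dev_fd, buf_fd = 0, dmabuf_fd;
	struct iio_dmabuf req;

	if (argc < 3)
		return 1;
	dmabuf_fd = atoi(argv[2]);	/* exported by some DMABUF producer */

	dev_fd = open(argv[1], O_RDWR);	/* e.g. /dev/iio:device0 (assumed path) */
	if (dev_fd < 0)
		return 1;
	/* buffer index 0 in, anonymous buffer fd out */
	if (ioctl(dev_fd, IIO_BUFFER_GET_FD_IOCTL, &buf_fd) < 0)
		return 1;
	if (ioctl(buf_fd, IIO_BUFFER_DMABUF_ATTACH_IOCTL, &dmabuf_fd) < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.fd = dmabuf_fd;
	req.flags = 0;			/* or IIO_BUFFER_DMABUF_CYCLIC */
	req.bytes_used = 4096;		/* usually the DMABUF's full size */
	if (ioctl(buf_fd, IIO_BUFFER_DMABUF_ENQUEUE_IOCTL, &req) < 0)
		return 1;

	close(buf_fd);
	close(dev_fd);
	return 0;
}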
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 03edf2ccdf6c..a4206723f503 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -618,6 +618,8 @@
#define KEY_CAMERA_ACCESS_ENABLE 0x24b /* Enables programmatic access to camera devices. (HUTRR72) */
#define KEY_CAMERA_ACCESS_DISABLE 0x24c /* Disables programmatic access to camera devices. (HUTRR72) */
#define KEY_CAMERA_ACCESS_TOGGLE 0x24d /* Toggles the current state of the camera access control. (HUTRR72) */
+#define KEY_ACCESSIBILITY 0x24e /* Toggles the system bound accessibility UI/command (HUTRR116) */
+#define KEY_DO_NOT_DISTURB 0x24f /* Toggles the system-wide "Do Not Disturb" control (HUTRR94)*/
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
diff --git a/include/uapi/linux/kd.h b/include/uapi/linux/kd.h
index 8ddb2219a84b..6b384065c013 100644
--- a/include/uapi/linux/kd.h
+++ b/include/uapi/linux/kd.h
@@ -5,61 +5,60 @@
#include <linux/compiler.h>
/* 0x4B is 'K', to avoid collision with termios and vt */
-#define KD_IOCTL_BASE 'K'
-#define GIO_FONT _IO(KD_IOCTL_BASE, 0x60) /* gets font in expanded form */
-#define PIO_FONT _IO(KD_IOCTL_BASE, 0x61) /* use font in expanded form */
+#define GIO_FONT 0x4B60 /* gets font in expanded form */
+#define PIO_FONT 0x4B61 /* use font in expanded form */
-#define GIO_FONTX _IO(KD_IOCTL_BASE, 0x6B) /* get font using struct consolefontdesc */
-#define PIO_FONTX _IO(KD_IOCTL_BASE, 0x6C) /* set font using struct consolefontdesc */
+#define GIO_FONTX 0x4B6B /* get font using struct consolefontdesc */
+#define PIO_FONTX 0x4B6C /* set font using struct consolefontdesc */
struct consolefontdesc {
unsigned short charcount; /* characters in font (256 or 512) */
unsigned short charheight; /* scan lines per character (1-32) */
char __user *chardata; /* font data in expanded form */
};
-#define PIO_FONTRESET _IO(KD_IOCTL_BASE, 0x6D) /* reset to default font */
+#define PIO_FONTRESET 0x4B6D /* reset to default font */
-#define GIO_CMAP _IO(KD_IOCTL_BASE, 0x70) /* gets colour palette on VGA+ */
-#define PIO_CMAP _IO(KD_IOCTL_BASE, 0x71) /* sets colour palette on VGA+ */
+#define GIO_CMAP 0x4B70 /* gets colour palette on VGA+ */
+#define PIO_CMAP 0x4B71 /* sets colour palette on VGA+ */
-#define KIOCSOUND _IO(KD_IOCTL_BASE, 0x2F) /* start sound generation (0 for off) */
-#define KDMKTONE _IO(KD_IOCTL_BASE, 0x30) /* generate tone */
+#define KIOCSOUND 0x4B2F /* start sound generation (0 for off) */
+#define KDMKTONE 0x4B30 /* generate tone */
-#define KDGETLED _IO(KD_IOCTL_BASE, 0x31) /* return current led state */
-#define KDSETLED _IO(KD_IOCTL_BASE, 0x32) /* set led state [lights, not flags] */
+#define KDGETLED 0x4B31 /* return current led state */
+#define KDSETLED 0x4B32 /* set led state [lights, not flags] */
#define LED_SCR 0x01 /* scroll lock led */
#define LED_NUM 0x02 /* num lock led */
#define LED_CAP 0x04 /* caps lock led */
-#define KDGKBTYPE _IO(KD_IOCTL_BASE, 0x33) /* get keyboard type */
+#define KDGKBTYPE 0x4B33 /* get keyboard type */
#define KB_84 0x01
#define KB_101 0x02 /* this is what we always answer */
#define KB_OTHER 0x03
-#define KDADDIO _IO(KD_IOCTL_BASE, 0x34) /* add i/o port as valid */
-#define KDDELIO _IO(KD_IOCTL_BASE, 0x35) /* del i/o port as valid */
-#define KDENABIO _IO(KD_IOCTL_BASE, 0x36) /* enable i/o to video board */
-#define KDDISABIO _IO(KD_IOCTL_BASE, 0x37) /* disable i/o to video board */
+#define KDADDIO 0x4B34 /* add i/o port as valid */
+#define KDDELIO 0x4B35 /* del i/o port as valid */
+#define KDENABIO 0x4B36 /* enable i/o to video board */
+#define KDDISABIO 0x4B37 /* disable i/o to video board */
-#define KDSETMODE _IO(KD_IOCTL_BASE, 0x3A) /* set text/graphics mode */
+#define KDSETMODE 0x4B3A /* set text/graphics mode */
#define KD_TEXT 0x00
#define KD_GRAPHICS 0x01
#define KD_TEXT0 0x02 /* obsolete */
#define KD_TEXT1 0x03 /* obsolete */
-#define KDGETMODE _IO(KD_IOCTL_BASE, 0x3B) /* get current mode */
+#define KDGETMODE 0x4B3B /* get current mode */
-#define KDMAPDISP _IO(KD_IOCTL_BASE, 0x3C) /* map display into address space */
-#define KDUNMAPDISP _IO(KD_IOCTL_BASE, 0x3D) /* unmap display from address space */
+#define KDMAPDISP 0x4B3C /* map display into address space */
+#define KDUNMAPDISP 0x4B3D /* unmap display from address space */
typedef char scrnmap_t;
#define E_TABSZ 256
-#define GIO_SCRNMAP _IO(KD_IOCTL_BASE, 0x40) /* get screen mapping from kernel */
-#define PIO_SCRNMAP _IO(KD_IOCTL_BASE, 0x41) /* put screen mapping table in kernel */
-#define GIO_UNISCRNMAP _IO(KD_IOCTL_BASE, 0x69) /* get full Unicode screen mapping */
-#define PIO_UNISCRNMAP _IO(KD_IOCTL_BASE, 0x6A) /* set full Unicode screen mapping */
+#define GIO_SCRNMAP 0x4B40 /* get screen mapping from kernel */
+#define PIO_SCRNMAP 0x4B41 /* put screen mapping table in kernel */
+#define GIO_UNISCRNMAP 0x4B69 /* get full Unicode screen mapping */
+#define PIO_UNISCRNMAP 0x4B6A /* set full Unicode screen mapping */
-#define GIO_UNIMAP _IO(KD_IOCTL_BASE, 0x66) /* get unicode-to-font mapping from kernel */
+#define GIO_UNIMAP 0x4B66 /* get unicode-to-font mapping from kernel */
struct unipair {
unsigned short unicode;
unsigned short fontpos;
@@ -68,8 +67,8 @@ struct unimapdesc {
unsigned short entry_ct;
struct unipair __user *entries;
};
-#define PIO_UNIMAP _IO(KD_IOCTL_BASE, 0x67) /* put unicode-to-font mapping in kernel */
-#define PIO_UNIMAPCLR _IO(KD_IOCTL_BASE, 0x68) /* clear table, possibly advise hash algorithm */
+#define PIO_UNIMAP 0x4B67 /* put unicode-to-font mapping in kernel */
+#define PIO_UNIMAPCLR 0x4B68 /* clear table, possibly advise hash algorithm */
struct unimapinit {
unsigned short advised_hashsize; /* 0 if no opinion */
unsigned short advised_hashstep; /* 0 if no opinion */
@@ -84,19 +83,19 @@ struct unimapinit {
#define K_MEDIUMRAW 0x02
#define K_UNICODE 0x03
#define K_OFF 0x04
-#define KDGKBMODE _IO(KD_IOCTL_BASE, 0x44) /* gets current keyboard mode */
-#define KDSKBMODE _IO(KD_IOCTL_BASE, 0x45) /* sets current keyboard mode */
+#define KDGKBMODE 0x4B44 /* gets current keyboard mode */
+#define KDSKBMODE 0x4B45 /* sets current keyboard mode */
#define K_METABIT 0x03
#define K_ESCPREFIX 0x04
-#define KDGKBMETA _IO(KD_IOCTL_BASE, 0x62) /* gets meta key handling mode */
-#define KDSKBMETA _IO(KD_IOCTL_BASE, 0x63) /* sets meta key handling mode */
+#define KDGKBMETA 0x4B62 /* gets meta key handling mode */
+#define KDSKBMETA 0x4B63 /* sets meta key handling mode */
#define K_SCROLLLOCK 0x01
#define K_NUMLOCK 0x02
#define K_CAPSLOCK 0x04
-#define KDGKBLED _IO(KD_IOCTL_BASE, 0x64) /* get led flags (not lights) */
-#define KDSKBLED _IO(KD_IOCTL_BASE, 0x65) /* set led flags (not lights) */
+#define KDGKBLED 0x4B64 /* get led flags (not lights) */
+#define KDSKBLED 0x4B65 /* set led flags (not lights) */
struct kbentry {
unsigned char kb_table;
@@ -108,15 +107,15 @@ struct kbentry {
#define K_ALTTAB 0x02
#define K_ALTSHIFTTAB 0x03
-#define KDGKBENT _IO(KD_IOCTL_BASE, 0x46) /* gets one entry in translation table */
-#define KDSKBENT _IO(KD_IOCTL_BASE, 0x47) /* sets one entry in translation table */
+#define KDGKBENT 0x4B46 /* gets one entry in translation table */
+#define KDSKBENT 0x4B47 /* sets one entry in translation table */
struct kbsentry {
unsigned char kb_func;
unsigned char kb_string[512];
};
-#define KDGKBSENT _IO(KD_IOCTL_BASE, 0x48) /* gets one function key string entry */
-#define KDSKBSENT _IO(KD_IOCTL_BASE, 0x49) /* sets one function key string entry */
+#define KDGKBSENT 0x4B48 /* gets one function key string entry */
+#define KDSKBSENT 0x4B49 /* sets one function key string entry */
struct kbdiacr {
unsigned char diacr, base, result;
@@ -125,8 +124,8 @@ struct kbdiacrs {
unsigned int kb_cnt; /* number of entries in following array */
struct kbdiacr kbdiacr[256]; /* MAX_DIACR from keyboard.h */
};
-#define KDGKBDIACR _IO(KD_IOCTL_BASE, 0x4A) /* read kernel accent table */
-#define KDSKBDIACR _IO(KD_IOCTL_BASE, 0x4B) /* write kernel accent table */
+#define KDGKBDIACR 0x4B4A /* read kernel accent table */
+#define KDSKBDIACR 0x4B4B /* write kernel accent table */
struct kbdiacruc {
unsigned int diacr, base, result;
@@ -135,16 +134,16 @@ struct kbdiacrsuc {
unsigned int kb_cnt; /* number of entries in following array */
struct kbdiacruc kbdiacruc[256]; /* MAX_DIACR from keyboard.h */
};
-#define KDGKBDIACRUC _IO(KD_IOCTL_BASE, 0xFA) /* read kernel accent table - UCS */
-#define KDSKBDIACRUC _IO(KD_IOCTL_BASE, 0xFB) /* write kernel accent table - UCS */
+#define KDGKBDIACRUC 0x4BFA /* read kernel accent table - UCS */
+#define KDSKBDIACRUC 0x4BFB /* write kernel accent table - UCS */
struct kbkeycode {
unsigned int scancode, keycode;
};
-#define KDGETKEYCODE _IO(KD_IOCTL_BASE, 0x4C) /* read kernel keycode table entry */
-#define KDSETKEYCODE _IO(KD_IOCTL_BASE, 0x4D) /* write kernel keycode table entry */
+#define KDGETKEYCODE 0x4B4C /* read kernel keycode table entry */
+#define KDSETKEYCODE 0x4B4D /* write kernel keycode table entry */
-#define KDSIGACCEPT _IO(KD_IOCTL_BASE, 0x4E) /* accept kbd generated signals */
+#define KDSIGACCEPT 0x4B4E /* accept kbd generated signals */
struct kbd_repeat {
int delay; /* in msec; <= 0: don't change */
@@ -152,11 +151,10 @@ struct kbd_repeat {
/* earlier this field was misnamed "rate" */
};
-#define KDKBDREP _IO(KD_IOCTL_BASE, 0x52) /* set keyboard delay/repeat rate;
- * actually used values are returned
- */
+#define KDKBDREP 0x4B52 /* set keyboard delay/repeat rate;
+ * actually used values are returned */
-#define KDFONTOP _IO(KD_IOCTL_BASE, 0x72) /* font operations */
+#define KDFONTOP 0x4B72 /* font operations */
struct console_font_op {
unsigned int op; /* operation code KD_FONT_OP_* */
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index a8188202413e..43742ac5b00d 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -148,6 +148,7 @@ enum {
NETDEV_A_QSTATS_RX_ALLOC_FAIL,
NETDEV_A_QSTATS_RX_HW_DROPS,
NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS,
+ NETDEV_A_QSTATS_RX_CSUM_COMPLETE,
NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY,
NETDEV_A_QSTATS_RX_CSUM_NONE,
NETDEV_A_QSTATS_RX_CSUM_BAD,
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 67626d535316..95770941ee2c 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -126,8 +126,8 @@ struct statx {
__u64 stx_mnt_id;
__u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
__u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
- __u64 stx_subvol; /* Subvolume identifier */
/* 0xa0 */
+ __u64 stx_subvol; /* Subvolume identifier */
__u64 __spare3[11]; /* Spare space for future expansion */
/* 0x100 */
};
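The hunk above only moves stx_subvol so that the offset comments stay truthful; the field itself still reports the subvolume identifier. A hedged userspace sketch of reading it follows, assuming the STATX_SUBVOL request flag from the same series (value taken as 0x00008000U if the installed headers lack it) and UAPI headers new enough to declare stx_subvol:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/stat.h>

#ifndef STATX_SUBVOL
#define STATX_SUBVOL 0x00008000U	/* assumption if headers are older */
#endif

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2)
		return 1;
	if (syscall(SYS_statx, AT_FDCWD, argv[1], 0,
		    STATX_BASIC_STATS | STATX_SUBVOL, &stx) < 0) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_SUBVOL)
		printf("subvolume id: %llu\n",
		       (unsigned long long)stx.stx_subvol);
	else
		printf("no subvolume id reported\n");
	return 0;
}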
diff --git a/init/Kconfig b/init/Kconfig
index 72404c1f2157..febdea2afc3b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -883,7 +883,7 @@ config GCC10_NO_ARRAY_BOUNDS
config CC_NO_ARRAY_BOUNDS
bool
- default y if CC_IS_GCC && GCC_VERSION >= 100000 && GCC10_NO_ARRAY_BOUNDS
+ default y if CC_IS_GCC && GCC_VERSION >= 90000 && GCC10_NO_ARRAY_BOUNDS
# Currently, disable -Wstringop-overflow for GCC globally.
config GCC_NO_STRINGOP_OVERFLOW
diff --git a/io_uring/cancel.h b/io_uring/cancel.h
index 76b32e65c03c..b33995e00ba9 100644
--- a/io_uring/cancel.h
+++ b/io_uring/cancel.h
@@ -27,10 +27,10 @@ bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
{
- if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq)
+ if (req->cancel_seq_set && sequence == req->work.cancel_seq)
return true;
- req->flags |= REQ_F_CANCEL_SEQ;
+ req->cancel_seq_set = true;
req->work.cancel_seq = sequence;
return false;
}
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index d1c47a9d9215..7d3316fe9bfc 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -927,7 +927,11 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
struct io_wq_acct *acct = io_work_get_acct(wq, work);
unsigned long work_flags = work->flags;
- struct io_cb_cancel_data match;
+ struct io_cb_cancel_data match = {
+ .fn = io_wq_work_match_item,
+ .data = work,
+ .cancel_all = false,
+ };
bool do_create;
/*
@@ -965,10 +969,6 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
raw_spin_unlock(&wq->lock);
/* fatal condition, failed to create the first worker */
- match.fn = io_wq_work_match_item,
- match.data = work,
- match.cancel_all = false,
-
io_acct_cancel_pending_work(wq, acct, &match);
}
}
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 816e93e7f949..c326e2127dd4 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1259,8 +1259,8 @@ static void io_req_normal_work_add(struct io_kiocb *req)
if (ctx->flags & IORING_SETUP_SQPOLL) {
struct io_sq_data *sqd = ctx->sq_data;
- if (wq_has_sleeper(&sqd->wait))
- wake_up(&sqd->wait);
+ if (sqd->thread)
+ __set_notify_signal(sqd->thread);
return;
}
@@ -2058,6 +2058,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
req->file = NULL;
req->rsrc_node = NULL;
req->task = current;
+ req->cancel_seq_set = false;
if (unlikely(opcode >= IORING_OP_LAST)) {
req->opcode = 0;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 624ca9076a50..726e6367af4d 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -433,7 +433,7 @@ static inline bool io_file_can_poll(struct io_kiocb *req)
{
if (req->flags & REQ_F_CAN_POLL)
return true;
- if (file_can_poll(req->file)) {
+ if (req->file && file_can_poll(req->file)) {
req->flags |= REQ_F_CAN_POLL;
return true;
}
diff --git a/io_uring/memmap.c b/io_uring/memmap.c
index 4785d6af5fee..a0f32a255fd1 100644
--- a/io_uring/memmap.c
+++ b/io_uring/memmap.c
@@ -244,6 +244,7 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
struct io_ring_ctx *ctx = file->private_data;
size_t sz = vma->vm_end - vma->vm_start;
long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned int npages;
void *ptr;
ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
@@ -253,8 +254,8 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
switch (offset & IORING_OFF_MMAP_MASK) {
case IORING_OFF_SQ_RING:
case IORING_OFF_CQ_RING:
- return io_uring_mmap_pages(ctx, vma, ctx->ring_pages,
- ctx->n_ring_pages);
+ npages = min(ctx->n_ring_pages, (sz + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ return io_uring_mmap_pages(ctx, vma, ctx->ring_pages, npages);
case IORING_OFF_SQES:
return io_uring_mmap_pages(ctx, vma, ctx->sqe_pages,
ctx->n_sqe_pages);
diff --git a/io_uring/napi.c b/io_uring/napi.c
index 883a1a665907..8c18ede595c4 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -261,12 +261,14 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
}
/*
- * __io_napi_adjust_timeout() - Add napi id to the busy poll list
+ * __io_napi_adjust_timeout() - adjust busy loop timeout
* @ctx: pointer to io-uring context structure
* @iowq: pointer to io wait queue
* @ts: pointer to timespec or NULL
*
* Adjust the busy loop timeout according to timespec and busy poll timeout.
+ * If the specified NAPI timeout is bigger than the wait timeout, then adjust
+ * the NAPI timeout accordingly.
*/
void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
struct timespec64 *ts)
@@ -274,16 +276,16 @@ void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iow
unsigned int poll_to = READ_ONCE(ctx->napi_busy_poll_to);
if (ts) {
- struct timespec64 poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to);
-
- if (timespec64_compare(ts, &poll_to_ts) > 0) {
- *ts = timespec64_sub(*ts, poll_to_ts);
- } else {
- u64 to = timespec64_to_ns(ts);
-
- do_div(to, 1000);
- ts->tv_sec = 0;
- ts->tv_nsec = 0;
+ struct timespec64 poll_to_ts;
+
+ poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to);
+ if (timespec64_compare(ts, &poll_to_ts) < 0) {
+ s64 poll_to_ns = timespec64_to_ns(ts);
+ if (poll_to_ns > 0) {
+ u64 val = poll_to_ns + 999;
+ do_div(val, (s64) 1000);
+ poll_to = val;
+ }
}
}
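The rewritten branch above only ever shrinks the busy-poll budget: when the caller's wait timeout is shorter than the configured NAPI timeout, the budget becomes the wait time rounded up to whole microseconds so it cannot collapse to zero. A self-contained sketch of that clamp (illustrative names, not the io_uring code):

#include <stdio.h>
#include <stdint.h>

static uint32_t clamp_busy_poll_us(uint32_t napi_to_us, int64_t wait_ns)
{
	if (wait_ns > 0 && wait_ns < (int64_t)napi_to_us * 1000)
		return (uint32_t)((wait_ns + 999) / 1000);	/* round up */
	return napi_to_us;
}

int main(void)
{
	printf("%u\n", clamp_busy_poll_us(100, 50 * 1000));	/* 50  */
	printf("%u\n", clamp_busy_poll_us(100, 500));		/* 1   */
	printf("%u\n", clamp_busy_poll_us(100, 10000000));	/* 100 */
	return 0;
}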
diff --git a/io_uring/net.c b/io_uring/net.c
index 0a48596429d9..7c98c4d50946 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1127,6 +1127,9 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
flags |= MSG_DONTWAIT;
retry_multishot:
+ kmsg->msg.msg_inq = -1;
+ kmsg->msg.msg_flags = 0;
+
if (io_do_buffer_select(req)) {
ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
if (unlikely(ret))
@@ -1134,9 +1137,6 @@ retry_multishot:
sr->buf = NULL;
}
- kmsg->msg.msg_inq = -1;
- kmsg->msg.msg_flags = 0;
-
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 2de5cca9504e..2e3b7b16effb 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -516,10 +516,12 @@ const struct io_cold_def io_cold_defs[] = {
},
[IORING_OP_READ_FIXED] = {
.name = "READ_FIXED",
+ .cleanup = io_readv_writev_cleanup,
.fail = io_rw_fail,
},
[IORING_OP_WRITE_FIXED] = {
.name = "WRITE_FIXED",
+ .cleanup = io_readv_writev_cleanup,
.fail = io_rw_fail,
},
[IORING_OP_POLL_ADD] = {
@@ -582,10 +584,12 @@ const struct io_cold_def io_cold_defs[] = {
},
[IORING_OP_READ] = {
.name = "READ",
+ .cleanup = io_readv_writev_cleanup,
.fail = io_rw_fail,
},
[IORING_OP_WRITE] = {
.name = "WRITE",
+ .cleanup = io_readv_writev_cleanup,
.fail = io_rw_fail,
},
[IORING_OP_FADVISE] = {
@@ -692,6 +696,7 @@ const struct io_cold_def io_cold_defs[] = {
},
[IORING_OP_READ_MULTISHOT] = {
.name = "READ_MULTISHOT",
+ .cleanup = io_readv_writev_cleanup,
},
[IORING_OP_WAITID] = {
.name = "WAITID",
diff --git a/io_uring/register.c b/io_uring/register.c
index ef8c908346a4..c0010a66a6f2 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -355,8 +355,10 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
}
if (sqd) {
+ mutex_unlock(&ctx->uring_lock);
mutex_unlock(&sqd->lock);
io_put_sq_data(sqd);
+ mutex_lock(&ctx->uring_lock);
}
if (copy_to_user(arg, new_count, sizeof(new_count)))
@@ -380,8 +382,10 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
return 0;
err:
if (sqd) {
+ mutex_unlock(&ctx->uring_lock);
mutex_unlock(&sqd->lock);
io_put_sq_data(sqd);
+ mutex_lock(&ctx->uring_lock);
}
return ret;
}
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 65417c9553b1..570bfa6a31aa 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -249,6 +249,7 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
ret = io_run_task_work_sig(ctx);
if (ret < 0) {
+ __set_current_state(TASK_RUNNING);
mutex_lock(&ctx->uring_lock);
if (list_empty(&ctx->rsrc_ref_list))
ret = 0;
@@ -1067,7 +1068,6 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
* branch doesn't expect non PAGE_SIZE'd chunks.
*/
iter->bvec = bvec;
- iter->nr_segs = bvec->bv_len;
iter->count -= offset;
iter->iov_offset = offset;
} else {
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index be8c680121e4..d6ef4f4f9cba 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -529,7 +529,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
entry->rule.buflen += f_val;
f->lsm_str = str;
err = security_audit_rule_init(f->type, f->op, str,
- (void **)&f->lsm_rule);
+ (void **)&f->lsm_rule,
+ GFP_KERNEL);
/* Keep currently invalid fields around in case they
* become valid after a policy reload. */
if (err == -EINVAL) {
@@ -799,7 +800,7 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
/* our own (refreshed) copy of lsm_rule */
ret = security_audit_rule_init(df->type, df->op, df->lsm_str,
- (void **)&df->lsm_rule);
+ (void **)&df->lsm_rule, GFP_KERNEL);
/* Keep currently invalid fields around in case they
* become valid after a policy reload. */
if (ret == -EINVAL) {
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
index 583ee4fe48ef..e52b3ad231b9 100644
--- a/kernel/bpf/arena.c
+++ b/kernel/bpf/arena.c
@@ -212,6 +212,7 @@ static u64 arena_map_mem_usage(const struct bpf_map *map)
struct vma_list {
struct vm_area_struct *vma;
struct list_head head;
+ atomic_t mmap_count;
};
static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
@@ -221,20 +222,30 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
vml = kmalloc(sizeof(*vml), GFP_KERNEL);
if (!vml)
return -ENOMEM;
+ atomic_set(&vml->mmap_count, 1);
vma->vm_private_data = vml;
vml->vma = vma;
list_add(&vml->head, &arena->vma_list);
return 0;
}
+static void arena_vm_open(struct vm_area_struct *vma)
+{
+ struct vma_list *vml = vma->vm_private_data;
+
+ atomic_inc(&vml->mmap_count);
+}
+
static void arena_vm_close(struct vm_area_struct *vma)
{
struct bpf_map *map = vma->vm_file->private_data;
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
- struct vma_list *vml;
+ struct vma_list *vml = vma->vm_private_data;
+ if (!atomic_dec_and_test(&vml->mmap_count))
+ return;
guard(mutex)(&arena->lock);
- vml = vma->vm_private_data;
+ /* update link list under lock */
list_del(&vml->head);
vma->vm_private_data = NULL;
kfree(vml);
@@ -287,6 +298,7 @@ out:
}
static const struct vm_operations_struct arena_vm_ops = {
+ .open = arena_vm_open,
.close = arena_vm_close,
.fault = arena_vm_fault,
};
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 1a6c3faa6e4a..695a0fb2cd4d 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -736,11 +736,11 @@ static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
}
-const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+int __bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym)
{
struct bpf_ksym *ksym;
- char *ret = NULL;
+ int ret = 0;
rcu_read_lock();
ksym = bpf_ksym_find(addr);
@@ -748,9 +748,8 @@ const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long symbol_start = ksym->start;
unsigned long symbol_end = ksym->end;
- strscpy(sym, ksym->name, KSYM_NAME_LEN);
+ ret = strscpy(sym, ksym->name, KSYM_NAME_LEN);
- ret = sym;
if (size)
*size = symbol_end - symbol_start;
if (off)
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 4e2cdbb5629f..7f3b34452243 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -760,9 +760,6 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
for (i = 0; i < dtab->n_buckets; i++) {
head = dev_map_index_hash(dtab, i);
hlist_for_each_entry_safe(dst, next, head, index_hlist) {
- if (!dst)
- continue;
-
if (is_ifindex_excluded(excluded_devices, num_excluded,
dst->dev->ifindex))
continue;
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 0ee653a936ea..e20b90c36131 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -51,7 +51,8 @@ struct bpf_ringbuf {
* This prevents a user-space application from modifying the
* position and ruining in-kernel tracking. The permissions of the
* pages depend on who is producing samples: user-space or the
- * kernel.
+ * kernel. Note that the pending counter is placed in the same
+ * page as the producer, so that it shares the same cache line.
*
* Kernel-producer
* ---------------
@@ -70,6 +71,7 @@ struct bpf_ringbuf {
*/
unsigned long consumer_pos __aligned(PAGE_SIZE);
unsigned long producer_pos __aligned(PAGE_SIZE);
+ unsigned long pending_pos;
char data[] __aligned(PAGE_SIZE);
};
@@ -179,6 +181,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
rb->mask = data_sz - 1;
rb->consumer_pos = 0;
rb->producer_pos = 0;
+ rb->pending_pos = 0;
return rb;
}
@@ -404,9 +407,9 @@ bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
{
- unsigned long cons_pos, prod_pos, new_prod_pos, flags;
- u32 len, pg_off;
+ unsigned long cons_pos, prod_pos, new_prod_pos, pend_pos, flags;
struct bpf_ringbuf_hdr *hdr;
+ u32 len, pg_off, tmp_size, hdr_len;
if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
return NULL;
@@ -424,13 +427,29 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
spin_lock_irqsave(&rb->spinlock, flags);
}
+ pend_pos = rb->pending_pos;
prod_pos = rb->producer_pos;
new_prod_pos = prod_pos + len;
- /* check for out of ringbuf space by ensuring producer position
- * doesn't advance more than (ringbuf_size - 1) ahead
+ while (pend_pos < prod_pos) {
+ hdr = (void *)rb->data + (pend_pos & rb->mask);
+ hdr_len = READ_ONCE(hdr->len);
+ if (hdr_len & BPF_RINGBUF_BUSY_BIT)
+ break;
+ tmp_size = hdr_len & ~BPF_RINGBUF_DISCARD_BIT;
+ tmp_size = round_up(tmp_size + BPF_RINGBUF_HDR_SZ, 8);
+ pend_pos += tmp_size;
+ }
+ rb->pending_pos = pend_pos;
+
+ /* check for out of ringbuf space:
+ * - by ensuring producer position doesn't advance more than
+ * (ringbuf_size - 1) ahead
+ * - by ensuring that the span from the oldest not-yet-committed
+ * record to the newest record does not exceed (ringbuf_size - 1)
*/
- if (new_prod_pos - cons_pos > rb->mask) {
+ if (new_prod_pos - cons_pos > rb->mask ||
+ new_prod_pos - pend_pos > rb->mask) {
spin_unlock_irqrestore(&rb->spinlock, flags);
return NULL;
}
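The extra check above prevents a reservation from wrapping over a record that was reserved but never committed, which the consumer position alone cannot guarantee because userspace can move it. A small self-contained model of the two checks, with illustrative positions on a 16-byte ring (this is not the kernel code):

#include <stdio.h>
#include <stdint.h>

#define RB_MASK 15u	/* ring of size 16 */

static int old_check(uint32_t cons, uint32_t prod, uint32_t len)
{
	return (prod + len) - cons <= RB_MASK;
}

static int new_check(uint32_t cons, uint32_t pend, uint32_t prod, uint32_t len)
{
	uint32_t new_prod = prod + len;

	return new_prod - cons <= RB_MASK && new_prod - pend <= RB_MASK;
}

int main(void)
{
	/* A 16-byte record was reserved at position 0 and never committed,
	 * so pending_pos stays at 0 while producer_pos advanced to 16.
	 * Userspace then moved consumer_pos to 16. An 8-byte reservation
	 * would land at offset 16 & 15 == 0, on top of the BUSY record.
	 */
	printf("old check: %d\n", old_check(16, 16, 8));	/* 1: allowed, data corrupted */
	printf("new check: %d\n", new_check(16, 0, 16, 8));	/* 0: refused */
	return 0;
}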
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2222c3ff88e7..f45ed6adc092 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2998,6 +2998,7 @@ static int bpf_obj_get(const union bpf_attr *attr)
void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
const struct bpf_link_ops *ops, struct bpf_prog *prog)
{
+ WARN_ON(ops->dealloc && ops->dealloc_deferred);
atomic64_set(&link->refcnt, 1);
link->type = type;
link->id = 0;
@@ -3056,16 +3057,17 @@ static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
/* bpf_link_free is guaranteed to be called from process context */
static void bpf_link_free(struct bpf_link *link)
{
+ const struct bpf_link_ops *ops = link->ops;
bool sleepable = false;
bpf_link_free_id(link->id);
if (link->prog) {
sleepable = link->prog->sleepable;
/* detach BPF program, clean up used resources */
- link->ops->release(link);
+ ops->release(link);
bpf_prog_put(link->prog);
}
- if (link->ops->dealloc_deferred) {
+ if (ops->dealloc_deferred) {
/* schedule BPF link deallocation; if underlying BPF program
* is sleepable, we need to first wait for RCU tasks trace
* sync, then go through "classic" RCU grace period
@@ -3074,9 +3076,8 @@ static void bpf_link_free(struct bpf_link *link)
call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
else
call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
- }
- if (link->ops->dealloc)
- link->ops->dealloc(link);
+ } else if (ops->dealloc)
+ ops->dealloc(link);
}
static void bpf_link_put_deferred(struct work_struct *work)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 77da1f438bec..214a9fa8c6fb 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4549,11 +4549,12 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
state->stack[spi].spilled_ptr.id = 0;
} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
env->bpf_capable) {
- struct bpf_reg_state fake_reg = {};
+ struct bpf_reg_state *tmp_reg = &env->fake_reg[0];
- __mark_reg_known(&fake_reg, insn->imm);
- fake_reg.type = SCALAR_VALUE;
- save_register_state(env, state, spi, &fake_reg, size);
+ memset(tmp_reg, 0, sizeof(*tmp_reg));
+ __mark_reg_known(tmp_reg, insn->imm);
+ tmp_reg->type = SCALAR_VALUE;
+ save_register_state(env, state, spi, tmp_reg, size);
} else if (reg && is_spillable_regtype(reg->type)) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
@@ -6235,6 +6236,7 @@ static void set_sext32_default_val(struct bpf_reg_state *reg, int size)
}
reg->u32_min_value = 0;
reg->u32_max_value = U32_MAX;
+ reg->var_off = tnum_subreg(tnum_unknown);
}
static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
@@ -6279,6 +6281,7 @@ static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
reg->s32_max_value = s32_max;
reg->u32_min_value = (u32)s32_min;
reg->u32_max_value = (u32)s32_max;
+ reg->var_off = tnum_subreg(tnum_range(s32_min, s32_max));
return;
}
@@ -8882,7 +8885,8 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
enum bpf_attach_type eatype = env->prog->expected_attach_type;
enum bpf_prog_type type = resolve_prog_type(env->prog);
- if (func_id != BPF_FUNC_map_update_elem)
+ if (func_id != BPF_FUNC_map_update_elem &&
+ func_id != BPF_FUNC_map_delete_elem)
return false;
/* It's not possible to get access to a locked struct sock in these
@@ -8893,6 +8897,11 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
if (eatype == BPF_TRACE_ITER)
return true;
break;
+ case BPF_PROG_TYPE_SOCK_OPS:
+ /* map_update allowed only via dedicated helpers with event type checks */
+ if (func_id == BPF_FUNC_map_delete_elem)
+ return true;
+ break;
case BPF_PROG_TYPE_SOCKET_FILTER:
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
@@ -8988,7 +8997,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_SOCKMAP:
if (func_id != BPF_FUNC_sk_redirect_map &&
func_id != BPF_FUNC_sock_map_update &&
- func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_map &&
func_id != BPF_FUNC_sk_select_reuseport &&
func_id != BPF_FUNC_map_lookup_elem &&
@@ -8998,7 +9006,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_SOCKHASH:
if (func_id != BPF_FUNC_sk_redirect_hash &&
func_id != BPF_FUNC_sock_hash_update &&
- func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_hash &&
func_id != BPF_FUNC_sk_select_reuseport &&
func_id != BPF_FUNC_map_lookup_elem &&
@@ -11124,7 +11131,11 @@ BTF_ID(func, bpf_iter_css_task_new)
#else
BTF_ID_UNUSED
#endif
+#ifdef CONFIG_BPF_EVENTS
BTF_ID(func, bpf_session_cookie)
+#else
+BTF_ID_UNUSED
+#endif
static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
{
@@ -12710,6 +12721,16 @@ static bool signed_add32_overflows(s32 a, s32 b)
return res < a;
}
+static bool signed_add16_overflows(s16 a, s16 b)
+{
+ /* Do the add in u16, where overflow is well-defined */
+ s16 res = (s16)((u16)a + (u16)b);
+
+ if (b < 0)
+ return res > a;
+ return res < a;
+}
+
static bool signed_sub_overflows(s64 a, s64 b)
{
/* Do the sub in u64, where overflow is well-defined */
@@ -15105,7 +15126,6 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
struct bpf_reg_state *eq_branch_regs;
- struct bpf_reg_state fake_reg = {};
u8 opcode = BPF_OP(insn->code);
bool is_jmp32;
int pred = -1;
@@ -15171,7 +15191,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
return -EINVAL;
}
- src_reg = &fake_reg;
+ src_reg = &env->fake_reg[0];
+ memset(src_reg, 0, sizeof(*src_reg));
src_reg->type = SCALAR_VALUE;
__mark_reg_known(src_reg, insn->imm);
}
@@ -15231,10 +15252,16 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
&other_branch_regs[insn->src_reg],
dst_reg, src_reg, opcode, is_jmp32);
} else /* BPF_SRC(insn->code) == BPF_K */ {
+ /* reg_set_min_max() can mangle the fake_reg. Make a copy
+ * so that these are two different memory locations. The
+ * src_reg is not used beyond here in context of K.
+ */
+ memcpy(&env->fake_reg[1], &env->fake_reg[0],
+ sizeof(env->fake_reg[0]));
err = reg_set_min_max(env,
&other_branch_regs[insn->dst_reg],
- src_reg /* fake one */,
- dst_reg, src_reg /* same fake one */,
+ &env->fake_reg[0],
+ dst_reg, &env->fake_reg[1],
opcode, is_jmp32);
}
if (err)
@@ -17433,11 +17460,11 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
goto skip_inf_loop_check;
}
if (is_may_goto_insn_at(env, insn_idx)) {
- if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
+ if (sl->state.may_goto_depth != cur->may_goto_depth &&
+ states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
update_loop_entry(cur, &sl->state);
goto hit;
}
- goto skip_inf_loop_check;
}
if (calls_callback(env, insn_idx)) {
if (states_equal(env, &sl->state, cur, RANGE_WITHIN))
@@ -18715,6 +18742,39 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
return new_prog;
}
+/*
+ * For all jmp insns in a given 'prog' that point to 'tgt_idx' insn adjust the
+ * jump offset by 'delta'.
+ */
+static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
+{
+ struct bpf_insn *insn = prog->insnsi;
+ u32 insn_cnt = prog->len, i;
+
+ for (i = 0; i < insn_cnt; i++, insn++) {
+ u8 code = insn->code;
+
+ if ((BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) ||
+ BPF_OP(code) == BPF_CALL || BPF_OP(code) == BPF_EXIT)
+ continue;
+
+ if (insn->code == (BPF_JMP32 | BPF_JA)) {
+ if (i + 1 + insn->imm != tgt_idx)
+ continue;
+ if (signed_add32_overflows(insn->imm, delta))
+ return -ERANGE;
+ insn->imm += delta;
+ } else {
+ if (i + 1 + insn->off != tgt_idx)
+ continue;
+ if (signed_add16_overflows(insn->off, delta))
+ return -ERANGE;
+ insn->off += delta;
+ }
+ }
+ return 0;
+}
+
static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
u32 off, u32 cnt)
{
@@ -19989,7 +20049,10 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
stack_depth_extra = 8;
insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_AX, BPF_REG_10, stack_off);
- insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 2);
+ if (insn->off >= 0)
+ insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 2);
+ else
+ insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off - 1);
insn_buf[2] = BPF_ALU64_IMM(BPF_SUB, BPF_REG_AX, 1);
insn_buf[3] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_AX, stack_off);
cnt = 4;
@@ -20305,7 +20368,7 @@ patch_map_ops_generic:
goto next_insn;
}
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
/* Implement bpf_get_smp_processor_id() inline. */
if (insn->imm == BPF_FUNC_get_smp_processor_id &&
prog->jit_requested && bpf_jit_supports_percpu_insn()) {
@@ -20531,6 +20594,13 @@ next_insn:
if (!new_prog)
return -ENOMEM;
env->prog = prog = new_prog;
+ /*
+ * If may_goto is a first insn of a prog there could be a jmp
+ * insn that points to it, hence adjust all such jmps to point
+ * to insn after BPF_ST that inits may_goto count.
+ * Adjustment will succeed because bpf_patch_insn_data() didn't fail.
+ */
+ WARN_ON(adjust_jmp_off(env->prog, subprog_start, 1));
}
/* Since poke tab is now finalized, publish aux to tracker. */
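adjust_jmp_off() keeps relative jumps pointing past code that gets inserted at their target, which matters when the may_goto counter-init instruction is patched in front of what used to be the first instruction. A toy, self-contained model of that offset fix-up (plain offsets only, not real BPF instructions):

#include <stdio.h>
#include <stdint.h>

struct toy_insn {
	int16_t off;	/* relative jump offset: target index is i + 1 + off */
};

static void adjust(struct toy_insn *insns, int cnt, int tgt_idx, int delta)
{
	for (int i = 0; i < cnt; i++)
		if (i + 1 + insns[i].off == tgt_idx)
			insns[i].off += delta;
}

int main(void)
{
	/* insn 3 jumps back to index 0: off = 0 - (3 + 1) = -4 */
	struct toy_insn prog[4] = { {0}, {0}, {0}, { .off = -4 } };

	adjust(prog, 4, 0, 1);	/* one instruction was inserted at index 0 */
	/* off is now -3, so the jump resolves to index 1, just past the
	 * inserted instruction, matching the intent described above.
	 */
	printf("insn 3 off: %d\n", prog[3].off);
	return 0;
}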
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 563877d6c28b..3d2bf1d50a0c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1859,6 +1859,9 @@ static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return fals
void __init bringup_nonboot_cpus(unsigned int max_cpus)
{
+ if (!max_cpus)
+ return;
+
/* Try parallel bringup optimization if enabled */
if (cpuhp_bringup_cpus_parallel(max_cpus))
return;
@@ -2446,7 +2449,7 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
* The caller needs to hold cpus read locked while calling this function.
* Return:
* On success:
- * Positive state number if @state is CPUHP_AP_ONLINE_DYN;
+ * Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN;
* 0 for all other states
* On failure: proper (negative) error code
*/
@@ -2469,7 +2472,7 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
ret = cpuhp_store_callbacks(state, name, startup, teardown,
multi_instance);
- dynstate = state == CPUHP_AP_ONLINE_DYN;
+ dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN;
if (ret > 0 && dynstate) {
state = ret;
ret = 0;
@@ -2500,8 +2503,8 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
out:
mutex_unlock(&cpuhp_state_mutex);
/*
- * If the requested state is CPUHP_AP_ONLINE_DYN, return the
- * dynamically allocated state in case of success.
+ * If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN,
+ * return the dynamically allocated state in case of success.
*/
if (!ret && dynstate)
return state;
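With the change above, requesting a dynamically allocated PREPARE-section state behaves like CPUHP_AP_ONLINE_DYN: the positive return value is the allocated state number. A hedged kernel-module sketch of that usage follows; the callbacks and names are invented for illustration:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpuhotplug.h>

static enum cpuhp_state example_state;

static int example_prepare_cpu(unsigned int cpu)
{
	/* allocate per-CPU resources before the CPU is brought up */
	return 0;
}

static int example_dead_cpu(unsigned int cpu)
{
	/* release them after the CPU has gone down */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "example:prepare",
				example_prepare_cpu, example_dead_cpu);
	if (ret < 0)
		return ret;

	example_state = ret;	/* positive: the dynamically allocated state */
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");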
diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
index 02205ab53b7e..4950e0b622b1 100644
--- a/kernel/dma/map_benchmark.c
+++ b/kernel/dma/map_benchmark.c
@@ -101,7 +101,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
struct task_struct **tsk;
int threads = map->bparam.threads;
int node = map->bparam.node;
- const cpumask_t *cpu_mask = cpumask_of_node(node);
u64 loops;
int ret = 0;
int i;
@@ -118,11 +117,13 @@ static int do_map_benchmark(struct map_benchmark_data *map)
if (IS_ERR(tsk[i])) {
pr_err("create dma_map thread failed\n");
ret = PTR_ERR(tsk[i]);
+ while (--i >= 0)
+ kthread_stop(tsk[i]);
goto out;
}
if (node != NUMA_NO_NODE)
- kthread_bind_mask(tsk[i], cpu_mask);
+ kthread_bind_mask(tsk[i], cpumask_of_node(node));
}
/* clear the old value in the previous benchmark */
@@ -139,13 +140,17 @@ static int do_map_benchmark(struct map_benchmark_data *map)
msleep_interruptible(map->bparam.seconds * 1000);
- /* wait for the completion of benchmark threads */
+ /* wait for the completion of all started benchmark threads */
for (i = 0; i < threads; i++) {
- ret = kthread_stop(tsk[i]);
- if (ret)
- goto out;
+ int kthread_ret = kthread_stop_put(tsk[i]);
+
+ if (kthread_ret)
+ ret = kthread_ret;
}
+ if (ret)
+ goto out;
+
loops = atomic64_read(&map->loops);
if (likely(loops > 0)) {
u64 map_variance, unmap_variance;
@@ -170,8 +175,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
}
out:
- for (i = 0; i < threads; i++)
- put_task_struct(tsk[i]);
put_device(map->dev);
kfree(tsk);
return ret;
@@ -208,7 +211,8 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
}
if (map->bparam.node != NUMA_NO_NODE &&
- !node_possible(map->bparam.node)) {
+ (map->bparam.node < 0 || map->bparam.node >= MAX_NUMNODES ||
+ !node_possible(map->bparam.node))) {
pr_err("invalid numa node\n");
return -EINVAL;
}
@@ -252,6 +256,9 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
* dma_mask changed by benchmark
*/
dma_set_mask(map->dev, old_dma_mask);
+
+ if (ret)
+ return ret;
break;
default:
return -EINVAL;
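A small user-space model of the unwind pattern adopted in do_map_benchmark() above: when creation fails partway through the loop, only the entries created so far are torn down before bailing out.

#include <stdio.h>
#include <stdlib.h>

static int create_all(void **res, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		res[i] = malloc(64);
		if (!res[i]) {
			while (--i >= 0)	/* unwind what was created */
				free(res[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	void *res[4];

	if (create_all(res, 4) == 0) {
		for (int i = 0; i < 4; i++)
			free(res[i]);
		puts("all created and released");
	}
	return 0;
}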
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f0128c5ff278..8f908f077935 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5384,6 +5384,7 @@ int perf_event_release_kernel(struct perf_event *event)
again:
mutex_lock(&event->child_mutex);
list_for_each_entry(child, &event->child_list, child_list) {
+ void *var = NULL;
/*
* Cannot change, child events are not migrated, see the
@@ -5424,11 +5425,23 @@ again:
* this can't be the last reference.
*/
put_event(event);
+ } else {
+ var = &ctx->refcount;
}
mutex_unlock(&event->child_mutex);
mutex_unlock(&ctx->mutex);
put_ctx(ctx);
+
+ if (var) {
+ /*
+ * If perf_event_free_task() has deleted all events from the
+ * ctx while the child_mutex got released above, make sure to
+ * notify about the preceding put_ctx().
+ */
+ smp_mb(); /* pairs with wait_var_event() */
+ wake_up_var(var);
+ }
goto again;
}
mutex_unlock(&event->child_mutex);
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index 74a4ef1da9ad..fd75b4a484d7 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -18,7 +18,9 @@
#include <linux/mm.h>
#include "gcov.h"
-#if (__GNUC__ >= 10)
+#if (__GNUC__ >= 14)
+#define GCOV_COUNTERS 9
+#elif (__GNUC__ >= 10)
#define GCOV_COUNTERS 8
#elif (__GNUC__ >= 7)
#define GCOV_COUNTERS 9
diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
index 6d443ea22bb7..383fd43ac612 100755
--- a/kernel/gen_kheaders.sh
+++ b/kernel/gen_kheaders.sh
@@ -14,7 +14,12 @@ include/
arch/$SRCARCH/include/
"
-type cpio > /dev/null
+if ! command -v cpio >/dev/null; then
+ echo >&2 "***"
+ echo >&2 "*** 'cpio' could not be found."
+ echo >&2 "***"
+ exit 1
+fi
# Support incremental builds by skipping archive generation
# if timestamps of files being archived are not changed.
@@ -84,7 +89,7 @@ find $cpio_dir -type f -print0 |
# Create archive and try to normalize metadata for reproducibility.
tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
- --owner=0 --group=0 --sort=name --numeric-owner \
+ --owner=0 --group=0 --sort=name --numeric-owner --mode=u=rw,go=r,a+X \
-I $XZ -cf $tarfile -C $cpio_dir/ . > /dev/null
echo $headers_md5 > kernel/kheaders.md5
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 22ea19a36e6e..98b9622d372e 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -388,12 +388,12 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
!!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
}
-static const char *kallsyms_lookup_buildid(unsigned long addr,
+static int kallsyms_lookup_buildid(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset, char **modname,
const unsigned char **modbuildid, char *namebuf)
{
- const char *ret;
+ int ret;
namebuf[KSYM_NAME_LEN - 1] = 0;
namebuf[0] = 0;
@@ -410,7 +410,7 @@ static const char *kallsyms_lookup_buildid(unsigned long addr,
if (modbuildid)
*modbuildid = NULL;
- ret = namebuf;
+ ret = strlen(namebuf);
goto found;
}
@@ -442,8 +442,13 @@ const char *kallsyms_lookup(unsigned long addr,
unsigned long *offset,
char **modname, char *namebuf)
{
- return kallsyms_lookup_buildid(addr, symbolsize, offset, modname,
- NULL, namebuf);
+ int ret = kallsyms_lookup_buildid(addr, symbolsize, offset, modname,
+ NULL, namebuf);
+
+ if (!ret)
+ return NULL;
+
+ return namebuf;
}
int lookup_symbol_name(unsigned long addr, char *symname)
@@ -478,19 +483,15 @@ static int __sprint_symbol(char *buffer, unsigned long address,
{
char *modname;
const unsigned char *buildid;
- const char *name;
unsigned long offset, size;
int len;
address += symbol_offset;
- name = kallsyms_lookup_buildid(address, &size, &offset, &modname, &buildid,
+ len = kallsyms_lookup_buildid(address, &size, &offset, &modname, &buildid,
buffer);
- if (!name)
+ if (!len)
return sprintf(buffer, "0x%lx", address - symbol_offset);
- if (name != buffer)
- strcpy(buffer, name);
- len = strlen(buffer);
offset -= symbol_offset;
if (add_offset)
diff --git a/kernel/kcov.c b/kernel/kcov.c
index c3124f6d5536..f0a69d402066 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -632,6 +632,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
return -EINVAL;
kcov->mode = mode;
t->kcov = kcov;
+ t->kcov_mode = KCOV_MODE_REMOTE;
kcov->t = t;
kcov->remote = true;
kcov->remote_size = remote_arg->area_size;
diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c
index 62fb57bb9f16..bf65e0c3c86f 100644
--- a/kernel/module/kallsyms.c
+++ b/kernel/module/kallsyms.c
@@ -321,14 +321,15 @@ void * __weak dereference_module_function_descriptor(struct module *mod,
* For kallsyms to ask for address resolution. NULL means not found. Careful
* not to lock to avoid deadlock on oopses, simply disable preemption.
*/
-const char *module_address_lookup(unsigned long addr,
- unsigned long *size,
- unsigned long *offset,
- char **modname,
- const unsigned char **modbuildid,
- char *namebuf)
+int module_address_lookup(unsigned long addr,
+ unsigned long *size,
+ unsigned long *offset,
+ char **modname,
+ const unsigned char **modbuildid,
+ char *namebuf)
{
- const char *ret = NULL;
+ const char *sym;
+ int ret = 0;
struct module *mod;
preempt_disable();
@@ -344,12 +345,10 @@ const char *module_address_lookup(unsigned long addr,
#endif
}
- ret = find_kallsyms_symbol(mod, addr, size, offset);
- }
- /* Make a copy in here where it's safe */
- if (ret) {
- strscpy(namebuf, ret, KSYM_NAME_LEN);
- ret = namebuf;
+ sym = find_kallsyms_symbol(mod, addr, size, offset);
+
+ if (sym)
+ ret = strscpy(namebuf, sym, KSYM_NAME_LEN);
}
preempt_enable();
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index dc48fecfa1dc..25f3cf679b35 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -218,6 +218,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
*/
do {
clear_thread_flag(TIF_SIGPENDING);
+ clear_thread_flag(TIF_NOTIFY_SIGNAL);
rc = kernel_wait4(-1, NULL, __WALL, NULL);
} while (rc != -ECHILD);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index d9abb7ab031d..753b8dd42a59 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -1595,7 +1595,7 @@ int swsusp_check(bool exclusive)
put:
if (error)
- fput(hib_resume_bdev_file);
+ bdev_fput(hib_resume_bdev_file);
else
pr_debug("Image signature found, resuming\n");
} else {
diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
index 040fe7d1eda2..39a2b61c7232 100644
--- a/kernel/printk/Makefile
+++ b/kernel/printk/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y = printk.o conopt.o
+obj-y = printk.o
obj-$(CONFIG_PRINTK) += printk_safe.o nbcon.o
obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
obj-$(CONFIG_PRINTK_INDEX) += index.o
diff --git a/kernel/printk/conopt.c b/kernel/printk/conopt.c
deleted file mode 100644
index 9d507bac3657..000000000000
--- a/kernel/printk/conopt.c
+++ /dev/null
@@ -1,146 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Kernel command line console options for hardware based addressing
- *
- * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
- * Author: Tony Lindgren <tony@atomide.com>
- */
-
-#include <linux/console.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <asm/errno.h>
-
-#include "console_cmdline.h"
-
-/*
- * Allow longer DEVNAME:0.0 style console naming such as abcd0000.serial:0.0
- * in addition to the legacy ttyS0 style naming.
- */
-#define CONSOLE_NAME_MAX 32
-
-#define CONSOLE_OPT_MAX 16
-#define CONSOLE_BRL_OPT_MAX 16
-
-struct console_option {
- char name[CONSOLE_NAME_MAX];
- char opt[CONSOLE_OPT_MAX];
- char brl_opt[CONSOLE_BRL_OPT_MAX];
- u8 has_brl_opt:1;
-};
-
-/* Updated only at console_setup() time, no locking needed */
-static struct console_option conopt[MAX_CMDLINECONSOLES];
-
-/**
- * console_opt_save - Saves kernel command line console option for driver use
- * @str: Kernel command line console name and option
- * @brl_opt: Braille console options
- *
- * Saves a kernel command line console option for driver subsystems to use for
- * adding a preferred console during init. Called from console_setup() only.
- *
- * Return: 0 on success, negative error code on failure.
- */
-int __init console_opt_save(const char *str, const char *brl_opt)
-{
- struct console_option *con;
- size_t namelen, optlen;
- const char *opt;
- int i;
-
- namelen = strcspn(str, ",");
- if (namelen == 0 || namelen >= CONSOLE_NAME_MAX)
- return -EINVAL;
-
- opt = str + namelen;
- if (*opt == ',')
- opt++;
-
- optlen = strlen(opt);
- if (optlen >= CONSOLE_OPT_MAX)
- return -EINVAL;
-
- for (i = 0; i < MAX_CMDLINECONSOLES; i++) {
- con = &conopt[i];
-
- if (con->name[0]) {
- if (!strncmp(str, con->name, namelen))
- return 0;
- continue;
- }
-
- /*
- * The name isn't terminated, only opt is. Empty opt is fine,
- * but brl_opt can be either empty or NULL. For more info, see
- * _braille_console_setup().
- */
- strscpy(con->name, str, namelen + 1);
- strscpy(con->opt, opt, CONSOLE_OPT_MAX);
- if (brl_opt) {
- strscpy(con->brl_opt, brl_opt, CONSOLE_BRL_OPT_MAX);
- con->has_brl_opt = 1;
- }
-
- return 0;
- }
-
- return -ENOMEM;
-}
-
-static struct console_option *console_opt_find(const char *name)
-{
- struct console_option *con;
- int i;
-
- for (i = 0; i < MAX_CMDLINECONSOLES; i++) {
- con = &conopt[i];
- if (!strcmp(name, con->name))
- return con;
- }
-
- return NULL;
-}
-
-/**
- * add_preferred_console_match - Adds a preferred console if a match is found
- * @match: Expected console on kernel command line, such as console=DEVNAME:0.0
- * @name: Name of the console character device to add such as ttyS
- * @idx: Index for the console
- *
- * Allows driver subsystems to add a console after translating the command
- * line name to the character device name used for the console. Options are
- * added automatically based on the kernel command line. Duplicate preferred
- * consoles are ignored by __add_preferred_console().
- *
- * Return: 0 on success, negative error code on failure.
- */
-int add_preferred_console_match(const char *match, const char *name,
- const short idx)
-{
- struct console_option *con;
- char *brl_opt = NULL;
-
- if (!match || !strlen(match) || !name || !strlen(name) ||
- idx < 0)
- return -EINVAL;
-
- con = console_opt_find(match);
- if (!con)
- return -ENOENT;
-
- /*
- * See __add_preferred_console(). It checks for NULL brl_options to set
- * the preferred_console flag. Empty brl_opt instead of NULL leads into
- * the preferred_console flag not set, and CON_CONSDEV not being set,
- * and the boot console won't get disabled at the end of console_setup().
- */
- if (con->has_brl_opt)
- brl_opt = con->brl_opt;
-
- console_opt_add_preferred_console(name, idx, con->opt, brl_opt);
-
- return 0;
-}
diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
index a125e0235589..3ca74ad391d6 100644
--- a/kernel/printk/console_cmdline.h
+++ b/kernel/printk/console_cmdline.h
@@ -2,12 +2,6 @@
#ifndef _CONSOLE_CMDLINE_H
#define _CONSOLE_CMDLINE_H
-#define MAX_CMDLINECONSOLES 8
-
-int console_opt_save(const char *str, const char *brl_opt);
-int console_opt_add_preferred_console(const char *name, const short idx,
- char *options, char *brl_options);
-
struct console_cmdline
{
char name[16]; /* Name of the driver */
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 420fd310129d..dddb15f48d59 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -383,6 +383,9 @@ static int console_locked;
/*
* Array of consoles built from command line options (console=)
*/
+
+#define MAX_CMDLINECONSOLES 8
+
static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
static int preferred_console = -1;
@@ -2500,17 +2503,6 @@ static int __init console_setup(char *str)
if (_braille_console_setup(&str, &brl_options))
return 1;
- /* Save the console for driver subsystem use */
- if (console_opt_save(str, brl_options))
- return 1;
-
- /* Flag register_console() to not call try_enable_default_console() */
- console_set_on_cmdline = 1;
-
- /* Don't attempt to parse a DEVNAME:0.0 style console */
- if (strchr(str, ':'))
- return 1;
-
/*
* Decode str into name, index, options.
*/
@@ -2541,13 +2533,6 @@ static int __init console_setup(char *str)
}
__setup("console=", console_setup);
-/* Only called from add_preferred_console_match() */
-int console_opt_add_preferred_console(const char *name, const short idx,
- char *options, char *brl_options)
-{
- return __add_preferred_console(name, idx, options, brl_options, true);
-}
-
/**
* add_preferred_console - add a device to the list of preferred consoles.
* @name: device name
@@ -3522,7 +3507,7 @@ void register_console(struct console *newcon)
* Note that a console with tty binding will have CON_CONSDEV
* flag set and will be first in the list.
*/
- if (preferred_console < 0 && !console_set_on_cmdline) {
+ if (preferred_console < 0) {
if (hlist_empty(&console_list) || !console_first()->device ||
console_first()->flags & CON_BOOT) {
try_enable_default_console(newcon);
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index d7eee421d4bc..b696b85ac63e 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -46,8 +46,8 @@ COND_SYSCALL(io_getevents_time32);
COND_SYSCALL(io_getevents);
COND_SYSCALL(io_pgetevents_time32);
COND_SYSCALL(io_pgetevents);
-COND_SYSCALL_COMPAT(io_pgetevents_time32);
COND_SYSCALL_COMPAT(io_pgetevents);
+COND_SYSCALL_COMPAT(io_pgetevents_time64);
COND_SYSCALL(io_uring_setup);
COND_SYSCALL(io_uring_enter);
COND_SYSCALL(io_uring_register);
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 492c14aac642..b8ee320208d4 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1285,6 +1285,8 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
struct hrtimer_clock_base *base;
unsigned long flags;
+ if (WARN_ON_ONCE(!timer->function))
+ return;
/*
* Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
* match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index d88b13076b79..a47bcf71defc 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -178,26 +178,6 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
}
}
-#ifdef CONFIG_NO_HZ_FULL
-static void giveup_do_timer(void *info)
-{
- int cpu = *(unsigned int *)info;
-
- WARN_ON(tick_do_timer_cpu != smp_processor_id());
-
- tick_do_timer_cpu = cpu;
-}
-
-static void tick_take_do_timer_from_boot(void)
-{
- int cpu = smp_processor_id();
- int from = tick_do_timer_boot_cpu;
-
- if (from >= 0 && from != cpu)
- smp_call_function_single(from, giveup_do_timer, &cpu, 1);
-}
-#endif
-
/*
* Setup the tick device
*/
@@ -221,19 +201,25 @@ static void tick_setup_device(struct tick_device *td,
tick_next_period = ktime_get();
#ifdef CONFIG_NO_HZ_FULL
/*
- * The boot CPU may be nohz_full, in which case set
- * tick_do_timer_boot_cpu so the first housekeeping
- * secondary that comes up will take do_timer from
- * us.
+ * The boot CPU may be nohz_full, in which case the
+ * first housekeeping secondary will take do_timer()
+ * from it.
*/
if (tick_nohz_full_cpu(cpu))
tick_do_timer_boot_cpu = cpu;
- } else if (tick_do_timer_boot_cpu != -1 &&
- !tick_nohz_full_cpu(cpu)) {
- tick_take_do_timer_from_boot();
+ } else if (tick_do_timer_boot_cpu != -1 && !tick_nohz_full_cpu(cpu)) {
tick_do_timer_boot_cpu = -1;
- WARN_ON(READ_ONCE(tick_do_timer_cpu) != cpu);
+ /*
+ * The boot CPU will stay in periodic (NOHZ disabled)
+ * mode until clocksource_done_booting() is called after
+ * smp_init() selects a high resolution clocksource and
+ * timekeeping_notify() kicks the NOHZ stuff alive.
+ *
+ * So this WRITE_ONCE can only race with the READ_ONCE
+ * check in tick_periodic() but this race is harmless.
+ */
+ WRITE_ONCE(tick_do_timer_cpu, cpu);
#endif
}
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 166ad5444eea..721c3b221048 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1136,7 +1136,7 @@ config PREEMPTIRQ_DELAY_TEST
config SYNTH_EVENT_GEN_TEST
tristate "Test module for in-kernel synthetic event generation"
- depends on SYNTH_EVENTS
+ depends on SYNTH_EVENTS && m
help
This option creates a test module to check the base
functionality of in-kernel synthetic event definition and
@@ -1149,7 +1149,7 @@ config SYNTH_EVENT_GEN_TEST
config KPROBE_EVENT_GEN_TEST
tristate "Test module for in-kernel kprobe event generation"
- depends on KPROBE_EVENTS
+ depends on KPROBE_EVENTS && m
help
This option creates a test module to check the base
functionality of in-kernel kprobe event definition.
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index f5154c051d2c..d1daeab1bbc1 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -3295,7 +3295,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
struct bpf_run_ctx *old_run_ctx;
int err = 0;
- if (link->task && current != link->task)
+ if (link->task && current->mm != link->task->mm)
return 0;
if (sleepable)
@@ -3396,8 +3396,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
cnt = attr->link_create.uprobe_multi.cnt;
+ pid = attr->link_create.uprobe_multi.pid;
- if (!upath || !uoffsets || !cnt)
+ if (!upath || !uoffsets || !cnt || pid < 0)
return -EINVAL;
if (cnt > MAX_UPROBE_MULTI_CNT)
return -E2BIG;
@@ -3421,11 +3422,8 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
goto error_path_put;
}
- pid = attr->link_create.uprobe_multi.pid;
if (pid) {
- rcu_read_lock();
- task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
- rcu_read_unlock();
+ task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
if (!task) {
err = -ESRCH;
goto error_path_put;
@@ -3519,7 +3517,6 @@ static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
}
#endif /* CONFIG_UPROBES */
-#ifdef CONFIG_FPROBE
__bpf_kfunc_start_defs();
__bpf_kfunc bool bpf_session_is_return(void)
@@ -3568,4 +3565,3 @@ static int __init bpf_kprobe_multi_kfuncs_init(void)
}
late_initcall(bpf_kprobe_multi_kfuncs_init);
-#endif
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 65208d3b5ed9..eacab4020508 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -6969,7 +6969,7 @@ allocate_ftrace_mod_map(struct module *mod,
return mod_map;
}
-static const char *
+static int
ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
unsigned long addr, unsigned long *size,
unsigned long *off, char *sym)
@@ -6990,21 +6990,18 @@ ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
*size = found_func->size;
if (off)
*off = addr - found_func->ip;
- if (sym)
- strscpy(sym, found_func->name, KSYM_NAME_LEN);
-
- return found_func->name;
+ return strscpy(sym, found_func->name, KSYM_NAME_LEN);
}
- return NULL;
+ return 0;
}
-const char *
+int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
struct ftrace_mod_map *mod_map;
- const char *ret = NULL;
+ int ret = 0;
/* mod_map is freed via call_rcu() */
preempt_disable();
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 5e263c141574..39877c80d6cb 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -554,6 +554,10 @@ static int parse_btf_field(char *fieldname, const struct btf_type *type,
anon_offs = 0;
field = btf_find_struct_member(ctx->btf, type, fieldname,
&anon_offs);
+ if (IS_ERR(field)) {
+ trace_probe_log_err(ctx->offset, BAD_BTF_TID);
+ return PTR_ERR(field);
+ }
if (!field) {
trace_probe_log_err(ctx->offset, NO_BTF_FIELD);
return -ENOENT;
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 8541fa1494ae..c98e3b3386ba 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -970,19 +970,17 @@ static struct uprobe_cpu_buffer *prepare_uprobe_buffer(struct trace_uprobe *tu,
static void __uprobe_trace_func(struct trace_uprobe *tu,
unsigned long func, struct pt_regs *regs,
- struct uprobe_cpu_buffer **ucbp,
+ struct uprobe_cpu_buffer *ucb,
struct trace_event_file *trace_file)
{
struct uprobe_trace_entry_head *entry;
struct trace_event_buffer fbuffer;
- struct uprobe_cpu_buffer *ucb;
void *data;
int size, esize;
struct trace_event_call *call = trace_probe_event_call(&tu->tp);
WARN_ON(call != trace_file->event_call);
- ucb = prepare_uprobe_buffer(tu, regs, ucbp);
if (WARN_ON_ONCE(ucb->dsize > PAGE_SIZE))
return;
@@ -1014,13 +1012,16 @@ static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
struct uprobe_cpu_buffer **ucbp)
{
struct event_file_link *link;
+ struct uprobe_cpu_buffer *ucb;
if (is_ret_probe(tu))
return 0;
+ ucb = prepare_uprobe_buffer(tu, regs, ucbp);
+
rcu_read_lock();
trace_probe_for_each_link_rcu(link, &tu->tp)
- __uprobe_trace_func(tu, 0, regs, ucbp, link->file);
+ __uprobe_trace_func(tu, 0, regs, ucb, link->file);
rcu_read_unlock();
return 0;
@@ -1031,10 +1032,13 @@ static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
struct uprobe_cpu_buffer **ucbp)
{
struct event_file_link *link;
+ struct uprobe_cpu_buffer *ucb;
+
+ ucb = prepare_uprobe_buffer(tu, regs, ucbp);
rcu_read_lock();
trace_probe_for_each_link_rcu(link, &tu->tp)
- __uprobe_trace_func(tu, func, regs, ucbp, link->file);
+ __uprobe_trace_func(tu, func, regs, ucb, link->file);
rcu_read_unlock();
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 003474c9a77d..3fbaecfc88c2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -125,6 +125,7 @@ enum wq_internal_consts {
HIGHPRI_NICE_LEVEL = MIN_NICE,
WQ_NAME_LEN = 32,
+ WORKER_ID_LEN = 10 + WQ_NAME_LEN, /* "kworker/R-" + WQ_NAME_LEN */
};
/*
@@ -2742,6 +2743,26 @@ static void worker_detach_from_pool(struct worker *worker)
complete(detach_completion);
}
+static int format_worker_id(char *buf, size_t size, struct worker *worker,
+ struct worker_pool *pool)
+{
+ if (worker->rescue_wq)
+ return scnprintf(buf, size, "kworker/R-%s",
+ worker->rescue_wq->name);
+
+ if (pool) {
+ if (pool->cpu >= 0)
+ return scnprintf(buf, size, "kworker/%d:%d%s",
+ pool->cpu, worker->id,
+ pool->attrs->nice < 0 ? "H" : "");
+ else
+ return scnprintf(buf, size, "kworker/u%d:%d",
+ pool->id, worker->id);
+ } else {
+ return scnprintf(buf, size, "kworker/dying");
+ }
+}
+
/**
* create_worker - create a new workqueue worker
* @pool: pool the new worker will belong to
@@ -2758,7 +2779,6 @@ static struct worker *create_worker(struct worker_pool *pool)
{
struct worker *worker;
int id;
- char id_buf[23];
/* ID is needed to determine kthread name */
id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
@@ -2777,17 +2797,14 @@ static struct worker *create_worker(struct worker_pool *pool)
worker->id = id;
if (!(pool->flags & POOL_BH)) {
- if (pool->cpu >= 0)
- snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
- pool->attrs->nice < 0 ? "H" : "");
- else
- snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
+ char id_buf[WORKER_ID_LEN];
+ format_worker_id(id_buf, sizeof(id_buf), worker, pool);
worker->task = kthread_create_on_node(worker_thread, worker,
- pool->node, "kworker/%s", id_buf);
+ pool->node, "%s", id_buf);
if (IS_ERR(worker->task)) {
if (PTR_ERR(worker->task) == -EINTR) {
- pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
+ pr_err("workqueue: Interrupted when creating a worker thread \"%s\"\n",
id_buf);
} else {
pr_err_once("workqueue: Failed to create a worker thread: %pe",
@@ -3350,7 +3367,6 @@ woke_up:
raw_spin_unlock_irq(&pool->lock);
set_pf_worker(false);
- set_task_comm(worker->task, "kworker/dying");
ida_free(&pool->worker_ida, worker->id);
worker_detach_from_pool(worker);
WARN_ON_ONCE(!list_empty(&worker->entry));
@@ -5542,6 +5558,7 @@ static int wq_clamp_max_active(int max_active, unsigned int flags,
static int init_rescuer(struct workqueue_struct *wq)
{
struct worker *rescuer;
+ char id_buf[WORKER_ID_LEN];
int ret;
if (!(wq->flags & WQ_MEM_RECLAIM))
@@ -5555,7 +5572,9 @@ static int init_rescuer(struct workqueue_struct *wq)
}
rescuer->rescue_wq = wq;
- rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name);
+ format_worker_id(id_buf, sizeof(id_buf), rescuer, NULL);
+
+ rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", id_buf);
if (IS_ERR(rescuer->task)) {
ret = PTR_ERR(rescuer->task);
pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe",
@@ -6384,19 +6403,15 @@ void show_freezable_workqueues(void)
/* used to show worker information through /proc/PID/{comm,stat,status} */
void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
{
- int off;
-
- /* always show the actual comm */
- off = strscpy(buf, task->comm, size);
- if (off < 0)
- return;
-
/* stabilize PF_WQ_WORKER and worker pool association */
mutex_lock(&wq_pool_attach_mutex);
if (task->flags & PF_WQ_WORKER) {
struct worker *worker = kthread_data(task);
struct worker_pool *pool = worker->pool;
+ int off;
+
+ off = format_worker_id(buf, size, worker, pool);
if (pool) {
raw_spin_lock_irq(&pool->lock);
@@ -6415,6 +6430,8 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
}
raw_spin_unlock_irq(&pool->lock);
}
+ } else {
+ strscpy(buf, task->comm, size);
}
mutex_unlock(&wq_pool_attach_mutex);
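An illustrative user-space model of the kworker naming scheme that format_worker_id() centralizes above (bound pool: "kworker/<cpu>:<id>[H]", unbound pool: "kworker/u<pool>:<id>", rescuer: "kworker/R-<wq>"). The helper below is a simplification, not the kernel function.

#include <stdio.h>

static int format_worker_id(char *buf, size_t size, int cpu, int pool_id,
			    int worker_id, int nice, const char *rescue_wq)
{
	if (rescue_wq)
		return snprintf(buf, size, "kworker/R-%s", rescue_wq);
	if (cpu >= 0)
		return snprintf(buf, size, "kworker/%d:%d%s", cpu, worker_id,
				nice < 0 ? "H" : "");
	return snprintf(buf, size, "kworker/u%d:%d", pool_id, worker_id);
}

int main(void)
{
	char buf[42];	/* 10 for "kworker/R-" plus a 32-byte wq name */

	format_worker_id(buf, sizeof(buf), 3, -1, 7, -20, NULL);
	puts(buf);	/* kworker/3:7H */

	format_worker_id(buf, sizeof(buf), -1, 8, 2, 0, NULL);
	puts(buf);	/* kworker/u8:2 */
	return 0;
}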
diff --git a/lib/Kconfig b/lib/Kconfig
index d33a268bc256..b0a76dff5c18 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -539,13 +539,7 @@ config CPUMASK_OFFSTACK
stack overflow.
config FORCE_NR_CPUS
- bool "Set number of CPUs at compile time"
- depends on SMP && EXPERT && !COMPILE_TEST
- help
- Say Yes if you have NR_CPUS set to an actual number of possible
- CPUs in your system, not to a default value. This forces the core
- code to rely on compile-time value and optimize kernel routines
- better.
+ def_bool !SMP
config CPU_RMAP
bool
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 11ed973ac359..c347b8b72d78 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -227,6 +227,7 @@ struct page_ext_operations page_alloc_tagging_ops = {
};
EXPORT_SYMBOL(page_alloc_tagging_ops);
+#ifdef CONFIG_SYSCTL
static struct ctl_table memory_allocation_profiling_sysctls[] = {
{
.procname = "mem_profiling",
@@ -241,6 +242,17 @@ static struct ctl_table memory_allocation_profiling_sysctls[] = {
{ }
};
+static void __init sysctl_init(void)
+{
+ if (!mem_profiling_support)
+ memory_allocation_profiling_sysctls[0].mode = 0444;
+
+ register_sysctl_init("vm", memory_allocation_profiling_sysctls);
+}
+#else /* CONFIG_SYSCTL */
+static inline void sysctl_init(void) {}
+#endif /* CONFIG_SYSCTL */
+
static int __init alloc_tag_init(void)
{
const struct codetag_type_desc desc = {
@@ -253,9 +265,7 @@ static int __init alloc_tag_init(void)
if (IS_ERR(alloc_tag_cttype))
return PTR_ERR(alloc_tag_cttype);
- if (!mem_profiling_support)
- memory_allocation_profiling_sysctls[0].mode = 0444;
- register_sysctl_init("vm", memory_allocation_profiling_sysctls);
+ sysctl_init();
procfs_init();
return 0;
diff --git a/lib/closure.c b/lib/closure.c
index 07409e9e35a5..c971216d9d77 100644
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -13,14 +13,25 @@
#include <linux/seq_file.h>
#include <linux/sched/debug.h>
-static inline void closure_put_after_sub(struct closure *cl, int flags)
+static inline void closure_put_after_sub_checks(int flags)
{
int r = flags & CLOSURE_REMAINING_MASK;
- BUG_ON(flags & CLOSURE_GUARD_MASK);
- BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
+ if (WARN(flags & CLOSURE_GUARD_MASK,
+ "closure has guard bits set: %x (%u)",
+ flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
+ r &= ~CLOSURE_GUARD_MASK;
+
+ WARN(!r && (flags & ~CLOSURE_DESTRUCTOR),
+ "closure ref hit 0 with incorrect flags set: %x (%u)",
+ flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+}
+
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+ closure_put_after_sub_checks(flags);
- if (!r) {
+ if (!(flags & CLOSURE_REMAINING_MASK)) {
smp_acquire__after_ctrl_dep();
cl->closure_get_happened = false;
@@ -139,6 +150,41 @@ void __sched __closure_sync(struct closure *cl)
}
EXPORT_SYMBOL(__closure_sync);
+/*
+ * closure_return_sync - finish running a closure, synchronously (i.e. waiting
+ * for outstanding get()s to finish) and returning once closure refcount is 0.
+ *
+ * Unlike closure_sync() this doesn't reinit the ref to 1; subsequent
+ * closure_get_not_zero() calls will fail.
+ */
+void __sched closure_return_sync(struct closure *cl)
+{
+ struct closure_syncer s = { .task = current };
+
+ cl->s = &s;
+ set_closure_fn(cl, closure_sync_fn, NULL);
+
+ unsigned flags = atomic_sub_return_release(1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR,
+ &cl->remaining);
+
+ closure_put_after_sub_checks(flags);
+
+ if (unlikely(flags & CLOSURE_REMAINING_MASK)) {
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+ }
+
+ if (cl->parent)
+ closure_put(cl->parent);
+}
+EXPORT_SYMBOL(closure_return_sync);
+
int __sched __closure_sync_timeout(struct closure *cl, unsigned long timeout)
{
struct closure_syncer s = { .task = current };
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index 39da5b3bc649..e17d520f532c 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -236,9 +236,6 @@ static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
kfree(p)); \
checker(expected_size, __kmalloc(alloc_size, gfp), \
kfree(p)); \
- checker(expected_size, \
- __kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
- kfree(p)); \
\
orig = kmalloc(alloc_size, gfp); \
KUNIT_EXPECT_TRUE(test, orig != NULL); \
@@ -377,7 +374,7 @@ static const char * const test_strs[] = {
for (i = 0; i < ARRAY_SIZE(test_strs); i++) { \
len = strlen(test_strs[i]); \
KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0); \
- checker(len, kmemdup_array(test_strs[i], len, 1, gfp), \
+ checker(len, kmemdup_array(test_strs[i], 1, len, gfp), \
kfree(p)); \
checker(len, kmemdup(test_strs[i], len, gfp), \
kfree(p)); \
diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c
index d3b64b10da1c..9a17ee9af93a 100644
--- a/lib/math/prime_numbers.c
+++ b/lib/math/prime_numbers.c
@@ -311,4 +311,5 @@ module_exit(primes_exit);
module_param_named(selftest, selftest_max, ulong, 0400);
MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Prime number library");
MODULE_LICENSE("GPL");
diff --git a/lib/math/rational-test.c b/lib/math/rational-test.c
index 01611ddff420..47486a95f088 100644
--- a/lib/math/rational-test.c
+++ b/lib/math/rational-test.c
@@ -53,4 +53,5 @@ static struct kunit_suite rational_test_suite = {
kunit_test_suites(&rational_test_suite);
+MODULE_DESCRIPTION("Rational fractions unit test");
MODULE_LICENSE("GPL v2");
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index 4ef31b0bb74d..d305b0c054bb 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -1178,14 +1178,28 @@ struct foo {
s16 array[] __counted_by(counter);
};
+struct bar {
+ int a;
+ u32 counter;
+ s16 array[];
+};
+
static void DEFINE_FLEX_test(struct kunit *test)
{
- DEFINE_RAW_FLEX(struct foo, two, array, 2);
+ /* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */
+ DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2);
+#if __has_attribute(__counted_by__)
+ int expected_raw_size = sizeof(struct foo);
+#else
+ int expected_raw_size = sizeof(struct foo) + 2 * sizeof(s16);
+#endif
+ /* Without annotation, it will always be on-stack size. */
+ DEFINE_RAW_FLEX(struct bar, two, array, 2);
DEFINE_FLEX(struct foo, eight, array, counter, 8);
DEFINE_FLEX(struct foo, empty, array, counter, 0);
- KUNIT_EXPECT_EQ(test, __struct_size(two),
- sizeof(struct foo) + sizeof(s16) + sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero), expected_raw_size);
+ KUNIT_EXPECT_EQ(test, __struct_size(two), sizeof(struct bar) + 2 * sizeof(s16));
KUNIT_EXPECT_EQ(test, __struct_size(eight), 24);
KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo));
}
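A user-space sketch of what the adjusted expectation reflects (simplified; not the kernel's DEFINE_RAW_FLEX macro): an on-stack buffer for a flexible-array struct without a __counted_by annotation is sized as the header plus the requested trailing elements.

#include <stdint.h>
#include <stdio.h>

struct bar {
	int a;
	uint32_t counter;
	int16_t array[];
};

int main(void)
{
	/* storage for struct bar plus two trailing array elements */
	_Alignas(struct bar) unsigned char storage[sizeof(struct bar) +
						   2 * sizeof(int16_t)];
	struct bar *two = (struct bar *)storage;

	two->counter = 2;
	printf("%zu bytes on stack\n", sizeof(storage));
	return 0;
}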
diff --git a/lib/string_helpers_kunit.c b/lib/string_helpers_kunit.c
index f88e39fd68d6..c853046183d2 100644
--- a/lib/string_helpers_kunit.c
+++ b/lib/string_helpers_kunit.c
@@ -625,4 +625,5 @@ static struct kunit_suite string_helpers_test_suite = {
kunit_test_suites(&string_helpers_test_suite);
+MODULE_DESCRIPTION("Test cases for string helpers module");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/string_kunit.c b/lib/string_kunit.c
index 2a812decf14b..c919e3293da6 100644
--- a/lib/string_kunit.c
+++ b/lib/string_kunit.c
@@ -633,4 +633,5 @@ static struct kunit_suite string_test_suite = {
kunit_test_suites(&string_test_suite);
+MODULE_DESCRIPTION("Test cases for string functions");
MODULE_LICENSE("GPL v2");
diff --git a/lib/test_dynamic_debug.c b/lib/test_dynamic_debug.c
index 8dd250ad022b..77c2a669b6af 100644
--- a/lib/test_dynamic_debug.c
+++ b/lib/test_dynamic_debug.c
@@ -162,4 +162,5 @@ module_init(test_dynamic_debug_init);
module_exit(test_dynamic_debug_exit);
MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_DESCRIPTION("Kernel module for testing dynamic_debug");
MODULE_LICENSE("GPL");
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 42b585208249..c63db03ebb9d 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -811,4 +811,5 @@ static void __exit test_rht_exit(void)
module_init(test_rht_init);
module_exit(test_rht_exit);
+MODULE_DESCRIPTION("Resizable, Scalable, Concurrent Hash Table test module");
MODULE_LICENSE("GPL v2");
diff --git a/mm/compaction.c b/mm/compaction.c
index e731d45befc7..739b1bf3d637 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -79,6 +79,13 @@ static inline bool is_via_compact_memory(int order) { return false; }
#define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT)
#endif
+static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags)
+{
+ post_alloc_hook(page, order, __GFP_MOVABLE);
+ return page;
+}
+#define mark_allocated(...) alloc_hooks(mark_allocated_noprof(__VA_ARGS__))
+
static void split_map_pages(struct list_head *freepages)
{
unsigned int i, order;
@@ -93,7 +100,7 @@ static void split_map_pages(struct list_head *freepages)
nr_pages = 1 << order;
- post_alloc_hook(page, order, __GFP_MOVABLE);
+ mark_allocated(page, order, __GFP_MOVABLE);
if (order)
split_page(page, order);
@@ -122,7 +129,7 @@ static unsigned long release_free_list(struct list_head *freepages)
* Convert free pages into post allocation pages, so
* that we can free them via __free_page.
*/
- post_alloc_hook(page, order, __GFP_MOVABLE);
+ mark_allocated(page, order, __GFP_MOVABLE);
__free_pages(page, order);
if (pfn > high_pfn)
high_pfn = pfn;
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index b104a353b532..e4969fb54da3 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -40,22 +40,7 @@
* Please refer Documentation/mm/arch_pgtable_helpers.rst for the semantics
* expectations that are being validated here. All future changes in here
* or the documentation need to be in sync.
- *
- * On s390 platform, the lower 4 bits are used to identify given page table
- * entry type. But these bits might affect the ability to clear entries with
- * pxx_clear() because of how dynamic page table folding works on s390. So
- * while loading up the entries do not change the lower 4 bits. It does not
- * have affect any other platform. Also avoid the 62nd bit on ppc64 that is
- * used to mark a pte entry.
*/
-#define S390_SKIP_MASK GENMASK(3, 0)
-#if __BITS_PER_LONG == 64
-#define PPC64_SKIP_MASK GENMASK(62, 62)
-#else
-#define PPC64_SKIP_MASK 0x0
-#endif
-#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
-#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE GENMASK(7, 0)
struct pgtable_debug_args {
@@ -511,8 +496,7 @@ static void __init pud_clear_tests(struct pgtable_debug_args *args)
return;
pr_debug("Validating PUD clear\n");
- pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
- WRITE_ONCE(*args->pudp, pud);
+ WARN_ON(pud_none(pud));
pud_clear(args->pudp);
pud = READ_ONCE(*args->pudp);
WARN_ON(!pud_none(pud));
@@ -548,8 +532,7 @@ static void __init p4d_clear_tests(struct pgtable_debug_args *args)
return;
pr_debug("Validating P4D clear\n");
- p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
- WRITE_ONCE(*args->p4dp, p4d);
+ WARN_ON(p4d_none(p4d));
p4d_clear(args->p4dp);
p4d = READ_ONCE(*args->p4dp);
WARN_ON(!p4d_none(p4d));
@@ -582,8 +565,7 @@ static void __init pgd_clear_tests(struct pgtable_debug_args *args)
return;
pr_debug("Validating PGD clear\n");
- pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
- WRITE_ONCE(*args->pgdp, pgd);
+ WARN_ON(pgd_none(pgd));
pgd_clear(args->pgdp);
pgd = READ_ONCE(*args->pgdp);
WARN_ON(!pgd_none(pgd));
@@ -634,10 +616,8 @@ static void __init pte_clear_tests(struct pgtable_debug_args *args)
if (WARN_ON(!args->ptep))
return;
-#ifndef CONFIG_RISCV
- pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
-#endif
set_pte_at(args->mm, args->vaddr, args->ptep, pte);
+ WARN_ON(pte_none(pte));
flush_dcache_page(page);
barrier();
ptep_clear(args->mm, args->vaddr, args->ptep);
@@ -650,8 +630,7 @@ static void __init pmd_clear_tests(struct pgtable_debug_args *args)
pmd_t pmd = READ_ONCE(*args->pmdp);
pr_debug("Validating PMD clear\n");
- pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
- WRITE_ONCE(*args->pmdp, pmd);
+ WARN_ON(pmd_none(pmd));
pmd_clear(args->pmdp);
pmd = READ_ONCE(*args->pmdp);
WARN_ON(!pmd_none(pmd));
diff --git a/mm/filemap.c b/mm/filemap.c
index 382c3d06bfb1..876cc64aadd7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1000,7 +1000,7 @@ struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
do {
cpuset_mems_cookie = read_mems_allowed_begin();
n = cpuset_mem_spread_node();
- folio = __folio_alloc_node(gfp, order, n);
+ folio = __folio_alloc_node_noprof(gfp, order, n);
} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
return folio;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 317de2afd371..db7946a0a28c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -558,15 +558,15 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
-DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT);
-DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK);
+DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
+DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
static struct attribute *stats_attrs[] = {
&anon_fault_alloc_attr.attr,
&anon_fault_fallback_attr.attr,
&anon_fault_fallback_charge_attr.attr,
- &anon_swpout_attr.attr,
- &anon_swpout_fallback_attr.attr,
+ &swpout_attr.attr,
+ &swpout_fallback_attr.attr,
NULL,
};
@@ -3009,30 +3009,36 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
if (new_order >= folio_order(folio))
return -EINVAL;
- /* Cannot split anonymous THP to order-1 */
- if (new_order == 1 && folio_test_anon(folio)) {
- VM_WARN_ONCE(1, "Cannot split to order-1 folio");
- return -EINVAL;
- }
-
- if (new_order) {
- /* Only swapping a whole PMD-mapped folio is supported */
- if (folio_test_swapcache(folio))
+ if (folio_test_anon(folio)) {
+ /* order-1 is not supported for anonymous THP. */
+ if (new_order == 1) {
+ VM_WARN_ONCE(1, "Cannot split to order-1 folio");
return -EINVAL;
+ }
+ } else if (new_order) {
/* Split shmem folio to non-zero order not supported */
if (shmem_mapping(folio->mapping)) {
VM_WARN_ONCE(1,
"Cannot split shmem folio to non-0 order");
return -EINVAL;
}
- /* No split if the file system does not support large folio */
- if (!mapping_large_folio_support(folio->mapping)) {
+ /*
+ * No split if the file system does not support large folio.
+ * Note that we might still have THPs in such mappings due to
+ * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
+ * does not actually support large folios properly.
+ */
+ if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
+ !mapping_large_folio_support(folio->mapping)) {
VM_WARN_ONCE(1,
"Cannot split file folio to non-0 order");
return -EINVAL;
}
}
+ /* Only swapping a whole PMD-mapped folio is supported */
+ if (folio_test_swapcache(folio) && new_order)
+ return -EINVAL;
is_hzp = is_huge_zero_folio(folio);
if (is_hzp) {
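A condensed user-space model of the reordered checks above (greatly simplified; the shmem and non-large-folio-mapping cases are omitted): the swapcache restriction now applies to any non-zero target order, while anonymous folios additionally reject order-1.

#include <stdbool.h>
#include <stdio.h>

static int split_allowed(bool anon, bool swapcache, int new_order)
{
	if (anon && new_order == 1)
		return -1;	/* order-1 anon folios not supported */
	if (swapcache && new_order)
		return -1;	/* only whole-folio swapcache splits */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       split_allowed(true, false, 1),	/* -1 */
	       split_allowed(false, true, 2),	/* -1 */
	       split_allowed(true, true, 0));	/*  0 */
	return 0;
}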
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6be78e7d4f6e..f35abff8be60 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5768,8 +5768,20 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
* do_exit() will not see it, and will keep the reservation
* forever.
*/
- if (adjust_reservation && vma_needs_reservation(h, vma, address))
- vma_add_reservation(h, vma, address);
+ if (adjust_reservation) {
+ int rc = vma_needs_reservation(h, vma, address);
+
+ if (rc < 0)
+ /* Presumably allocate_file_region_entries failed
+ * to allocate a file_region struct. Clear
+ * hugetlb_restore_reserve so that global reserve
+ * count will not be incremented by free_huge_folio.
+ * Act as if we consumed the reservation.
+ */
+ folio_clear_hugetlb_restore_reserve(page_folio(page));
+ else if (rc)
+ vma_add_reservation(h, vma, address);
+ }
tlb_remove_page_size(tlb, page, huge_page_size(h));
/*
diff --git a/mm/internal.h b/mm/internal.h
index b2c75b12014e..6902b7dd8509 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -588,7 +588,6 @@ extern void __putback_isolated_page(struct page *page, unsigned int order,
extern void memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
-extern void kernel_init_pages(struct page *page, int numpages);
/*
* This will have no effect, other than possibly generating a warning, if the
@@ -1436,11 +1435,6 @@ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
int priority);
#ifdef CONFIG_64BIT
-/* VM is sealed, in vm_flags */
-#define VM_SEALED _BITUL(63)
-#endif
-
-#ifdef CONFIG_64BIT
static inline int can_do_mseal(unsigned long flags)
{
if (flags)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index e7c9a4dc89f8..85e7c6b4575c 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -532,7 +532,7 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
return;
/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
- unpoison_slab_object(slab->slab_cache, ptr, size, flags);
+ unpoison_slab_object(slab->slab_cache, ptr, flags, false);
/* Poison the redzone and save alloc info for kmalloc() allocations. */
if (is_kmalloc_cache(slab->slab_cache))
diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index cf2d70e9c9a5..95f859e38c53 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -196,8 +196,7 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
u32 origin, bool checked)
{
u64 address = (u64)addr;
- void *shadow_start;
- u32 *origin_start;
+ u32 *shadow_start, *origin_start;
size_t pad = 0;
KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
@@ -225,8 +224,16 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
origin_start =
(u32 *)kmsan_get_metadata((void *)address, KMSAN_META_ORIGIN);
- for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++)
- origin_start[i] = origin;
+ /*
+ * If the new origin is non-zero, assume that the shadow byte is also non-zero,
+ * and unconditionally overwrite the old origin slot.
+ * If the new origin is zero, overwrite the old origin slot iff the
+ * corresponding shadow slot is zero.
+ */
+ for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
+ if (origin || !shadow_start[i])
+ origin_start[i] = origin;
+ }
}
struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)
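A tiny user-space model of the origin-write rule described in the comment above: a zero ("initialized") origin must not clobber an existing origin whose shadow slot still marks the data as uninitialized, while a non-zero origin always overwrites.

#include <stdint.h>
#include <stdio.h>

static void set_origin(uint32_t *origin_slots, const uint32_t *shadow_slots,
		       int n, uint32_t origin)
{
	for (int i = 0; i < n; i++)
		if (origin || !shadow_slots[i])
			origin_slots[i] = origin;
}

int main(void)
{
	uint32_t shadow[3]  = { 0, 0xffffffff, 0 };	/* slot 1 still uninitialized */
	uint32_t origins[3] = { 0xaaaa, 0xaaaa, 0xaaaa };

	set_origin(origins, shadow, 3, 0);	/* marking the range initialized */
	printf("%x %x %x\n", origins[0], origins[1], origins[2]); /* 0 aaaa 0 */
	return 0;
}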
diff --git a/mm/ksm.c b/mm/ksm.c
index 452ac8346e6e..34c4820e0d3d 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -296,7 +296,7 @@ static bool ksm_use_zero_pages __read_mostly;
static bool ksm_smart_scan = true;
/* The number of zero pages which is placed by KSM */
-unsigned long ksm_zero_pages;
+atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
/* The number of pages that have been skipped due to "smart scanning" */
static unsigned long ksm_pages_skipped;
@@ -1429,8 +1429,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
* the dirty bit in zero page's PTE is set.
*/
newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
- ksm_zero_pages++;
- mm->ksm_zero_pages++;
+ ksm_map_zero_page(mm);
/*
* We're replacing an anonymous page with a zero page, which is
* not anonymous. We need to do proper accounting otherwise we
@@ -2754,18 +2753,16 @@ static void ksm_do_scan(unsigned int scan_npages)
{
struct ksm_rmap_item *rmap_item;
struct page *page;
- unsigned int npages = scan_npages;
- while (npages-- && likely(!freezing(current))) {
+ while (scan_npages-- && likely(!freezing(current))) {
cond_resched();
rmap_item = scan_get_next_rmap_item(&page);
if (!rmap_item)
return;
cmp_and_merge_page(page, rmap_item);
put_page(page);
+ ksm_pages_scanned++;
}
-
- ksm_pages_scanned += scan_npages - npages;
}
static int ksmd_should_run(void)
@@ -3376,7 +3373,7 @@ static void wait_while_offlining(void)
#ifdef CONFIG_PROC_FS
long ksm_process_profit(struct mm_struct *mm)
{
- return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE -
+ return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
}
#endif /* CONFIG_PROC_FS */
@@ -3665,7 +3662,7 @@ KSM_ATTR_RO(pages_skipped);
static ssize_t ksm_zero_pages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%ld\n", ksm_zero_pages);
+ return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages));
}
KSM_ATTR_RO(ksm_zero_pages);
@@ -3674,7 +3671,7 @@ static ssize_t general_profit_show(struct kobject *kobj,
{
long general_profit;
- general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE -
+ general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
ksm_rmap_items * sizeof(struct ksm_rmap_item);
return sysfs_emit(buf, "%ld\n", general_profit);
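A user-space analogue of the counter conversion above, using C11 atomics in place of the kernel's atomic_long_t: per-event increments stay lock-free and the sysfs read becomes a plain atomic load.

#include <stdatomic.h>
#include <stdio.h>

static atomic_long zero_pages;	/* zero-initialized, like ATOMIC_LONG_INIT(0) */

static void map_zero_page(void)   { atomic_fetch_add(&zero_pages, 1); }
static void unmap_zero_page(void) { atomic_fetch_sub(&zero_pages, 1); }

int main(void)
{
	map_zero_page();
	map_zero_page();
	unmap_zero_page();
	printf("%ld\n", atomic_load(&zero_pages));	/* 1 */
	return 0;
}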
diff --git a/mm/memblock.c b/mm/memblock.c
index d09136e040d3..e81fb68f7f88 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -754,7 +754,7 @@ bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_byt
/* calculate lost pages */
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
- if (nid == NUMA_NO_NODE)
+ if (!numa_valid_node(nid))
nr_pages += end_pfn - start_pfn;
}
@@ -1061,7 +1061,7 @@ static bool should_skip_region(struct memblock_type *type,
return false;
/* only memory regions are associated with nodes, check it */
- if (nid != NUMA_NO_NODE && nid != m_nid)
+ if (numa_valid_node(nid) && nid != m_nid)
return true;
/* skip hotpluggable memory regions if needed */
@@ -1118,10 +1118,6 @@ void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
int idx_a = *idx & 0xffffffff;
int idx_b = *idx >> 32;
- if (WARN_ONCE(nid == MAX_NUMNODES,
- "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
- nid = NUMA_NO_NODE;
-
for (; idx_a < type_a->cnt; idx_a++) {
struct memblock_region *m = &type_a->regions[idx_a];
@@ -1215,9 +1211,6 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
int idx_a = *idx & 0xffffffff;
int idx_b = *idx >> 32;
- if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
- nid = NUMA_NO_NODE;
-
if (*idx == (u64)ULLONG_MAX) {
idx_a = type_a->cnt - 1;
if (type_b != NULL)
@@ -1303,7 +1296,7 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
continue;
- if (nid == MAX_NUMNODES || nid == r_nid)
+ if (!numa_valid_node(nid) || nid == r_nid)
break;
}
if (*idx >= type->cnt) {
@@ -1448,9 +1441,6 @@ phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
enum memblock_flags flags = choose_memblock_flags();
phys_addr_t found;
- if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
- nid = NUMA_NO_NODE;
-
if (!align) {
/* Can't use WARNs this early in boot on powerpc */
dump_stack();
@@ -1463,7 +1453,7 @@ again:
if (found && !memblock_reserve(found, size))
goto done;
- if (nid != NUMA_NO_NODE && !exact_nid) {
+ if (numa_valid_node(nid) && !exact_nid) {
found = memblock_find_in_range_node(size, align, start,
end, NUMA_NO_NODE,
flags);
@@ -1983,7 +1973,7 @@ static void __init_memblock memblock_dump(struct memblock_type *type)
end = base + size - 1;
flags = rgn->flags;
#ifdef CONFIG_NUMA
- if (memblock_get_region_node(rgn) != MAX_NUMNODES)
+ if (numa_valid_node(memblock_get_region_node(rgn)))
snprintf(nid_buf, sizeof(nid_buf), " on node %d",
memblock_get_region_node(rgn));
#endif
@@ -2177,7 +2167,7 @@ static void __init memmap_init_reserved_pages(void)
start = region->base;
end = start + region->size;
- if (nid == NUMA_NO_NODE || nid >= MAX_NUMNODES)
+ if (!numa_valid_node(nid))
nid = early_pfn_to_nid(PFN_DOWN(start));
reserve_bootmem_region(start, end, nid);
@@ -2268,7 +2258,7 @@ static int memblock_debug_show(struct seq_file *m, void *private)
seq_printf(m, "%4d: ", i);
seq_printf(m, "%pa..%pa ", &reg->base, &end);
- if (nid != MAX_NUMNODES)
+ if (numa_valid_node(nid))
seq_printf(m, "%4d ", nid);
else
seq_printf(m, "%4c ", 'x');
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7fad15b2290c..71fe2a95b8bd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3147,8 +3147,6 @@ static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
struct mem_cgroup *memcg;
struct lruvec *lruvec;
- lockdep_assert_irqs_disabled();
-
rcu_read_lock();
memcg = obj_cgroup_memcg(objcg);
lruvec = mem_cgroup_lruvec(memcg, pgdat);
@@ -7747,8 +7745,7 @@ void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
* @new: Replacement folio.
*
* Charge @new as a replacement folio for @old. @old will
- * be uncharged upon free. This is only used by the page cache
- * (in replace_page_cache_folio()).
+ * be uncharged upon free.
*
* Both folios must be locked, @new->mapping must be set up.
*/
diff --git a/mm/memory.c b/mm/memory.c
index 0f47a533014e..d10e616d7389 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1507,12 +1507,6 @@ static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
if (unlikely(folio_mapcount(folio) < 0))
print_bad_pte(vma, addr, ptent, page);
}
-
- if (want_init_mlocked_on_free() && folio_test_mlocked(folio) &&
- !delay_rmap && folio_test_anon(folio)) {
- kernel_init_pages(page, folio_nr_pages(folio));
- }
-
if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
*force_flush = true;
*force_break = true;
@@ -4614,8 +4608,9 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
return ret;
- if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER)
+ if (folio_order(folio) != HPAGE_PMD_ORDER)
return ret;
+ page = &folio->page;
/*
* Just backoff if any subpage of a THP is corrupted otherwise
@@ -5106,10 +5101,16 @@ static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_stru
bool ignore_writable, bool pte_write_upgrade)
{
int nr = pte_pfn(fault_pte) - folio_pfn(folio);
- unsigned long start = max(vmf->address - nr * PAGE_SIZE, vma->vm_start);
- unsigned long end = min(vmf->address + (folio_nr_pages(folio) - nr) * PAGE_SIZE, vma->vm_end);
- pte_t *start_ptep = vmf->pte - (vmf->address - start) / PAGE_SIZE;
- unsigned long addr;
+ unsigned long start, end, addr = vmf->address;
+ unsigned long addr_start = addr - (nr << PAGE_SHIFT);
+ unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE);
+ pte_t *start_ptep;
+
+ /* Stay within the VMA and within the page table. */
+ start = max3(addr_start, pt_start, vma->vm_start);
+ end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE,
+ vma->vm_end);
+ start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT);
/* Restore all PTEs' mapping of the large folio */
for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
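A user-space sketch of the clamping introduced above (constants and addresses are illustrative): the PTE-restore range is bounded by the folio's span, by the PMD-sized page table that vmf->pte indexes into, and by the VMA.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)

static unsigned long max3(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a > b ? a : b;
	return m > c ? m : c;
}

static unsigned long min3(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	unsigned long addr = 0x7f0000201000UL;		/* faulting address */
	unsigned long nr = 4;				/* pages into the folio */
	unsigned long folio_size = 16UL << PAGE_SHIFT;
	unsigned long vm_start = 0x7f0000200000UL, vm_end = 0x7f0000400000UL;

	unsigned long addr_start = addr - (nr << PAGE_SHIFT);
	unsigned long pt_start = addr & ~(PMD_SIZE - 1);
	unsigned long start = max3(addr_start, pt_start, vm_start);
	unsigned long end = min3(addr_start + folio_size,
				 pt_start + PMD_SIZE, vm_end);

	printf("restore PTEs in [%#lx, %#lx)\n", start, end);
	return 0;
}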
diff --git a/mm/mempool.c b/mm/mempool.c
index 6ece63a00acf..3223337135d0 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -273,7 +273,7 @@ mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn,
{
mempool_t *pool;
- pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
+ pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
if (!pool)
return NULL;
diff --git a/mm/migrate.c b/mm/migrate.c
index dd04f578c19c..20cb9f5f7446 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1654,7 +1654,16 @@ static int migrate_pages_batch(struct list_head *from,
/*
* The rare folio on the deferred split list should
- * be split now. It should not count as a failure.
+ * be split now. It should not count as a failure:
+ * but increment nr_failed because, without doing so,
+ * migrate_pages() may report success with (split but
+ * unmigrated) pages still on its fromlist; whereas it
+ * always reports success when its fromlist is empty.
+ * stats->nr_thp_failed should be increased too,
+ * otherwise stats inconsistency will happen when
+ * migrate_pages_batch is called via migrate_pages()
+ * with MIGRATE_SYNC and MIGRATE_ASYNC.
+ *
* Only check it without removing it from the list.
* Since the folio can be on deferred_split_scan()
* local list and removing it can cause the local list
@@ -1669,6 +1678,8 @@ static int migrate_pages_batch(struct list_head *from,
if (nr_pages > 2 &&
!list_empty(&folio->_deferred_list)) {
if (try_split_folio(folio, split_folios) == 0) {
+ nr_failed++;
+ stats->nr_thp_failed += is_thp;
stats->nr_thp_split += is_thp;
stats->nr_split++;
continue;
diff --git a/mm/mm_init.c b/mm/mm_init.c
index f72b852bd5b8..3ec04933f7fd 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2523,9 +2523,6 @@ EXPORT_SYMBOL(init_on_alloc);
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
EXPORT_SYMBOL(init_on_free);
-DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON, init_mlocked_on_free);
-EXPORT_SYMBOL(init_mlocked_on_free);
-
static bool _init_on_alloc_enabled_early __read_mostly
= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
static int __init early_init_on_alloc(char *buf)
@@ -2543,14 +2540,6 @@ static int __init early_init_on_free(char *buf)
}
early_param("init_on_free", early_init_on_free);
-static bool _init_mlocked_on_free_enabled_early __read_mostly
- = IS_ENABLED(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON);
-static int __init early_init_mlocked_on_free(char *buf)
-{
- return kstrtobool(buf, &_init_mlocked_on_free_enabled_early);
-}
-early_param("init_mlocked_on_free", early_init_mlocked_on_free);
-
DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
/*
@@ -2578,21 +2567,12 @@ static void __init mem_debugging_and_hardening_init(void)
}
#endif
- if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early ||
- _init_mlocked_on_free_enabled_early) &&
+ if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
page_poisoning_requested) {
pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
- "will take precedence over init_on_alloc, init_on_free "
- "and init_mlocked_on_free\n");
+ "will take precedence over init_on_alloc and init_on_free\n");
_init_on_alloc_enabled_early = false;
_init_on_free_enabled_early = false;
- _init_mlocked_on_free_enabled_early = false;
- }
-
- if (_init_mlocked_on_free_enabled_early && _init_on_free_enabled_early) {
- pr_info("mem auto-init: init_on_free is on, "
- "will take precedence over init_mlocked_on_free\n");
- _init_mlocked_on_free_enabled_early = false;
}
if (_init_on_alloc_enabled_early) {
@@ -2609,17 +2589,9 @@ static void __init mem_debugging_and_hardening_init(void)
static_branch_disable(&init_on_free);
}
- if (_init_mlocked_on_free_enabled_early) {
- want_check_pages = true;
- static_branch_enable(&init_mlocked_on_free);
- } else {
- static_branch_disable(&init_mlocked_on_free);
- }
-
- if (IS_ENABLED(CONFIG_KMSAN) && (_init_on_alloc_enabled_early ||
- _init_on_free_enabled_early || _init_mlocked_on_free_enabled_early))
- pr_info("mem auto-init: please make sure init_on_alloc, init_on_free and "
- "init_mlocked_on_free are disabled when running KMSAN\n");
+ if (IS_ENABLED(CONFIG_KMSAN) &&
+ (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
+ pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
#ifdef CONFIG_DEBUG_PAGEALLOC
if (debug_pagealloc_enabled()) {
@@ -2658,10 +2630,9 @@ static void __init report_meminit(void)
else
stack = "off";
- pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s, mlocked free:%s\n",
+ pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
- want_init_on_free() ? "on" : "off",
- want_init_mlocked_on_free() ? "on" : "off");
+ want_init_on_free() ? "on" : "off");
if (want_init_on_free())
pr_info("mem auto-init: clearing system memory may take some time...\n");
}
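A userspace sketch of the boot-parameter handling that remains after this removal, assuming kstrtobool-like parsing of y/n/1/0/on/off (the helper below is illustrative, not the kernel's): the parsed flag is then overridden when page poisoning is requested, as in the hunk above.

#include <stdbool.h>
#include <stdio.h>

static int parse_bool(const char *s, bool *res)
{
	if (!s || !s[0])
		return -1;
	switch (s[0]) {
	case 'y': case 'Y': case '1': *res = true;  return 0;
	case 'n': case 'N': case '0': *res = false; return 0;
	case 'o': case 'O':
		if (s[1] == 'n' || s[1] == 'N') { *res = true;  return 0; }
		if (s[1] == 'f' || s[1] == 'F') { *res = false; return 0; }
		return -1;
	default:
		return -1;
	}
}

int main(void)
{
	bool init_on_free = false;
	bool page_poisoning_requested = true;

	if (parse_bool("on", &init_on_free))
		return 1;
	/* Page poisoning takes precedence, as in the hunk above. */
	if (page_poisoning_requested)
		init_on_free = false;
	printf("init_on_free=%d\n", init_on_free);
	return 0;
}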
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2e22ce5675ca..9ecf99190ea2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -504,10 +504,15 @@ out:
static inline unsigned int order_to_pindex(int migratetype, int order)
{
+ bool __maybe_unused movable;
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (order > PAGE_ALLOC_COSTLY_ORDER) {
VM_BUG_ON(order != HPAGE_PMD_ORDER);
- return NR_LOWORDER_PCP_LISTS;
+
+ movable = migratetype == MIGRATE_MOVABLE;
+
+ return NR_LOWORDER_PCP_LISTS + movable;
}
#else
VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
@@ -521,7 +526,7 @@ static inline int pindex_to_order(unsigned int pindex)
int order = pindex / MIGRATE_PCPTYPES;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (pindex == NR_LOWORDER_PCP_LISTS)
+ if (pindex >= NR_LOWORDER_PCP_LISTS)
order = HPAGE_PMD_ORDER;
#else
VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
@@ -1016,7 +1021,7 @@ static inline bool should_skip_kasan_poison(struct page *page)
return page_kasan_tag(page) == KASAN_TAG_KERNEL;
}
-void kernel_init_pages(struct page *page, int numpages)
+static void kernel_init_pages(struct page *page, int numpages)
{
int i;
@@ -1955,10 +1960,12 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
}
/*
- * Reserve a pageblock for exclusive use of high-order atomic allocations if
- * there are no empty page blocks that contain a page with a suitable order
+ * Reserve the pageblock(s) surrounding an allocation request for
+ * exclusive use of high-order atomic allocations if there are no
+ * empty page blocks that contain a page with a suitable order
*/
-static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
+static void reserve_highatomic_pageblock(struct page *page, int order,
+ struct zone *zone)
{
int mt;
unsigned long max_managed, flags;
@@ -1984,10 +1991,17 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
/* Yoink! */
mt = get_pageblock_migratetype(page);
/* Only reserve normal pageblocks (i.e., they can merge with others) */
- if (migratetype_is_mergeable(mt))
- if (move_freepages_block(zone, page, mt,
- MIGRATE_HIGHATOMIC) != -1)
- zone->nr_reserved_highatomic += pageblock_nr_pages;
+ if (!migratetype_is_mergeable(mt))
+ goto out_unlock;
+
+ if (order < pageblock_order) {
+ if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
+ goto out_unlock;
+ zone->nr_reserved_highatomic += pageblock_nr_pages;
+ } else {
+ change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
+ zone->nr_reserved_highatomic += 1 << order;
+ }
out_unlock:
spin_unlock_irqrestore(&zone->lock, flags);
@@ -1999,7 +2013,7 @@ out_unlock:
* intense memory pressure but failed atomic allocations should be easier
* to recover from than an OOM.
*
- * If @force is true, try to unreserve a pageblock even though highatomic
+ * If @force is true, try to unreserve pageblocks even though highatomic
* pageblock is exhausted.
*/
static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
@@ -2041,6 +2055,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
* adjust the count once.
*/
if (is_migrate_highatomic(mt)) {
+ unsigned long size;
/*
* It should never happen but changes to
* locking could inadvertently allow a per-cpu
@@ -2048,9 +2063,9 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
* while unreserving so be safe and watch for
* underflows.
*/
- zone->nr_reserved_highatomic -= min(
- pageblock_nr_pages,
- zone->nr_reserved_highatomic);
+ size = max(pageblock_nr_pages, 1UL << order);
+ size = min(size, zone->nr_reserved_highatomic);
+ zone->nr_reserved_highatomic -= size;
}
/*
@@ -2062,11 +2077,19 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
* of pageblocks that cannot be completely freed
* may increase.
*/
- ret = move_freepages_block(zone, page, mt,
- ac->migratetype);
+ if (order < pageblock_order)
+ ret = move_freepages_block(zone, page, mt,
+ ac->migratetype);
+ else {
+ move_to_free_list(page, zone, order, mt,
+ ac->migratetype);
+ change_pageblock_range(page, order,
+ ac->migratetype);
+ ret = 1;
+ }
/*
- * Reserving this block already succeeded, so this should
- * not fail on zone boundaries.
+ * Reserving the block(s) already succeeded,
+ * so this should not fail on zone boundaries.
*/
WARN_ON_ONCE(ret == -1);
if (ret > 0) {
@@ -3406,7 +3429,7 @@ try_this_zone:
* if the pageblock should be reserved for the future
*/
if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
- reserve_highatomic_pageblock(page, zone);
+ reserve_highatomic_pageblock(page, order, zone);
return page;
} else {
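The reservation arithmetic above, restated as a standalone sketch (a pageblock order of 9 is an assumed example, not taken from the patch): requests at or above pageblock_order now reserve 1 << order pages, and unreserving subtracts max(pageblock_nr_pages, 1 << order) clamped to the running total.

#include <stdio.h>

#define PAGEBLOCK_ORDER    9UL
#define PAGEBLOCK_NR_PAGES (1UL << PAGEBLOCK_ORDER)

static unsigned long reserve(unsigned long reserved, unsigned int order)
{
	if (order < PAGEBLOCK_ORDER)
		return reserved + PAGEBLOCK_NR_PAGES;
	return reserved + (1UL << order);
}

static unsigned long unreserve(unsigned long reserved, unsigned int order)
{
	unsigned long size = PAGEBLOCK_NR_PAGES > (1UL << order) ?
			     PAGEBLOCK_NR_PAGES : (1UL << order);

	if (size > reserved)    /* watch for underflow, as the comment says */
		size = reserved;
	return reserved - size;
}

int main(void)
{
	unsigned long reserved = 0;

	reserved = reserve(reserved, 3);    /* small request: whole pageblock */
	reserved = reserve(reserved, 10);   /* order-10 request: 1024 pages */
	printf("reserved=%lu\n", reserved); /* 512 + 1024 = 1536 */
	reserved = unreserve(reserved, 10);
	reserved = unreserve(reserved, 3);
	printf("reserved=%lu\n", reserved); /* back to 0 */
	return 0;
}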
diff --git a/mm/page_io.c b/mm/page_io.c
index 46c603dddf04..0a150c240bf4 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -217,7 +217,7 @@ static inline void count_swpout_vm_event(struct folio *folio)
count_memcg_folio_events(folio, THP_SWPOUT, 1);
count_vm_event(THP_SWPOUT);
}
- count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_SWPOUT);
+ count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
#endif
count_vm_events(PSWPOUT, folio_nr_pages(folio));
}
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 4169576bed72..509c6ef8de40 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -73,6 +73,9 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
page = pfn_to_page(pfn);
page_ext = page_ext_get(page);
+ if (!page_ext)
+ return;
+
BUG_ON(PageSlab(page));
anon = PageAnon(page);
@@ -110,6 +113,9 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
page = pfn_to_page(pfn);
page_ext = page_ext_get(page);
+ if (!page_ext)
+ return;
+
BUG_ON(PageSlab(page));
anon = PageAnon(page);
@@ -140,7 +146,10 @@ void __page_table_check_zero(struct page *page, unsigned int order)
BUG_ON(PageSlab(page));
page_ext = page_ext_get(page);
- BUG_ON(!page_ext);
+
+ if (!page_ext)
+ return;
+
for (i = 0; i < (1ul << order); i++) {
struct page_table_check *ptc = get_page_table_check(page_ext);
diff --git a/mm/shmem.c b/mm/shmem.c
index f5d60436b604..a8b181a63402 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1786,7 +1786,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
xa_lock_irq(&swap_mapping->i_pages);
error = shmem_replace_entry(swap_mapping, swap_index, old, new);
if (!error) {
- mem_cgroup_migrate(old, new);
+ mem_cgroup_replace_folio(old, new);
__lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
__lruvec_stat_mod_folio(new, NR_SHMEM, 1);
__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
diff --git a/mm/slub.c b/mm/slub.c
index 0809760cf789..4927edec6a8c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1952,7 +1952,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
#ifdef CONFIG_MEMCG
new_exts |= MEMCG_DATA_OBJEXTS;
#endif
- old_exts = slab->obj_exts;
+ old_exts = READ_ONCE(slab->obj_exts);
handle_failed_objexts_alloc(old_exts, vec, objects);
if (new_slab) {
/*
@@ -1961,7 +1961,8 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
* be simply assigned.
*/
slab->obj_exts = new_exts;
- } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+ } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
+ cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
/*
* If the slab is already in use, somebody can allocate and
* assign slabobj_exts in parallel. In this case the existing
@@ -3901,7 +3902,6 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
unsigned int orig_size)
{
unsigned int zero_size = s->object_size;
- struct slabobj_ext *obj_exts;
bool kasan_init = init;
size_t i;
gfp_t init_flags = flags & gfp_allowed_mask;
@@ -3944,9 +3944,11 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
kmemleak_alloc_recursive(p[i], s->object_size, 1,
s->flags, init_flags);
kmsan_slab_alloc(s, p[i], init_flags);
+#ifdef CONFIG_MEM_ALLOC_PROFILING
if (need_slab_obj_ext()) {
+ struct slabobj_ext *obj_exts;
+
obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]);
-#ifdef CONFIG_MEM_ALLOC_PROFILING
/*
* Currently obj_exts is used only for allocation profiling.
* If other users appear then mem_alloc_profiling_enabled()
@@ -3954,8 +3956,8 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
*/
if (likely(obj_exts))
alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
-#endif
}
+#endif
}
return memcg_slab_post_alloc_hook(s, lru, flags, size, p);
diff --git a/mm/util.c b/mm/util.c
index c9e519e6811f..fe723241b66f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -139,14 +139,14 @@ EXPORT_SYMBOL(kmemdup_noprof);
* kmemdup_array - duplicate a given array.
*
* @src: array to duplicate.
- * @element_size: size of each element of array.
* @count: number of elements to duplicate from array.
+ * @element_size: size of each element of array.
* @gfp: GFP mask to use.
*
* Return: duplicated array of @src or %NULL in case of error,
* result is physically contiguous. Use kfree() to free.
*/
-void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp)
+void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
{
return kmemdup(src, size_mul(element_size, count), gfp);
}
@@ -705,7 +705,7 @@ void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flag
if (oldsize >= newsize)
return (void *)p;
- newp = kvmalloc(newsize, flags);
+ newp = kvmalloc_noprof(newsize, flags);
if (!newp)
return NULL;
memcpy(newp, p, oldsize);
@@ -726,7 +726,7 @@ void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
- return __vmalloc(bytes, flags);
+ return __vmalloc_noprof(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array_noprof);
@@ -737,7 +737,7 @@ EXPORT_SYMBOL(__vmalloc_array_noprof);
*/
void *vmalloc_array_noprof(size_t n, size_t size)
{
- return __vmalloc_array(n, size, GFP_KERNEL);
+ return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array_noprof);
@@ -749,7 +749,7 @@ EXPORT_SYMBOL(vmalloc_array_noprof);
*/
void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
- return __vmalloc_array(n, size, flags | __GFP_ZERO);
+ return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc_noprof);
@@ -760,7 +760,7 @@ EXPORT_SYMBOL(__vcalloc_noprof);
*/
void *vcalloc_noprof(size_t n, size_t size)
{
- return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
+ return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc_noprof);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 5d3aa2dc88a8..d0cbdd7c1e5b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -722,7 +722,7 @@ int is_vmalloc_or_module_addr(const void *x)
* and fall back on vmalloc() if that fails. Others
* just put it in the vmalloc space.
*/
-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
+#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
unsigned long addr = (unsigned long)kasan_reset_tag(x);
if (addr >= MODULES_VADDR && addr < MODULES_END)
return 1;
@@ -2498,6 +2498,7 @@ struct vmap_block {
struct list_head free_list;
struct rcu_head rcu_head;
struct list_head purge;
+ unsigned int cpu;
};
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
@@ -2625,8 +2626,15 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
free_vmap_area(va);
return ERR_PTR(err);
}
-
- vbq = raw_cpu_ptr(&vmap_block_queue);
+ /*
+ * list_add_tail_rcu() can run on a core other than
+ * vb->cpu due to task migration. This is safe because
+ * list_add_tail_rcu() ensures the list's integrity
+ * together with list_for_each_rcu from the read side.
+ */
+ vb->cpu = raw_smp_processor_id();
+ vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
spin_lock(&vbq->lock);
list_add_tail_rcu(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
@@ -2654,9 +2662,10 @@ static void free_vmap_block(struct vmap_block *vb)
}
static bool purge_fragmented_block(struct vmap_block *vb,
- struct vmap_block_queue *vbq, struct list_head *purge_list,
- bool force_purge)
+ struct list_head *purge_list, bool force_purge)
{
+ struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu);
+
if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
vb->dirty == VMAP_BBMAP_BITS)
return false;
@@ -2704,7 +2713,7 @@ static void purge_fragmented_blocks(int cpu)
continue;
spin_lock(&vb->lock);
- purge_fragmented_block(vb, vbq, &purge, true);
+ purge_fragmented_block(vb, &purge, true);
spin_unlock(&vb->lock);
}
rcu_read_unlock();
@@ -2841,7 +2850,7 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
* not purgeable, check whether there is dirty
* space to be flushed.
*/
- if (!purge_fragmented_block(vb, vbq, &purge_list, false) &&
+ if (!purge_fragmented_block(vb, &purge_list, false) &&
vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
unsigned long va_start = vb->va->va_start;
unsigned long s, e;
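A minimal sketch of why vb->cpu is recorded (simplified structures, not the kernel's): the block is queued on the CPU it was created on, and a later purge must find that same per-CPU queue even when it runs on a different CPU.

#include <stdio.h>

#define NR_CPUS 4

struct block { int cpu; int free; };
struct queue { int nr_blocks; };

static struct queue vbq[NR_CPUS];

static void enqueue(struct block *b, int creating_cpu)
{
	b->cpu = creating_cpu;          /* remember the owning queue */
	vbq[b->cpu].nr_blocks++;
}

static void purge(struct block *b)
{
	/* Use b->cpu, not the CPU running the purge, to find the queue. */
	vbq[b->cpu].nr_blocks--;
}

int main(void)
{
	struct block b = { .free = 1 };

	enqueue(&b, 2);      /* created on CPU 2 */
	purge(&b);           /* purged, possibly from another CPU */
	printf("queue 2 now holds %d blocks\n", vbq[2].nr_blocks);
	return 0;
}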
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d55e8d07ffc4..2e34de9cd0d4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1227,7 +1227,7 @@ retry:
THP_SWPOUT_FALLBACK, 1);
count_vm_event(THP_SWPOUT_FALLBACK);
}
- count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK);
+ count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
#endif
if (!add_to_swap(folio))
goto activate_locked_split;
diff --git a/net/9p/client.c b/net/9p/client.c
index 00774656eeac..5cd94721d974 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -236,6 +236,8 @@ static int p9_fcall_init(struct p9_client *c, struct p9_fcall *fc,
if (!fc->sdata)
return -ENOMEM;
fc->capacity = alloc_msize;
+ fc->id = 0;
+ fc->tag = P9_NOTAG;
return 0;
}
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 8077cf2ee448..d6f9fae06a9d 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1378,8 +1378,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock,
{
struct sk_buff *skb;
struct sock *newsk;
+ ax25_dev *ax25_dev;
DEFINE_WAIT(wait);
struct sock *sk;
+ ax25_cb *ax25;
int err = 0;
if (sock->state != SS_UNCONNECTED)
@@ -1434,6 +1436,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock,
kfree_skb(skb);
sk_acceptq_removed(sk);
newsock->state = SS_CONNECTED;
+ ax25 = sk_to_ax25(newsk);
+ ax25_dev = ax25->ax25_dev;
+ netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC);
+ ax25_dev_hold(ax25_dev);
out:
release_sock(sk);
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 742d7c68e7e7..9efd6690b344 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -196,7 +196,7 @@ void __exit ax25_dev_free(void)
list_for_each_entry_safe(s, n, &ax25_dev_list, list) {
netdev_put(s->dev, &s->dev_tracker);
list_del(&s->list);
- kfree(s);
+ ax25_dev_put(s);
}
spin_unlock_bh(&ax25_dev_lock);
}
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index ac74f6ead62d..8f6dd2c6ee41 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
+#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
@@ -132,6 +133,29 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
}
/**
+ * batadv_vlan_id_valid() - check if vlan id is in valid batman-adv encoding
+ * @vid: the VLAN identifier
+ *
+ * Return: true when no VLAN is set or the VLAN is within the valid range,
+ * false otherwise
+ */
+static bool batadv_vlan_id_valid(unsigned short vid)
+{
+ unsigned short non_vlan = vid & ~(BATADV_VLAN_HAS_TAG | VLAN_VID_MASK);
+
+ if (vid == 0)
+ return true;
+
+ if (!(vid & BATADV_VLAN_HAS_TAG))
+ return false;
+
+ if (non_vlan)
+ return false;
+
+ return true;
+}
+
+/**
* batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan
* object
* @orig_node: the originator serving the VLAN
@@ -149,6 +173,9 @@ batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
{
struct batadv_orig_node_vlan *vlan;
+ if (!batadv_vlan_id_valid(vid))
+ return NULL;
+
spin_lock_bh(&orig_node->vlan_list_lock);
/* first look if an object for this vid already exists */
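A standalone check of the batadv_vlan_id_valid() logic added above; VLAN_VID_MASK and BATADV_VLAN_HAS_TAG are assumed here to be 0x0fff and bit 15, which may not match every kernel version.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define VLAN_VID_MASK        0x0fff    /* assumed, from <linux/if_vlan.h> */
#define BATADV_VLAN_HAS_TAG  (1 << 15) /* assumed batman-adv encoding bit */

static bool vlan_id_valid(unsigned short vid)
{
	unsigned short non_vlan = vid & ~(BATADV_VLAN_HAS_TAG | VLAN_VID_MASK);

	if (vid == 0)
		return true;
	if (!(vid & BATADV_VLAN_HAS_TAG))
		return false;
	return non_vlan == 0;
}

int main(void)
{
	assert(vlan_id_valid(0));                             /* untagged */
	assert(vlan_id_valid(BATADV_VLAN_HAS_TAG | 100));     /* tagged vid 100 */
	assert(!vlan_id_valid(100));                          /* vid without the tag flag */
	assert(!vlan_id_valid(BATADV_VLAN_HAS_TAG | 0x5000)); /* stray high bits */
	printf("all checks passed\n");
	return 0;
}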
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b21ff3c36b07..2243cec18ecc 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -209,6 +209,20 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
}
/**
+ * batadv_tt_local_entry_free_rcu() - free the tt_local_entry
+ * @rcu: rcu pointer of the tt_local_entry
+ */
+static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
+{
+ struct batadv_tt_local_entry *tt_local_entry;
+
+ tt_local_entry = container_of(rcu, struct batadv_tt_local_entry,
+ common.rcu);
+
+ kmem_cache_free(batadv_tl_cache, tt_local_entry);
+}
+
+/**
* batadv_tt_local_entry_release() - release tt_local_entry from lists and queue
* for free after rcu grace period
* @ref: kref pointer of the nc_node
@@ -222,7 +236,7 @@ static void batadv_tt_local_entry_release(struct kref *ref)
batadv_softif_vlan_put(tt_local_entry->vlan);
- kfree_rcu(tt_local_entry, common.rcu);
+ call_rcu(&tt_local_entry->common.rcu, batadv_tt_local_entry_free_rcu);
}
/**
@@ -241,6 +255,20 @@ batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
}
/**
+ * batadv_tt_global_entry_free_rcu() - free the tt_global_entry
+ * @rcu: rcu pointer of the tt_global_entry
+ */
+static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
+{
+ struct batadv_tt_global_entry *tt_global_entry;
+
+ tt_global_entry = container_of(rcu, struct batadv_tt_global_entry,
+ common.rcu);
+
+ kmem_cache_free(batadv_tg_cache, tt_global_entry);
+}
+
+/**
* batadv_tt_global_entry_release() - release tt_global_entry from lists and
* queue for free after rcu grace period
* @ref: kref pointer of the nc_node
@@ -254,7 +282,7 @@ void batadv_tt_global_entry_release(struct kref *ref)
batadv_tt_global_del_orig_list(tt_global_entry);
- kfree_rcu(tt_global_entry, common.rcu);
+ call_rcu(&tt_global_entry->common.rcu, batadv_tt_global_entry_free_rcu);
}
/**
@@ -380,6 +408,19 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
}
/**
+ * batadv_tt_orig_list_entry_free_rcu() - free the orig_entry
+ * @rcu: rcu pointer of the orig_entry
+ */
+static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
+{
+ struct batadv_tt_orig_list_entry *orig_entry;
+
+ orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
+
+ kmem_cache_free(batadv_tt_orig_cache, orig_entry);
+}
+
+/**
* batadv_tt_orig_list_entry_release() - release tt orig entry from lists and
* queue for free after rcu grace period
* @ref: kref pointer of the tt orig entry
@@ -392,7 +433,7 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref)
refcount);
batadv_orig_node_put(orig_entry->orig_node);
- kfree_rcu(orig_entry, rcu);
+ call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
}
/**
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 16daa79b7981..a8a7d2b36870 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -1194,7 +1194,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
cp.own_addr_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;
- cp.handle = instance;
+ cp.handle = adv ? adv->handle : instance;
if (flags & MGMT_ADV_FLAG_SEC_2M) {
cp.primary_phy = HCI_ADV_PHY_1M;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 5b509b767557..aed025734d04 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4011,8 +4011,8 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
status = L2CAP_CS_AUTHOR_PEND;
chan->ops->defer(chan);
} else {
- l2cap_state_change(chan, BT_CONNECT2);
- result = L2CAP_CR_PEND;
+ l2cap_state_change(chan, BT_CONFIG);
+ result = L2CAP_CR_SUCCESS;
status = L2CAP_CS_NO_INFO;
}
} else {
@@ -4647,13 +4647,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
memset(&rsp, 0, sizeof(rsp));
- if (max > hcon->le_conn_max_interval) {
- BT_DBG("requested connection interval exceeds current bounds.");
- err = -EINVAL;
- } else {
- err = hci_check_conn_params(min, max, latency, to_multiplier);
- }
-
+ err = hci_check_conn_params(min, max, latency, to_multiplier);
if (err)
rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
else
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index f6aad4ed2ab2..36ae54f57bf5 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -727,10 +727,16 @@ static void
__bpf_prog_test_run_raw_tp(void *data)
{
struct bpf_raw_tp_test_run_info *info = data;
+ struct bpf_trace_run_ctx run_ctx = {};
+ struct bpf_run_ctx *old_run_ctx;
+
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
rcu_read_lock();
info->retval = bpf_prog_run(info->prog, info->ctx);
rcu_read_unlock();
+
+ bpf_reset_run_ctx(old_run_ctx);
}
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c
index 3c66141d34d6..1820f09ff59c 100644
--- a/net/bridge/br_mst.c
+++ b/net/bridge/br_mst.c
@@ -73,11 +73,10 @@ int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state)
}
EXPORT_SYMBOL_GPL(br_mst_get_state);
-static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v,
+static void br_mst_vlan_set_state(struct net_bridge_vlan_group *vg,
+ struct net_bridge_vlan *v,
u8 state)
{
- struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
-
if (br_vlan_get_state(v) == state)
return;
@@ -103,7 +102,7 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
int err = 0;
rcu_read_lock();
- vg = nbp_vlan_group(p);
+ vg = nbp_vlan_group_rcu(p);
if (!vg)
goto out;
@@ -121,7 +120,7 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
if (v->brvlan->msti != msti)
continue;
- br_mst_vlan_set_state(p, v, state);
+ br_mst_vlan_set_state(vg, v, state);
}
out:
@@ -140,13 +139,13 @@ static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
* it.
*/
if (v != pv && v->brvlan->msti == msti) {
- br_mst_vlan_set_state(pv->port, pv, v->state);
+ br_mst_vlan_set_state(vg, pv, v->state);
return;
}
}
/* Otherwise, start out in a new MSTI with all ports disabled. */
- return br_mst_vlan_set_state(pv->port, pv, BR_STATE_DISABLED);
+ return br_mst_vlan_set_state(vg, pv, BR_STATE_DISABLED);
}
int br_mst_vlan_set_msti(struct net_bridge_vlan *mv, u16 msti)
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index a6fb89fa6278..7e8a20f2fc42 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -30,10 +30,6 @@ MODULE_ALIAS("can-proto-" __stringify(CAN_J1939));
/* CAN_HDR: #bytes before can_frame data part */
#define J1939_CAN_HDR (offsetof(struct can_frame, data))
-/* CAN_FTR: #bytes beyond data part */
-#define J1939_CAN_FTR (sizeof(struct can_frame) - J1939_CAN_HDR - \
- sizeof(((struct can_frame *)0)->data))
-
/* lowest layer */
static void j1939_can_recv(struct sk_buff *iskb, void *data)
{
@@ -342,7 +338,7 @@ int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb)
memset(cf, 0, J1939_CAN_HDR);
/* make it a full can frame again */
- skb_put(skb, J1939_CAN_FTR + (8 - dlc));
+ skb_put_zero(skb, 8 - dlc);
canid = CAN_EFF_FLAG |
(skcb->priority << 26) |
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index fe3df23a2595..4be73de5033c 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1593,8 +1593,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
struct j1939_sk_buff_cb skcb = *j1939_skb_to_cb(skb);
struct j1939_session *session;
const u8 *dat;
+ int len, ret;
pgn_t pgn;
- int len;
netdev_dbg(priv->ndev, "%s\n", __func__);
@@ -1653,7 +1653,22 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
session->tskey = priv->rx_tskey++;
j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_RTS);
- WARN_ON_ONCE(j1939_session_activate(session));
+ ret = j1939_session_activate(session);
+ if (ret) {
+ /* Entering this scope indicates an issue with the J1939 bus.
+ * Possible scenarios include:
+ * - A time lapse occurred, and a new session was initiated
+ * because another packet was sent correctly in the meantime.
+ * This could have been caused by an overly long interrupt,
+ * a debugger, or the task being scheduled out.
+ * - The bus is receiving numerous erroneous packets, either
+ * from a malfunctioning device or during a test scenario.
+ */
+ netdev_alert(priv->ndev, "%s: 0x%p: concurrent session with same addr (%02x %02x) is already active.\n",
+ __func__, session, skcb.addr.sa, skcb.addr.da);
+ j1939_session_put(session);
+ return NULL;
+ }
return session;
}
@@ -1681,6 +1696,8 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
j1939_session_timers_cancel(session);
j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
+ if (session->transmission)
+ j1939_session_deactivate_activate_next(session);
return -EBUSY;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index e1bb6d7856d9..2b4819b610b8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1226,9 +1226,9 @@ int dev_change_name(struct net_device *dev, const char *newname)
memcpy(oldname, dev->name, IFNAMSIZ);
- write_seqlock(&netdev_rename_lock);
+ write_seqlock_bh(&netdev_rename_lock);
err = dev_get_valid_name(net, dev, newname);
- write_sequnlock(&netdev_rename_lock);
+ write_sequnlock_bh(&netdev_rename_lock);
if (err < 0) {
up_write(&devnet_rename_sem);
@@ -1269,9 +1269,9 @@ rollback:
if (err >= 0) {
err = ret;
down_write(&devnet_rename_sem);
- write_seqlock(&netdev_rename_lock);
+ write_seqlock_bh(&netdev_rename_lock);
memcpy(dev->name, oldname, IFNAMSIZ);
- write_sequnlock(&netdev_rename_lock);
+ write_sequnlock_bh(&netdev_rename_lock);
memcpy(oldname, newname, IFNAMSIZ);
WRITE_ONCE(dev->name_assign_type, old_assign_type);
old_assign_type = NET_NAME_RENAMED;
@@ -4516,12 +4516,13 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *rflow, u16 next_cpu)
{
if (next_cpu < nr_cpu_ids) {
+ u32 head;
#ifdef CONFIG_RFS_ACCEL
struct netdev_rx_queue *rxqueue;
struct rps_dev_flow_table *flow_table;
struct rps_dev_flow *old_rflow;
- u32 flow_id, head;
u16 rxq_index;
+ u32 flow_id;
int rc;
/* Should we steer this flow to a different hardware queue? */
@@ -11418,9 +11419,9 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
if (new_name[0]) {
/* Rename the netdev to prepared name */
- write_seqlock(&netdev_rename_lock);
+ write_seqlock_bh(&netdev_rename_lock);
strscpy(dev->name, new_name, IFNAMSIZ);
- write_sequnlock(&netdev_rename_lock);
+ write_sequnlock_bh(&netdev_rename_lock);
}
/* Fixup kobjects */
diff --git a/net/core/dst_cache.c b/net/core/dst_cache.c
index 6a0482e676d3..70c634b9e7b0 100644
--- a/net/core/dst_cache.c
+++ b/net/core/dst_cache.c
@@ -27,6 +27,7 @@ struct dst_cache_pcpu {
static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache,
struct dst_entry *dst, u32 cookie)
{
+ DEBUG_NET_WARN_ON_ONCE(!in_softirq());
dst_release(dst_cache->dst);
if (dst)
dst_hold(dst);
@@ -40,6 +41,7 @@ static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache,
{
struct dst_entry *dst;
+ DEBUG_NET_WARN_ON_ONCE(!in_softirq());
dst = idst->dst;
if (!dst)
goto fail;
diff --git a/net/core/filter.c b/net/core/filter.c
index 2510464692af..9933851c685e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1665,6 +1665,11 @@ static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
static inline int __bpf_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{
+#ifdef CONFIG_DEBUG_NET
+ /* Avoid a splat in pskb_may_pull_reason() */
+ if (write_len > INT_MAX)
+ return -EINVAL;
+#endif
return skb_ensure_writable(skb, write_len);
}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 4f7a61688d18..6a823ba906c6 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -693,11 +693,16 @@ EXPORT_SYMBOL_GPL(__put_net);
* get_net_ns - increment the refcount of the network namespace
* @ns: common namespace (net)
*
- * Returns the net's common namespace.
+ * Returns the net's common namespace or ERR_PTR() if ref is zero.
*/
struct ns_common *get_net_ns(struct ns_common *ns)
{
- return &get_net(container_of(ns, struct net, ns))->ns;
+ struct net *net;
+
+ net = maybe_get_net(container_of(ns, struct net, ns));
+ if (net)
+ return &net->ns;
+ return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns);
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 1f6ae6379e0f..05f9515d2c05 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -59,22 +59,22 @@ XDP_METADATA_KFUNC_xxx
nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
xdp_rx_meta, NETDEV_A_DEV_PAD) ||
nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
- xsk_features, NETDEV_A_DEV_PAD)) {
- genlmsg_cancel(rsp, hdr);
- return -EINVAL;
- }
+ xsk_features, NETDEV_A_DEV_PAD))
+ goto err_cancel_msg;
if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
- netdev->xdp_zc_max_segs)) {
- genlmsg_cancel(rsp, hdr);
- return -EINVAL;
- }
+ netdev->xdp_zc_max_segs))
+ goto err_cancel_msg;
}
genlmsg_end(rsp, hdr);
return 0;
+
+err_cancel_msg:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
}
static void
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b86b0a87367d..4668d6718040 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -6484,6 +6484,46 @@ static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
/* Process one rtnetlink message. */
+static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ rtnl_dumpit_func dumpit = cb->data;
+ int err;
+
+ /* The previous iteration has already finished; avoid calling ->dumpit()
+ * again, as it may not expect to be called after it has reached the end.
+ */
+ if (!dumpit)
+ return 0;
+
+ err = dumpit(skb, cb);
+
+ /* Old dump handlers used to send NLM_DONE in a separate recvmsg().
+ * Some applications which parse netlink manually depend on this.
+ */
+ if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
+ if (err < 0 && err != -EMSGSIZE)
+ return err;
+ if (!err)
+ cb->data = NULL;
+
+ return skb->len;
+ }
+ return err;
+}
+
+static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct netlink_dump_control *control)
+{
+ if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
+ WARN_ON(control->data);
+ control->data = control->dump;
+ control->dump = rtnl_dumpit;
+ }
+
+ return netlink_dump_start(ssk, skb, nlh, control);
+}
+
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -6548,7 +6588,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
.module = owner,
.flags = flags,
};
- err = netlink_dump_start(rtnl, skb, nlh, &c);
+ err = rtnetlink_dump_start(rtnl, skb, nlh, &c);
/* netlink_dump_start() will keep a reference on
* module if dump is still in progress.
*/
@@ -6694,7 +6734,7 @@ void __init rtnetlink_init(void)
register_netdevice_notifier(&rtnetlink_dev_notifier);
rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
- rtnl_dump_ifinfo, 0);
+ rtnl_dump_ifinfo, RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
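A toy model of the RTNL_FLAG_DUMP_SPLIT_NLM_DONE flow wired up above (not the netlink API; names are invented): while the wrapped handler still produces data the wrapper reports a non-empty reply, and once the handler finishes the wrapper clears its state so NLM_DONE is delivered in a separate, otherwise empty batch.

#include <stdio.h>

struct cb { int (*dumpit)(void); };

static int remaining = 3;

static int fake_dump(void)
{
	if (!remaining)
		return 0;       /* nothing left to dump */
	remaining--;
	return 1;               /* wrote data into the reply buffer */
}

static int split_done_dumpit(struct cb *cb)
{
	int err;

	if (!cb->dumpit)        /* finished in a previous call */
		return 0;

	err = cb->dumpit();
	if (err < 0)
		return err;
	if (err == 0)
		cb->dumpit = NULL;  /* NLM_DONE goes out in the next, empty batch */

	return 1;               /* report a non-empty reply either way */
}

int main(void)
{
	struct cb cb = { .dumpit = fake_dump };
	int batches = 0;

	while (split_done_dumpit(&cb) > 0)
		batches++;
	printf("batches sent before the separate DONE: %d\n", batches);
	return 0;
}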
diff --git a/net/core/sock.c b/net/core/sock.c
index 8629f9aecf91..100e975073ca 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3742,6 +3742,9 @@ void sk_common_release(struct sock *sk)
sk->sk_prot->unhash(sk);
+ if (sk->sk_socket)
+ sk->sk_socket->sk = NULL;
+
/*
* In this point socket cannot receive new packets, but it is possible
* that some packets are in flight because some CPU runs receiver and
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 9402889840bf..d3dbb92153f2 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -423,9 +423,6 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
struct sock *sk;
int err = 0;
- if (irqs_disabled())
- return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
-
spin_lock_bh(&stab->lock);
sk = *psk;
if (!sk_test || sk_test == sk)
@@ -948,9 +945,6 @@ static long sock_hash_delete_elem(struct bpf_map *map, void *key)
struct bpf_shtab_elem *elem;
int ret = -ENOENT;
- if (irqs_disabled())
- return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
-
hash = sock_hash_bucket_hash(key, key_size);
bucket = sock_hash_select_bucket(htab, hash);
@@ -1680,19 +1674,23 @@ void sock_map_close(struct sock *sk, long timeout)
lock_sock(sk);
rcu_read_lock();
- psock = sk_psock_get(sk);
- if (unlikely(!psock)) {
- rcu_read_unlock();
- release_sock(sk);
- saved_close = READ_ONCE(sk->sk_prot)->close;
- } else {
+ psock = sk_psock(sk);
+ if (likely(psock)) {
saved_close = psock->saved_close;
sock_map_remove_links(sk, psock);
+ psock = sk_psock_get(sk);
+ if (unlikely(!psock))
+ goto no_psock;
rcu_read_unlock();
sk_psock_stop(psock);
release_sock(sk);
cancel_delayed_work_sync(&psock->work);
sk_psock_put(sk, psock);
+ } else {
+ saved_close = READ_ONCE(sk->sk_prot)->close;
+no_psock:
+ rcu_read_unlock();
+ release_sock(sk);
}
/* Make sure we do not recurse. This is a bug.
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 41693154e426..022c12059cf2 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -295,10 +295,8 @@ static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
mutex_lock(&mem_id_lock);
ret = __mem_id_init_hash_table();
mutex_unlock(&mem_id_lock);
- if (ret < 0) {
- WARN_ON(1);
+ if (ret < 0)
return ERR_PTR(ret);
- }
}
xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ff41bd6f99c3..5926159a6f20 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -657,8 +657,11 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (dccp_v4_send_response(sk, req))
goto drop_and_free;
- inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
- reqsk_put(req);
+ if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
+ reqsk_free(req);
+ else
+ reqsk_put(req);
+
return 0;
drop_and_free:
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 85f4b8fdbe5e..da5dba120bc9 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -400,8 +400,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (dccp_v6_send_response(sk, req))
goto drop_and_free;
- inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
- reqsk_put(req);
+ if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
+ reqsk_free(req);
+ else
+ reqsk_put(req);
+
return 0;
drop_and_free:
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 049c3adeb850..4e3651101b86 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -161,9 +161,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb_reset_mac_header(skb);
- eth = (struct ethhdr *)skb->data;
- skb_pull_inline(skb, ETH_HLEN);
-
+ eth = eth_skb_pull_mac(skb);
eth_skb_pkt_type(skb, dev);
/*
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 5a55270aa86e..e645d751a5e8 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -2220,7 +2220,7 @@ static int ethtool_get_phy_stats_ethtool(struct net_device *dev,
const struct ethtool_ops *ops = dev->ethtool_ops;
int n_stats, ret;
- if (!ops || !ops->get_sset_count || ops->get_ethtool_phy_stats)
+ if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats)
return -EOPNOTSUPP;
n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
diff --git a/net/ethtool/tsinfo.c b/net/ethtool/tsinfo.c
index be2755c8d8fd..57d496287e52 100644
--- a/net/ethtool/tsinfo.c
+++ b/net/ethtool/tsinfo.c
@@ -38,11 +38,11 @@ static int tsinfo_prepare_data(const struct ethnl_req_info *req_base,
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
- if (req_base->flags & ETHTOOL_FLAG_STATS &&
- dev->ethtool_ops->get_ts_stats) {
+ if (req_base->flags & ETHTOOL_FLAG_STATS) {
ethtool_stats_init((u64 *)&data->stats,
sizeof(data->stats) / sizeof(u64));
- dev->ethtool_ops->get_ts_stats(dev, &data->stats);
+ if (dev->ethtool_ops->get_ts_stats)
+ dev->ethtool_ops->get_ts_stats(dev, &data->stats);
}
ret = __ethtool_get_ts_info(dev, &data->ts_info);
ethnl_ops_complete(dev);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e03ba4a21c39..b24d74616637 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1532,7 +1532,7 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
}
NAPI_GRO_CB(skb)->flush |= flush;
- NAPI_GRO_CB(skb)->inner_network_offset = off;
+ NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off;
/* Note : No need to call skb_gro_postpull_rcsum() here,
* as we already checked checksum over ipv4 header was 0
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index dd6d46015058..e9cb27061c12 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1810,6 +1810,29 @@ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
return CIPSO_V4_HDR_LEN + ret_val;
}
+static int cipso_v4_get_actual_opt_len(const unsigned char *data, int len)
+{
+ int iter = 0, optlen = 0;
+
+ /* Determining the new total option length is tricky because of
+ * the necessary padding; the only thing to do at this point is
+ * to walk the options one-by-one, skipping the padding at the
+ * end, to determine the actual option size and from there the
+ * new total option length.
+ */
+ while (iter < len) {
+ if (data[iter] == IPOPT_END) {
+ break;
+ } else if (data[iter] == IPOPT_NOP) {
+ iter++;
+ } else {
+ iter += data[iter + 1];
+ optlen = iter;
+ }
+ }
+ return optlen;
+}
+
/**
* cipso_v4_sock_setattr - Add a CIPSO option to a socket
* @sk: the socket
@@ -1986,7 +2009,6 @@ static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
u8 cipso_len;
u8 cipso_off;
unsigned char *cipso_ptr;
- int iter;
int optlen_new;
cipso_off = opt->opt.cipso - sizeof(struct iphdr);
@@ -2006,19 +2028,8 @@ static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
memmove(cipso_ptr, cipso_ptr + cipso_len,
opt->opt.optlen - cipso_off - cipso_len);
- /* determining the new total option length is tricky because of
- * the padding necessary, the only thing i can think to do at
- * this point is walk the options one-by-one, skipping the
- * padding at the end to determine the actual option size and
- * from there we can determine the new total option length */
- iter = 0;
- optlen_new = 0;
- while (iter < opt->opt.optlen)
- if (opt->opt.__data[iter] != IPOPT_NOP) {
- iter += opt->opt.__data[iter + 1];
- optlen_new = iter;
- } else
- iter++;
+ optlen_new = cipso_v4_get_actual_opt_len(opt->opt.__data,
+ opt->opt.optlen);
hdr_delta = opt->opt.optlen;
opt->opt.optlen = (optlen_new + 3) & ~3;
hdr_delta -= opt->opt.optlen;
@@ -2238,7 +2249,8 @@ int cipso_v4_skbuff_setattr(struct sk_buff *skb,
*/
int cipso_v4_skbuff_delattr(struct sk_buff *skb)
{
- int ret_val;
+ int ret_val, cipso_len, hdr_len_actual, new_hdr_len_actual, new_hdr_len,
+ hdr_len_delta;
struct iphdr *iph;
struct ip_options *opt = &IPCB(skb)->opt;
unsigned char *cipso_ptr;
@@ -2251,16 +2263,37 @@ int cipso_v4_skbuff_delattr(struct sk_buff *skb)
if (ret_val < 0)
return ret_val;
- /* the easiest thing to do is just replace the cipso option with noop
- * options since we don't change the size of the packet, although we
- * still need to recalculate the checksum */
-
iph = ip_hdr(skb);
cipso_ptr = (unsigned char *)iph + opt->cipso;
- memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]);
+ cipso_len = cipso_ptr[1];
+
+ hdr_len_actual = sizeof(struct iphdr) +
+ cipso_v4_get_actual_opt_len((unsigned char *)(iph + 1),
+ opt->optlen);
+ new_hdr_len_actual = hdr_len_actual - cipso_len;
+ new_hdr_len = (new_hdr_len_actual + 3) & ~3;
+ hdr_len_delta = (iph->ihl << 2) - new_hdr_len;
+
+ /* 1. shift any options after CIPSO to the left */
+ memmove(cipso_ptr, cipso_ptr + cipso_len,
+ new_hdr_len_actual - opt->cipso);
+ /* 2. move the whole IP header to its new place */
+ memmove((unsigned char *)iph + hdr_len_delta, iph, new_hdr_len_actual);
+ /* 3. adjust the skb layout */
+ skb_pull(skb, hdr_len_delta);
+ skb_reset_network_header(skb);
+ iph = ip_hdr(skb);
+ /* 4. re-fill new padding with IPOPT_END (may now be longer) */
+ memset((unsigned char *)iph + new_hdr_len_actual, IPOPT_END,
+ new_hdr_len - new_hdr_len_actual);
+
+ opt->optlen -= hdr_len_delta;
opt->cipso = 0;
opt->is_changed = 1;
-
+ if (hdr_len_delta != 0) {
+ iph->ihl = new_hdr_len >> 2;
+ iph_set_totlen(iph, skb->len);
+ }
ip_send_check(iph);
return 0;
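The option walk factored out as cipso_v4_get_actual_opt_len() can be exercised in isolation; the sketch below re-implements it in userspace for illustration (IPOPT_END and IPOPT_NOP are the standard values 0 and 1).

#include <assert.h>
#include <stdio.h>

#define IPOPT_END 0
#define IPOPT_NOP 1

static int actual_opt_len(const unsigned char *data, int len)
{
	int iter = 0, optlen = 0;

	while (iter < len) {
		if (data[iter] == IPOPT_END)
			break;
		if (data[iter] == IPOPT_NOP) {
			iter++;         /* NOPs are a single byte */
			continue;
		}
		iter += data[iter + 1]; /* type, length, payload... */
		optlen = iter;          /* end of the last real option */
	}
	return optlen;
}

int main(void)
{
	/* One NOP, a 4-byte option (type 0x89), then END padding to 8 bytes. */
	unsigned char opts[8] = { IPOPT_NOP, 0x89, 4, 0, 0,
				  IPOPT_END, IPOPT_END, IPOPT_END };

	assert(actual_opt_len(opts, sizeof(opts)) == 5);
	printf("actual option length: %d\n", actual_opt_len(opts, sizeof(opts)));
	return 0;
}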
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 96accde527da..d09f557eaa77 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1887,10 +1887,11 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
goto done;
if (fillargs.ifindex) {
- err = -ENODEV;
dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
- if (!dev)
+ if (!dev) {
+ err = -ENODEV;
goto done;
+ }
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
goto done;
@@ -1902,7 +1903,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
cb->seq = inet_base_seq(tgt_net);
- for_each_netdev_dump(net, dev, ctx->ifindex) {
+ for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
continue;
@@ -2804,7 +2805,7 @@ void __init devinet_init(void)
rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr,
- RTNL_FLAG_DUMP_UNLOCKED);
+ RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
inet_netconf_dump_devconf,
RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index c484b1c0fc00..7ad2cafb9276 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1050,11 +1050,6 @@ next:
e++;
}
}
-
- /* Don't let NLM_DONE coalesce into a message, even if it could.
- * Some user space expects NLM_DONE in a separate recv().
- */
- err = skb->len;
out:
cb->args[1] = e;
@@ -1665,5 +1660,5 @@ void __init ip_fib_init(void)
rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0);
rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0);
rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib,
- RTNL_FLAG_DUMP_UNLOCKED);
+ RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index d81f74ce0f02..d4f0eff8b20f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -1122,25 +1122,34 @@ drop:
inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
}
-static void reqsk_queue_hash_req(struct request_sock *req,
+static bool reqsk_queue_hash_req(struct request_sock *req,
unsigned long timeout)
{
+ bool found_dup_sk = false;
+
+ if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
+ return false;
+
+ /* The timer needs to be set up after a successful insertion. */
timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
mod_timer(&req->rsk_timer, jiffies + timeout);
- inet_ehash_insert(req_to_sk(req), NULL, NULL);
/* before letting lookups find us, make sure all req fields
* are committed to memory and refcnt initialized.
*/
smp_wmb();
refcount_set(&req->rsk_refcnt, 2 + 1);
+ return true;
}
-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout)
{
- reqsk_queue_hash_req(req, timeout);
+ if (!reqsk_queue_hash_req(req, timeout))
+ return false;
+
inet_csk_reqsk_queue_added(sk);
+ return true;
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
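A sketch of the caller pattern this bool return enables (names and refcount values are illustrative): on a failed insert no references were handed out, so the request is freed outright instead of having a never-taken reference dropped.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct req { int refcnt; };

static bool queue_hash_add(struct req *r, bool duplicate_present)
{
	if (duplicate_present)
		return false;   /* not inserted; no references were taken */
	r->refcnt = 3;          /* hash table + timer + caller */
	return true;
}

static void handle(bool duplicate_present)
{
	struct req *r = calloc(1, sizeof(*r));

	if (!r)
		return;
	if (!queue_hash_add(r, duplicate_present)) {
		free(r);        /* the reqsk_free() path */
		printf("duplicate: request freed\n");
		return;
	}
	r->refcnt--;            /* the reqsk_put() path: drop the caller's ref */
	printf("inserted: refcnt now %d\n", r->refcnt);
	free(r);                /* the real code frees only when refcnt reaches 0 */
}

int main(void)
{
	handle(false);
	handle(true);
	return 0;
}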
diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
index 69e331799604..73e66a088e25 100644
--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
+++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
@@ -58,6 +58,8 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
laddr = 0;
indev = __in_dev_get_rcu(skb->dev);
+ if (!indev)
+ return daddr;
in_dev_for_each_ifa_rcu(ifa, indev) {
if (ifa->ifa_flags & IFA_F_SECONDARY)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5fd54103174f..b3073d1c8f8f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -129,7 +129,8 @@ struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ipv4_mtu(const struct dst_entry *dst);
-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
+static void ipv4_negative_advice(struct sock *sk,
+ struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
@@ -825,22 +826,15 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
__ip_do_redirect(rt, skb, &fl4, true);
}
-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
+static void ipv4_negative_advice(struct sock *sk,
+ struct dst_entry *dst)
{
struct rtable *rt = dst_rtable(dst);
- struct dst_entry *ret = dst;
- if (rt) {
- if (dst->obsolete > 0) {
- ip_rt_put(rt);
- ret = NULL;
- } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
- rt->dst.expires) {
- ip_rt_put(rt);
- ret = NULL;
- }
- }
- return ret;
+ if ((dst->obsolete > 0) ||
+ (rt->rt_flags & RTCF_REDIRECTED) ||
+ rt->dst.expires)
+ sk_dst_reset(sk);
}
/*
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 681b54e1f3a6..e6790ea74877 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1165,6 +1165,9 @@ new_segment:
process_backlog++;
+#ifdef CONFIG_SKB_DECRYPTED
+ skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
+#endif
tcp_skb_entail(sk, skb);
copy = size_goal;
@@ -2646,6 +2649,10 @@ void tcp_set_state(struct sock *sk, int state)
if (oldstate != TCP_ESTABLISHED)
TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
break;
+ case TCP_CLOSE_WAIT:
+ if (oldstate == TCP_SYN_RECV)
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
+ break;
case TCP_CLOSE:
if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
@@ -2657,7 +2664,7 @@ void tcp_set_state(struct sock *sk, int state)
inet_put_port(sk);
fallthrough;
default:
- if (oldstate == TCP_ESTABLISHED)
+ if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
}
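A small model of the CURRESTAB accounting fixed above (not the kernel's tcp_set_state()): the gauge is also incremented for SYN_RECV -> CLOSE_WAIT and decremented when leaving either ESTABLISHED or CLOSE_WAIT, so increments and decrements stay balanced.

#include <assert.h>
#include <stdio.h>

enum state { SYN_RECV, ESTABLISHED, CLOSE_WAIT, CLOSE };

static int currestab;

static void set_state(enum state *cur, enum state next)
{
	enum state old = *cur;

	if (next == ESTABLISHED && old != ESTABLISHED)
		currestab++;
	else if (next == CLOSE_WAIT && old == SYN_RECV)
		currestab++;
	else if (next != ESTABLISHED && next != CLOSE_WAIT &&
		 (old == ESTABLISHED || old == CLOSE_WAIT))
		currestab--;

	*cur = next;
}

int main(void)
{
	enum state s = SYN_RECV;

	/* A FIN arriving together with the final handshake ACK moves the
	 * socket straight from SYN_RECV to CLOSE_WAIT; it is now counted. */
	set_state(&s, CLOSE_WAIT);
	assert(currestab == 1);
	set_state(&s, CLOSE);
	assert(currestab == 0);
	printf("CURRESTAB balanced: %d\n", currestab);
	return 0;
}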
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index 781b67a52571..09c0fa6756b7 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -933,6 +933,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
struct tcp_ao_key *key;
__be32 sisn, disn;
u8 *traffic_key;
+ int state;
u32 sne = 0;
info = rcu_dereference(tcp_sk(sk)->ao_info);
@@ -948,8 +949,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
disn = 0;
}
+ state = READ_ONCE(sk->sk_state);
/* Fast-path */
- if (likely((1 << sk->sk_state) & TCP_AO_ESTABLISHED)) {
+ if (likely((1 << state) & TCP_AO_ESTABLISHED)) {
enum skb_drop_reason err;
struct tcp_ao_key *current_key;
@@ -988,6 +990,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
return SKB_NOT_DROPPED_YET;
}
+ if (unlikely(state == TCP_CLOSE))
+ return SKB_DROP_REASON_TCP_CLOSE;
+
/* Lookup key based on peer address and keyid.
* current_key and rnext_key must not be used on tcp listen
* sockets as otherwise:
@@ -1001,7 +1006,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
if (th->syn && !th->ack)
goto verify_hash;
- if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
+ if ((1 << state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
/* Make the initial syn the likely case here */
if (unlikely(req)) {
sne = tcp_ao_compute_sne(0, tcp_rsk(req)->rcv_isn,
@@ -1018,14 +1023,14 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
/* no way to figure out initial sisn/disn - drop */
return SKB_DROP_REASON_TCP_FLAGS;
}
- } else if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ } else if ((1 << state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
disn = info->lisn;
if (th->syn || th->rst)
sisn = th->seq;
else
sisn = info->risn;
} else {
- WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", sk->sk_state);
+ WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", state);
return SKB_DROP_REASON_TCP_AOFAILURE;
}
verify_hash:
@@ -1963,8 +1968,10 @@ static int tcp_ao_info_cmd(struct sock *sk, unsigned short int family,
first = true;
}
- if (cmd.ao_required && tcp_ao_required_verify(sk))
- return -EKEYREJECTED;
+ if (cmd.ao_required && tcp_ao_required_verify(sk)) {
+ err = -EKEYREJECTED;
+ goto out;
+ }
/* For sockets in TCP_CLOSED it's possible set keys that aren't
* matching the future peer (address/port/VRF/etc),
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9c04a9c8be9d..2e39cb881e20 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2782,13 +2782,37 @@ static void tcp_mtup_probe_success(struct sock *sk)
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
}
+/* Sometimes we deduce that packets have been dropped due to reasons other than
+ * congestion, like path MTU reductions or failed client TFO attempts. In these
+ * cases we call this function to retransmit as many packets as cwnd allows,
+ * without reducing cwnd. Given that retransmits will set retrans_stamp to a
+ * non-zero value (and may do so in a later calling context due to TSQ), we
+ * also enter CA_Loss so that we track when all retransmitted packets are ACKed
+ * and clear retrans_stamp when that happens (to ensure later recurring RTOs
+ * are using the correct retrans_stamp and don't declare ETIMEDOUT
+ * prematurely).
+ */
+static void tcp_non_congestion_loss_retransmit(struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (icsk->icsk_ca_state != TCP_CA_Loss) {
+ tp->high_seq = tp->snd_nxt;
+ tp->snd_ssthresh = tcp_current_ssthresh(sk);
+ tp->prior_ssthresh = 0;
+ tp->undo_marker = 0;
+ tcp_set_ca_state(sk, TCP_CA_Loss);
+ }
+ tcp_xmit_retransmit_queue(sk);
+}
+
/* Do a simple retransmit without using the backoff mechanisms in
* tcp_timer. This is used for path mtu discovery.
* The socket is already locked here.
*/
void tcp_simple_retransmit(struct sock *sk)
{
- const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int mss;
@@ -2828,14 +2852,7 @@ void tcp_simple_retransmit(struct sock *sk)
* in network, but units changed and effective
* cwnd/ssthresh really reduced now.
*/
- if (icsk->icsk_ca_state != TCP_CA_Loss) {
- tp->high_seq = tp->snd_nxt;
- tp->snd_ssthresh = tcp_current_ssthresh(sk);
- tp->prior_ssthresh = 0;
- tp->undo_marker = 0;
- tcp_set_ca_state(sk, TCP_CA_Loss);
- }
- tcp_xmit_retransmit_queue(sk);
+ tcp_non_congestion_loss_retransmit(sk);
}
EXPORT_SYMBOL(tcp_simple_retransmit);
@@ -6295,7 +6312,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
tp->fastopen_client_fail = TFO_DATA_NOT_ACKED;
skb_rbtree_walk_from(data)
tcp_mark_skb_lost(sk, data);
- tcp_xmit_retransmit_queue(sk);
+ tcp_non_congestion_loss_retransmit(sk);
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPFASTOPENACTIVEFAIL);
return true;
@@ -7256,7 +7273,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
tcp_rsk(req)->tfo_listener = false;
if (!want_cookie) {
req->timeout = tcp_timeout_init((struct sock *)req);
- inet_csk_reqsk_queue_hash_add(sk, req, req->timeout);
+ if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
+ req->timeout))) {
+ reqsk_free(req);
+ return 0;
+ }
+
}
af_ops->send_synack(sk, dst, &fl, req, &foc,
!want_cookie ? TCP_SYNACK_NORMAL :
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 30ef0c8f5e92..b710958393e6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1144,14 +1144,9 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
#endif
}
- /* RFC 7323 2.3
- * The window field (SEG.WND) of every outgoing segment, with the
- * exception of <SYN> segments, MUST be right-shifted by
- * Rcv.Wind.Shift bits:
- */
tcp_v4_send_ack(sk, skb, seq,
tcp_rsk(req)->rcv_nxt,
- req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
tcp_rsk_tsval(tcp_rsk(req)),
READ_ONCE(req->ts_recent),
0, &key,
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b93619b2384b..538c06f95918 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -783,8 +783,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
/* RFC793: "first check sequence number". */
- if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
- tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
+ if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq,
+ TCP_SKB_CB(skb)->end_seq,
+ tcp_rsk(req)->rcv_nxt,
+ tcp_rsk(req)->rcv_nxt +
+ tcp_synack_window(req))) {
/* Out of window: send ACK and drop. */
if (!(flg & TCP_FLAG_RST) &&
!tcp_oow_rate_limited(sock_net(sk), skb,
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 83fe7f62f7f1..5bfd76a31af6 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -485,8 +485,12 @@ static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
{
const struct tcp_sock *tp = tcp_sk(sk);
const int timeout = TCP_RTO_MAX * 2;
- u32 rcv_delta;
+ s32 rcv_delta;
+ /* Note: the timer interrupt might have been delayed by at least one jiffy,
+ * and tp->rcv_tstamp might very well have been written recently.
+ * rcv_delta can thus be negative.
+ */
rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
if (rcv_delta <= timeout)
return false;
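A userspace demonstration of why rcv_delta becomes s32 (32-bit timestamps and the jiffies values are assumed for illustration): with an unsigned delta the subtraction wraps and the timeout check misfires, while the signed delta is simply negative.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t icsk_timeout = 1000;   /* jiffies when the timer was due */
	uint32_t rcv_tstamp   = 1001;   /* data was received one jiffy later */
	uint32_t timeout      = 240000; /* TCP_RTO_MAX * 2 in jiffies (example) */

	uint32_t u_delta = icsk_timeout - rcv_tstamp;            /* wraps to 0xffffffff */
	int32_t  s_delta = (int32_t)(icsk_timeout - rcv_tstamp); /* -1 */

	printf("unsigned delta: %u -> timed out? %s\n",
	       u_delta, u_delta <= timeout ? "no" : "yes (wrong)");
	printf("signed delta:   %d -> timed out? %s\n",
	       s_delta, s_delta <= (int32_t)timeout ? "no" : "yes");
	return 0;
}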
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index 0601bad79822..ff7e734e335b 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -58,7 +58,9 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
return orig_dst->lwtstate->orig_output(net, sk, skb);
}
+ local_bh_disable();
dst = dst_cache_get(&ilwt->dst_cache);
+ local_bh_enable();
if (unlikely(!dst)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct flowi6 fl6;
@@ -86,8 +88,11 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
goto drop;
}
- if (ilwt->connected)
+ if (ilwt->connected) {
+ local_bh_disable();
dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr);
+ local_bh_enable();
+ }
}
skb_dst_set(skb, dst);
diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
index 7563f8c6aa87..bf7120ecea1e 100644
--- a/net/ipv6/ioam6_iptunnel.c
+++ b/net/ipv6/ioam6_iptunnel.c
@@ -351,9 +351,9 @@ do_encap:
goto drop;
if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&ilwt->cache);
- preempt_enable();
+ local_bh_enable();
if (unlikely(!dst)) {
struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -373,9 +373,9 @@ do_encap:
goto drop;
}
- preempt_disable();
+ local_bh_disable();
dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
- preempt_enable();
+ local_bh_enable();
}
skb_dst_drop(skb);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 31d77885bcae..83e4f9855ae1 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -966,6 +966,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
if (!fib6_nh->rt6i_pcpu)
return;
+ rcu_read_lock();
/* release the reference to this fib entry from
* all of its cached pcpu routes
*/
@@ -974,7 +975,9 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
struct rt6_info *pcpu_rt;
ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
- pcpu_rt = *ppcpu_rt;
+
+ /* Paired with xchg() in rt6_get_pcpu_route() */
+ pcpu_rt = READ_ONCE(*ppcpu_rt);
/* only dropping the 'from' reference if the cached route
* is using 'match'. The cached pcpu_rt->from only changes
@@ -988,6 +991,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
fib6_info_release(from);
}
}
+ rcu_read_unlock();
}
struct fib6_nh_pcpu_arg {
@@ -2510,7 +2514,8 @@ int __init fib6_init(void)
goto out_kmem_cache_create;
ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE, NULL,
- inet6_dump_fib, RTNL_FLAG_DUMP_UNLOCKED);
+ inet6_dump_fib, RTNL_FLAG_DUMP_UNLOCKED |
+ RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
if (ret)
goto out_unregister_subsys;
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index bd5aff97d8b1..9822163428b0 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -236,7 +236,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
if (unlikely(!iph))
goto out;
- NAPI_GRO_CB(skb)->inner_network_offset = off;
+ NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off;
flush += ntohs(iph->payload_len) != skb->len - hlen;
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 53d255838e6a..5d989d803009 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -36,6 +36,7 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
.flowi6_uid = sock_net_uid(net, sk),
.daddr = iph->daddr,
.saddr = iph->saddr,
+ .flowlabel = ip6_flowinfo(iph),
};
int err;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index bbc2a0dd9314..8d72ca0b086d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -87,7 +87,8 @@ struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ip6_mtu(const struct dst_entry *dst);
-static struct dst_entry *ip6_negative_advice(struct dst_entry *);
+static void ip6_negative_advice(struct sock *sk,
+ struct dst_entry *dst);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
struct net_device *dev);
@@ -637,6 +638,8 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
rcu_read_lock();
last_probe = READ_ONCE(fib6_nh->last_probe);
idev = __in6_dev_get(dev);
+ if (!idev)
+ goto out;
neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
if (neigh) {
if (READ_ONCE(neigh->nud_state) & NUD_VALID)
@@ -1408,6 +1411,7 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
struct rt6_info *prev, **p;
p = this_cpu_ptr(res->nh->rt6i_pcpu);
+ /* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
prev = xchg(p, NULL);
if (prev) {
dst_dev_put(&prev->dst);
@@ -2770,24 +2774,24 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
}
EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
-static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
+static void ip6_negative_advice(struct sock *sk,
+ struct dst_entry *dst)
{
struct rt6_info *rt = dst_rt6_info(dst);
- if (rt) {
- if (rt->rt6i_flags & RTF_CACHE) {
- rcu_read_lock();
- if (rt6_check_expired(rt)) {
- rt6_remove_exception_rt(rt);
- dst = NULL;
- }
- rcu_read_unlock();
- } else {
- dst_release(dst);
- dst = NULL;
+ if (rt->rt6i_flags & RTF_CACHE) {
+ rcu_read_lock();
+ if (rt6_check_expired(rt)) {
+ /* counteract the dst_release() in sk_dst_reset() */
+ dst_hold(dst);
+ sk_dst_reset(sk);
+
+ rt6_remove_exception_rt(rt);
}
+ rcu_read_unlock();
+ return;
}
- return dst;
+ sk_dst_reset(sk);
}
static void ip6_link_failure(struct sk_buff *skb)
@@ -3601,7 +3605,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
if (!dev)
goto out;
- if (idev->cnf.disable_ipv6) {
+ if (!idev || idev->cnf.disable_ipv6) {
NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
err = -EACCES;
goto out;
@@ -6341,12 +6345,12 @@ static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
if (!write)
return -EINVAL;
- net = (struct net *)ctl->extra1;
- delay = net->ipv6.sysctl.flush_delay;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (ret)
return ret;
+ net = (struct net *)ctl->extra1;
+ delay = net->ipv6.sysctl.flush_delay;
fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
return 0;
}
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index a013b92cbb86..2c83b7586422 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -212,9 +212,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
if (unlikely(err))
goto drop;
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&rlwt->cache);
- preempt_enable();
+ local_bh_enable();
if (unlikely(!dst)) {
struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -234,9 +234,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
goto drop;
}
- preempt_disable();
+ local_bh_disable();
dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
- preempt_enable();
+ local_bh_enable();
}
skb_dst_drop(skb);
@@ -268,23 +268,21 @@ static int rpl_input(struct sk_buff *skb)
return err;
}
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&rlwt->cache);
- preempt_enable();
if (!dst) {
ip6_route_input(skb);
dst = skb_dst(skb);
if (!dst->error) {
- preempt_disable();
dst_cache_set_ip6(&rlwt->cache, dst,
&ipv6_hdr(skb)->saddr);
- preempt_enable();
}
} else {
skb_dst_drop(skb);
skb_dst_set(skb, dst);
}
+ local_bh_enable();
err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
if (unlikely(err))
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index a75df2ec8db0..098632adc9b5 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -464,23 +464,21 @@ static int seg6_input_core(struct net *net, struct sock *sk,
slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&slwt->cache);
- preempt_enable();
if (!dst) {
ip6_route_input(skb);
dst = skb_dst(skb);
if (!dst->error) {
- preempt_disable();
dst_cache_set_ip6(&slwt->cache, dst,
&ipv6_hdr(skb)->saddr);
- preempt_enable();
}
} else {
skb_dst_drop(skb);
skb_dst_set(skb, dst);
}
+ local_bh_enable();
err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
if (unlikely(err))
@@ -536,9 +534,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&slwt->cache);
- preempt_enable();
+ local_bh_enable();
if (unlikely(!dst)) {
struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -558,9 +556,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
goto drop;
}
- preempt_disable();
+ local_bh_disable();
dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
- preempt_enable();
+ local_bh_enable();
}
skb_dst_drop(skb);
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 24e2b4b494cb..c434940131b1 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -941,8 +941,8 @@ static int input_action_end_dx6(struct sk_buff *skb,
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
- dev_net(skb->dev), NULL, skb, NULL,
- skb_dst(skb)->dev, input_action_end_dx6_finish);
+ dev_net(skb->dev), NULL, skb, skb->dev,
+ NULL, input_action_end_dx6_finish);
return input_action_end_dx6_finish(dev_net(skb->dev), NULL, skb);
drop:
@@ -991,8 +991,8 @@ static int input_action_end_dx4(struct sk_buff *skb,
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
- dev_net(skb->dev), NULL, skb, NULL,
- skb_dst(skb)->dev, input_action_end_dx4_finish);
+ dev_net(skb->dev), NULL, skb, skb->dev,
+ NULL, input_action_end_dx4_finish);
return input_action_end_dx4_finish(dev_net(skb->dev), NULL, skb);
drop:
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4c3605485b68..729faf8bd366 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1272,15 +1272,10 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
* sk->sk_state == TCP_SYN_RECV -> for Fast Open.
*/
- /* RFC 7323 2.3
- * The window field (SEG.WND) of every outgoing segment, with the
- * exception of <SYN> segments, MUST be right-shifted by
- * Rcv.Wind.Shift bits:
- */
tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
tcp_rsk(req)->rcv_nxt,
- req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
tcp_rsk_tsval(tcp_rsk(req)),
READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
&key, ipv6_get_dsfield(ipv6_hdr(skb)), 0,
@@ -1444,7 +1439,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
*/
newsk->sk_gso_type = SKB_GSO_TCPV6;
- ip6_dst_store(newsk, dst, NULL, NULL);
inet6_sk_rx_dst_set(newsk, skb);
inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
@@ -1455,6 +1449,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ ip6_dst_store(newsk, dst, NULL, NULL);
+
newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
newnp->saddr = ireq->ir_v6_loc_addr;
newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index cc885d3aa9e5..2f1ea5f999a2 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -56,12 +56,18 @@ static int xfrm6_get_saddr(struct net *net, int oif,
{
struct dst_entry *dst;
struct net_device *dev;
+ struct inet6_dev *idev;
dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr, mark);
if (IS_ERR(dst))
return -EHOSTUNREACH;
- dev = ip6_dst_idev(dst)->dev;
+ idev = ip6_dst_idev(dst);
+ if (!idev) {
+ dst_release(dst);
+ return -EHOSTUNREACH;
+ }
+ dev = idev->dev;
ipv6_dev_get_saddr(dev_net(dev), dev, &daddr->in6, 0, &saddr->in6);
dst_release(dst);
return 0;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index b08e5d7687e3..83ad6c9709fe 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2958,8 +2958,9 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
memcpy(sdata->vif.bss_conf.mcast_rate, rate,
sizeof(int) * NUM_NL80211_BANDS);
- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
- BSS_CHANGED_MCAST_RATE);
+ if (ieee80211_sdata_running(sdata))
+ ieee80211_link_info_change_notify(sdata, &sdata->deflink,
+ BSS_CHANGED_MCAST_RATE);
return 0;
}
@@ -4016,7 +4017,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
goto out;
}
- link_data->csa_chanreq = chanreq;
+ link_data->csa_chanreq = chanreq;
link_conf->csa_active = true;
if (params->block_tx &&
@@ -4027,7 +4028,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
}
cfg80211_ch_switch_started_notify(sdata->dev,
- &link_data->csa_chanreq.oper, 0,
+ &link_data->csa_chanreq.oper, link_id,
params->count, params->block_tx);
if (changed) {
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index dce37ba8ebe3..254d745832cb 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -311,6 +311,18 @@ int drv_assign_vif_chanctx(struct ieee80211_local *local,
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
+ /*
+ * We should perhaps push emulate chanctx down and only
+ * make it call ->config() when the chanctx is actually
+ * assigned here (and unassigned below), but that's yet
+ * another change to all drivers to add assign/unassign
+ * emulation callbacks. Maybe later.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ local->emulate_chanctx &&
+ !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return 0;
+
if (!check_sdata_in_driver(sdata))
return -EIO;
@@ -338,6 +350,11 @@ void drv_unassign_vif_chanctx(struct ieee80211_local *local,
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ local->emulate_chanctx &&
+ !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return;
+
if (!check_sdata_in_driver(sdata))
return;
diff --git a/net/mac80211/he.c b/net/mac80211/he.c
index 9f5ffdc9db28..ecbb042dd043 100644
--- a/net/mac80211/he.c
+++ b/net/mac80211/he.c
@@ -230,15 +230,21 @@ ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif,
if (!he_spr_ie_elem)
return;
+
+ he_obss_pd->sr_ctrl = he_spr_ie_elem->he_sr_control;
data = he_spr_ie_elem->optional;
if (he_spr_ie_elem->he_sr_control &
IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
- data++;
+ he_obss_pd->non_srg_max_offset = *data++;
+
if (he_spr_ie_elem->he_sr_control &
IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
- he_obss_pd->max_offset = *data++;
he_obss_pd->min_offset = *data++;
+ he_obss_pd->max_offset = *data++;
+ memcpy(he_obss_pd->bss_color_bitmap, data, 8);
+ data += 8;
+ memcpy(he_obss_pd->partial_bssid_bitmap, data, 8);
he_obss_pd->enable = true;
}
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index eb62b7d4b4f7..3cedfdc9099b 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1845,6 +1845,8 @@ void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata,
void ieee80211_configure_filter(struct ieee80211_local *local);
u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
+void ieee80211_handle_queued_frames(struct ieee80211_local *local);
+
u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local);
int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
u64 *cookie, gfp_t gfp);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index dc42902e2693..b935bb5d8ed1 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -686,6 +686,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
ieee80211_del_virtual_monitor(local);
ieee80211_recalc_idle(local);
+ ieee80211_recalc_offload(local);
if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
break;
@@ -1121,9 +1122,6 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
struct ieee80211_sub_if_data *sdata;
int ret;
- if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
- return 0;
-
ASSERT_RTNL();
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1145,11 +1143,13 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
ieee80211_set_default_queues(sdata);
- ret = drv_add_interface(local, sdata);
- if (WARN_ON(ret)) {
- /* ok .. stupid driver, it asked for this! */
- kfree(sdata);
- return ret;
+ if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
+ ret = drv_add_interface(local, sdata);
+ if (WARN_ON(ret)) {
+ /* ok .. stupid driver, it asked for this! */
+ kfree(sdata);
+ return ret;
+ }
}
set_bit(SDATA_STATE_RUNNING, &sdata->state);
@@ -1187,9 +1187,6 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
- if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
- return;
-
ASSERT_RTNL();
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1209,7 +1206,8 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
ieee80211_link_release_channel(&sdata->deflink);
- drv_remove_interface(local, sdata);
+ if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ drv_remove_interface(local, sdata);
kfree(sdata);
}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 4eaea0a9975b..1132dea0e290 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -423,9 +423,8 @@ u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
BSS_CHANGED_ERP_SLOT;
}
-static void ieee80211_tasklet_handler(struct tasklet_struct *t)
+void ieee80211_handle_queued_frames(struct ieee80211_local *local)
{
- struct ieee80211_local *local = from_tasklet(local, t, tasklet);
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -450,6 +449,13 @@ static void ieee80211_tasklet_handler(struct tasklet_struct *t)
}
}
+static void ieee80211_tasklet_handler(struct tasklet_struct *t)
+{
+ struct ieee80211_local *local = from_tasklet(local, t, tasklet);
+
+ ieee80211_handle_queued_frames(local);
+}
+
static void ieee80211_restart_work(struct work_struct *work)
{
struct ieee80211_local *local =
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index cbc9b5e40cb3..6d4510221c98 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1776,6 +1776,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
ifmsh->last_preq = jiffies;
ifmsh->next_perr = jiffies;
ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
+ ifmsh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
/* Allocate all mesh structures when creating the first mesh interface. */
if (!mesh_allocated)
ieee80211s_init();
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index a6b62169f084..c0a5c75cddcb 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -1017,10 +1017,23 @@ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
*/
void mesh_path_flush_pending(struct mesh_path *mpath)
{
+ struct ieee80211_sub_if_data *sdata = mpath->sdata;
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct mesh_preq_queue *preq, *tmp;
struct sk_buff *skb;
while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
mesh_path_discard_frame(mpath->sdata, skb);
+
+ spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
+ list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) {
+ if (ether_addr_equal(mpath->dst, preq->dst)) {
+ list_del(&preq->list);
+ kfree(preq);
+ --ifmsh->preq_queue_len;
+ }
+ }
+ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
}
/**
diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c
index 55e5497f8978..055a60e90979 100644
--- a/net/mac80211/parse.c
+++ b/net/mac80211/parse.c
@@ -111,7 +111,7 @@ ieee80211_parse_extension_element(u32 *crc,
if (params->mode < IEEE80211_CONN_MODE_HE)
break;
if (len >= sizeof(*elems->he_spr) &&
- len >= ieee80211_he_spr_size(data))
+ len >= ieee80211_he_spr_size(data) - 1)
elems->he_spr = data;
break;
case WLAN_EID_EXT_HE_6GHZ_CAPA:
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 3da1c5c45035..b5f2df61c7f6 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -358,7 +358,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
struct cfg80211_scan_request *req;
struct cfg80211_chan_def chandef;
u8 bands_used = 0;
- int i, ielen, n_chans;
+ int i, ielen;
+ u32 *n_chans;
u32 flags = 0;
req = rcu_dereference_protected(local->scan_req,
@@ -368,34 +369,34 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
return false;
if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) {
+ local->hw_scan_req->req.n_channels = req->n_channels;
+
for (i = 0; i < req->n_channels; i++) {
local->hw_scan_req->req.channels[i] = req->channels[i];
bands_used |= BIT(req->channels[i]->band);
}
-
- n_chans = req->n_channels;
} else {
do {
if (local->hw_scan_band == NUM_NL80211_BANDS)
return false;
- n_chans = 0;
+ n_chans = &local->hw_scan_req->req.n_channels;
+ *n_chans = 0;
for (i = 0; i < req->n_channels; i++) {
if (req->channels[i]->band !=
local->hw_scan_band)
continue;
- local->hw_scan_req->req.channels[n_chans] =
+ local->hw_scan_req->req.channels[(*n_chans)++] =
req->channels[i];
- n_chans++;
+
bands_used |= BIT(req->channels[i]->band);
}
local->hw_scan_band++;
- } while (!n_chans);
+ } while (!*n_chans);
}
- local->hw_scan_req->req.n_channels = n_chans;
ieee80211_prepare_scan_chandef(&chandef);
if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
@@ -744,15 +745,21 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
local->hw_scan_ies_bufsize *= n_bands;
}
- local->hw_scan_req = kmalloc(
- sizeof(*local->hw_scan_req) +
- req->n_channels * sizeof(req->channels[0]) +
- local->hw_scan_ies_bufsize, GFP_KERNEL);
+ local->hw_scan_req = kmalloc(struct_size(local->hw_scan_req,
+ req.channels,
+ req->n_channels) +
+ local->hw_scan_ies_bufsize,
+ GFP_KERNEL);
if (!local->hw_scan_req)
return -ENOMEM;
local->hw_scan_req->req.ssids = req->ssids;
local->hw_scan_req->req.n_ssids = req->n_ssids;
+ /* None of the channels are actually set
+ * up but let UBSAN know the boundaries.
+ */
+ local->hw_scan_req->req.n_channels = req->n_channels;
+
ies = (u8 *)local->hw_scan_req +
sizeof(*local->hw_scan_req) +
req->n_channels * sizeof(req->channels[0]);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index da5fdd6f5c85..aa22f09e6d14 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1724,7 +1724,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
skb_queue_head_init(&pending);
/* sync with ieee80211_tx_h_unicast_ps_buf */
- spin_lock(&sta->ps_lock);
+ spin_lock_bh(&sta->ps_lock);
/* Send all buffered frames to the station */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
int count = skb_queue_len(&pending), tmp;
@@ -1753,7 +1753,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
*/
clear_sta_flag(sta, WLAN_STA_PSPOLL);
clear_sta_flag(sta, WLAN_STA_UAPSD);
- spin_unlock(&sta->ps_lock);
+ spin_unlock_bh(&sta->ps_lock);
atomic_dec(&ps->num_sta_ps);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0b893e958959..963ed75deb76 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1567,6 +1567,8 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
void ieee80211_stop_device(struct ieee80211_local *local)
{
+ ieee80211_handle_queued_frames(local);
+
ieee80211_led_radio(local, false);
ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO);
@@ -1841,7 +1843,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* add interfaces */
sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
- if (sdata) {
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
/* in HW restart it exists already */
WARN_ON(local->resuming);
res = drv_add_interface(local, sdata);
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 7f53e022e27e..ea9e5817b9e9 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -677,6 +677,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
unsigned int add_addr_accept_max;
struct mptcp_addr_info remote;
unsigned int subflows_max;
+ bool sf_created = false;
int i, nr;
add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
@@ -704,15 +705,18 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
if (nr == 0)
return;
- msk->pm.add_addr_accepted++;
- if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
- msk->pm.subflows >= subflows_max)
- WRITE_ONCE(msk->pm.accept_addr, false);
-
spin_unlock_bh(&msk->pm.lock);
for (i = 0; i < nr; i++)
- __mptcp_subflow_connect(sk, &addrs[i], &remote);
+ if (__mptcp_subflow_connect(sk, &addrs[i], &remote) == 0)
+ sf_created = true;
spin_lock_bh(&msk->pm.lock);
+
+ if (sf_created) {
+ msk->pm.add_addr_accepted++;
+ if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
+ msk->pm.subflows >= subflows_max)
+ WRITE_ONCE(msk->pm.accept_addr, false);
+ }
}
void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
@@ -814,10 +818,13 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
spin_lock_bh(&msk->pm.lock);
removed = true;
- __MPTCP_INC_STATS(sock_net(sk), rm_type);
+ if (rm_type == MPTCP_MIB_RMSUBFLOW)
+ __MPTCP_INC_STATS(sock_net(sk), rm_type);
}
if (rm_type == MPTCP_MIB_RMSUBFLOW)
__set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
+ else if (rm_type == MPTCP_MIB_RMADDR)
+ __MPTCP_INC_STATS(sock_net(sk), rm_type);
if (!removed)
continue;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 7d44196ec5b6..bb7dca8aa2d9 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2916,9 +2916,14 @@ void mptcp_set_state(struct sock *sk, int state)
if (oldstate != TCP_ESTABLISHED)
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
break;
-
+ case TCP_CLOSE_WAIT:
+ /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state:
+ * MPTCP "accepted" sockets will be created later on. So no
+ * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT.
+ */
+ break;
default:
- if (oldstate == TCP_ESTABLISHED)
+ if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
}
@@ -3735,6 +3740,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
WRITE_ONCE(msk->write_seq, subflow->idsn);
WRITE_ONCE(msk->snd_nxt, subflow->idsn);
+ WRITE_ONCE(msk->snd_una, subflow->idsn);
if (likely(!__mptcp_check_fallback(msk)))
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 374412ed780b..ef0f8f73826f 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -325,6 +325,7 @@ struct ncsi_dev_priv {
spinlock_t lock; /* Protect the NCSI device */
unsigned int package_probe_id;/* Current ID during probe */
unsigned int package_num; /* Number of packages */
+ unsigned int channel_probe_id;/* Current channel ID during probe */
struct list_head packages; /* List of packages */
struct ncsi_channel *hot_channel; /* Channel was ever active */
struct ncsi_request requests[256]; /* Request table */
@@ -343,6 +344,7 @@ struct ncsi_dev_priv {
bool multi_package; /* Enable multiple packages */
bool mlx_multi_host; /* Enable multi host Mellanox */
u32 package_whitelist; /* Packages to configure */
+ unsigned char channel_count; /* Num of channels to probe */
};
struct ncsi_cmd_arg {
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 745c788f1d1d..5ecf611c8820 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -510,17 +510,19 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
break;
case ncsi_dev_state_suspend_gls:
- ndp->pending_req_num = np->channel_num;
+ ndp->pending_req_num = 1;
nca.type = NCSI_PKT_CMD_GLS;
nca.package = np->id;
+ nca.channel = ndp->channel_probe_id;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+ ndp->channel_probe_id++;
- nd->state = ncsi_dev_state_suspend_dcnt;
- NCSI_FOR_EACH_CHANNEL(np, nc) {
- nca.channel = nc->id;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
+ if (ndp->channel_probe_id == ndp->channel_count) {
+ ndp->channel_probe_id = 0;
+ nd->state = ncsi_dev_state_suspend_dcnt;
}
break;
@@ -1345,7 +1347,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_dev *nd = &ndp->ndev;
struct ncsi_package *np;
- struct ncsi_channel *nc;
struct ncsi_cmd_arg nca;
unsigned char index;
int ret;
@@ -1423,23 +1424,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_probe_cis;
break;
- case ncsi_dev_state_probe_cis:
- ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
-
- /* Clear initial state */
- nca.type = NCSI_PKT_CMD_CIS;
- nca.package = ndp->active_package->id;
- for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
- nca.channel = index;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
- }
-
- nd->state = ncsi_dev_state_probe_gvi;
- if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
- nd->state = ncsi_dev_state_probe_keep_phy;
- break;
case ncsi_dev_state_probe_keep_phy:
ndp->pending_req_num = 1;
@@ -1452,14 +1436,17 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_probe_gvi;
break;
+ case ncsi_dev_state_probe_cis:
case ncsi_dev_state_probe_gvi:
case ncsi_dev_state_probe_gc:
case ncsi_dev_state_probe_gls:
np = ndp->active_package;
- ndp->pending_req_num = np->channel_num;
+ ndp->pending_req_num = 1;
- /* Retrieve version, capability or link status */
- if (nd->state == ncsi_dev_state_probe_gvi)
+ /* Clear initial state, or retrieve version, capability or link status */
+ if (nd->state == ncsi_dev_state_probe_cis)
+ nca.type = NCSI_PKT_CMD_CIS;
+ else if (nd->state == ncsi_dev_state_probe_gvi)
nca.type = NCSI_PKT_CMD_GVI;
else if (nd->state == ncsi_dev_state_probe_gc)
nca.type = NCSI_PKT_CMD_GC;
@@ -1467,19 +1454,29 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nca.type = NCSI_PKT_CMD_GLS;
nca.package = np->id;
- NCSI_FOR_EACH_CHANNEL(np, nc) {
- nca.channel = nc->id;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
- }
+ nca.channel = ndp->channel_probe_id;
- if (nd->state == ncsi_dev_state_probe_gvi)
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+
+ if (nd->state == ncsi_dev_state_probe_cis) {
+ nd->state = ncsi_dev_state_probe_gvi;
+ if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
+ nd->state = ncsi_dev_state_probe_keep_phy;
+ } else if (nd->state == ncsi_dev_state_probe_gvi) {
nd->state = ncsi_dev_state_probe_gc;
- else if (nd->state == ncsi_dev_state_probe_gc)
+ } else if (nd->state == ncsi_dev_state_probe_gc) {
nd->state = ncsi_dev_state_probe_gls;
- else
+ } else {
+ nd->state = ncsi_dev_state_probe_cis;
+ ndp->channel_probe_id++;
+ }
+
+ if (ndp->channel_probe_id == ndp->channel_count) {
+ ndp->channel_probe_id = 0;
nd->state = ncsi_dev_state_probe_dp;
+ }
break;
case ncsi_dev_state_probe_dp:
ndp->pending_req_num = 1;
@@ -1780,6 +1777,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
ndp->requests[i].ndp = ndp;
timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
}
+ ndp->channel_count = NCSI_RESERVED_CHANNEL;
spin_lock_irqsave(&ncsi_dev_lock, flags);
list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
@@ -1813,6 +1811,7 @@ int ncsi_start_dev(struct ncsi_dev *nd)
if (!(ndp->flags & NCSI_DEV_PROBED)) {
ndp->package_probe_id = 0;
+ ndp->channel_probe_id = 0;
nd->state = ncsi_dev_state_probe;
schedule_work(&ndp->work);
return 0;
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index bee290d0f48b..e28be33bdf2c 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -795,12 +795,13 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
struct ncsi_rsp_gc_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
+ struct ncsi_package *np;
size_t size;
/* Find the channel */
rsp = (struct ncsi_rsp_gc_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
- NULL, &nc);
+ &np, &nc);
if (!nc)
return -ENODEV;
@@ -835,6 +836,7 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
*/
nc->vlan_filter.bitmap = U64_MAX;
nc->vlan_filter.n_vids = rsp->vlan_cnt;
+ np->ndp->channel_count = rsp->channel_cnt;
return 0;
}
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 3126911f5042..b00fc285b334 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -815,12 +815,21 @@ int __init netfilter_init(void)
if (ret < 0)
goto err;
+#ifdef CONFIG_LWTUNNEL
+ ret = netfilter_lwtunnel_init();
+ if (ret < 0)
+ goto err_lwtunnel_pernet;
+#endif
ret = netfilter_log_init();
if (ret < 0)
- goto err_pernet;
+ goto err_log_pernet;
return 0;
-err_pernet:
+err_log_pernet:
+#ifdef CONFIG_LWTUNNEL
+ netfilter_lwtunnel_fini();
+err_lwtunnel_pernet:
+#endif
unregister_pernet_subsys(&netfilter_net_ops);
err:
return ret;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 3184cc6be4c9..61431690cbd5 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -53,12 +53,13 @@ MODULE_DESCRIPTION("core IP set support");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
/* When the nfnl mutex or ip_set_ref_lock is held: */
-#define ip_set_dereference(p) \
- rcu_dereference_protected(p, \
+#define ip_set_dereference(inst) \
+ rcu_dereference_protected((inst)->ip_set_list, \
lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
- lockdep_is_held(&ip_set_ref_lock))
+ lockdep_is_held(&ip_set_ref_lock) || \
+ (inst)->is_deleted)
#define ip_set(inst, id) \
- ip_set_dereference((inst)->ip_set_list)[id]
+ ip_set_dereference(inst)[id]
#define ip_set_ref_netlink(inst,id) \
rcu_dereference_raw((inst)->ip_set_list)[id]
#define ip_set_dereference_nfnl(p) \
@@ -1133,7 +1134,7 @@ static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info,
if (!list)
goto cleanup;
/* nfnl mutex is held, both lists are valid */
- tmp = ip_set_dereference(inst->ip_set_list);
+ tmp = ip_set_dereference(inst);
memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
rcu_assign_pointer(inst->ip_set_list, list);
/* Make sure all current packets have passed through */
@@ -1172,23 +1173,50 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
.len = IPSET_MAXNAMELEN - 1 },
};
+/* In order to return quickly when destroying a single set, it is split
+ * into two stages:
+ * - Cancel garbage collector
+ * - Destroy the set itself via call_rcu()
+ */
+
static void
-ip_set_destroy_set(struct ip_set *set)
+ip_set_destroy_set_rcu(struct rcu_head *head)
{
- pr_debug("set: %s\n", set->name);
+ struct ip_set *set = container_of(head, struct ip_set, rcu);
- /* Must call it without holding any lock */
set->variant->destroy(set);
module_put(set->type->me);
kfree(set);
}
static void
-ip_set_destroy_set_rcu(struct rcu_head *head)
+_destroy_all_sets(struct ip_set_net *inst)
{
- struct ip_set *set = container_of(head, struct ip_set, rcu);
+ struct ip_set *set;
+ ip_set_id_t i;
+ bool need_wait = false;
- ip_set_destroy_set(set);
+ /* First cancel gc's: set:list sets are flushed as well */
+ for (i = 0; i < inst->ip_set_max; i++) {
+ set = ip_set(inst, i);
+ if (set) {
+ set->variant->cancel_gc(set);
+ if (set->type->features & IPSET_TYPE_NAME)
+ need_wait = true;
+ }
+ }
+ /* Must wait for flush to be really finished */
+ if (need_wait)
+ rcu_barrier();
+ for (i = 0; i < inst->ip_set_max; i++) {
+ set = ip_set(inst, i);
+ if (set) {
+ ip_set(inst, i) = NULL;
+ set->variant->destroy(set);
+ module_put(set->type->me);
+ kfree(set);
+ }
+ }
}
static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
@@ -1202,11 +1230,10 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
if (unlikely(protocol_min_failed(attr)))
return -IPSET_ERR_PROTOCOL;
-
/* Commands are serialized and references are
* protected by the ip_set_ref_lock.
* External systems (i.e. xt_set) must call
- * ip_set_put|get_nfnl_* functions, that way we
+ * ip_set_nfnl_get_* functions, that way we
* can safely check references here.
*
* list:set timer can only decrement the reference
@@ -1214,8 +1241,6 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
* without holding the lock.
*/
if (!attr[IPSET_ATTR_SETNAME]) {
- /* Must wait for flush to be really finished in list:set */
- rcu_barrier();
read_lock_bh(&ip_set_ref_lock);
for (i = 0; i < inst->ip_set_max; i++) {
s = ip_set(inst, i);
@@ -1226,15 +1251,7 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
}
inst->is_destroyed = true;
read_unlock_bh(&ip_set_ref_lock);
- for (i = 0; i < inst->ip_set_max; i++) {
- s = ip_set(inst, i);
- if (s) {
- ip_set(inst, i) = NULL;
- /* Must cancel garbage collectors */
- s->variant->cancel_gc(s);
- ip_set_destroy_set(s);
- }
- }
+ _destroy_all_sets(inst);
/* Modified by ip_set_destroy() only, which is serialized */
inst->is_destroyed = false;
} else {
@@ -1255,12 +1272,12 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
features = s->type->features;
ip_set(inst, i) = NULL;
read_unlock_bh(&ip_set_ref_lock);
+ /* Must cancel garbage collectors */
+ s->variant->cancel_gc(s);
if (features & IPSET_TYPE_NAME) {
/* Must wait for flush to be really finished */
rcu_barrier();
}
- /* Must cancel garbage collectors */
- s->variant->cancel_gc(s);
call_rcu(&s->rcu, ip_set_destroy_set_rcu);
}
return 0;
@@ -2365,30 +2382,25 @@ ip_set_net_init(struct net *net)
}
static void __net_exit
-ip_set_net_exit(struct net *net)
+ip_set_net_pre_exit(struct net *net)
{
struct ip_set_net *inst = ip_set_pernet(net);
- struct ip_set *set = NULL;
- ip_set_id_t i;
-
inst->is_deleted = true; /* flag for ip_set_nfnl_put */
+}
- nfnl_lock(NFNL_SUBSYS_IPSET);
- for (i = 0; i < inst->ip_set_max; i++) {
- set = ip_set(inst, i);
- if (set) {
- ip_set(inst, i) = NULL;
- set->variant->cancel_gc(set);
- ip_set_destroy_set(set);
- }
- }
- nfnl_unlock(NFNL_SUBSYS_IPSET);
+static void __net_exit
+ip_set_net_exit(struct net *net)
+{
+ struct ip_set_net *inst = ip_set_pernet(net);
+
+ _destroy_all_sets(inst);
kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
}
static struct pernet_operations ip_set_net_ops = {
.init = ip_set_net_init,
+ .pre_exit = ip_set_net_pre_exit,
.exit = ip_set_net_exit,
.id = &ip_set_net_id,
.size = sizeof(struct ip_set_net),
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 6c3f28bc59b3..bfae7066936b 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -79,7 +79,7 @@ list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
struct set_elem *e;
int ret;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -99,7 +99,7 @@ list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
struct set_elem *e;
int ret;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -188,9 +188,10 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct list_set *map = set->data;
struct set_adt_elem *d = value;
struct set_elem *e, *next, *prev = NULL;
- int ret;
+ int ret = 0;
- list_for_each_entry(e, &map->members, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -201,6 +202,7 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
if (d->before == 0) {
ret = 1;
+ goto out;
} else if (d->before > 0) {
next = list_next_entry(e, list);
ret = !list_is_last(&e->list, &map->members) &&
@@ -208,9 +210,11 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
} else {
ret = prev && prev->id == d->refid;
}
- return ret;
+ goto out;
}
- return 0;
+out:
+ rcu_read_unlock();
+ return ret;
}
static void
@@ -239,7 +243,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
/* Find where to add the new entry */
n = prev = next = NULL;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -316,9 +320,9 @@ list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
{
struct list_set *map = set->data;
struct set_adt_elem *d = value;
- struct set_elem *e, *next, *prev = NULL;
+ struct set_elem *e, *n, *next, *prev = NULL;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_safe(e, n, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -424,14 +428,8 @@ static void
list_set_destroy(struct ip_set *set)
{
struct list_set *map = set->data;
- struct set_elem *e, *n;
- list_for_each_entry_safe(e, n, &map->members, list) {
- list_del(&e->list);
- ip_set_put_byindex(map->net, e->id);
- ip_set_ext_destroy(set, e);
- kfree(e);
- }
+ WARN_ON_ONCE(!list_empty(&map->members));
kfree(map);
set->data = NULL;
@@ -549,6 +547,9 @@ list_set_cancel_gc(struct ip_set *set)
if (SET_WITH_TIMEOUT(set))
timer_shutdown_sync(&map->gc);
+
+ /* Flush list to drop references to other ipsets */
+ list_set_flush(set);
}
static const struct ip_set_type_variant set_variant = {
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 74112e9c5dab..6c40bdf8b05a 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -22,9 +22,6 @@
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
-#ifdef CONFIG_LWTUNNEL
-#include <net/netfilter/nf_hooks_lwtunnel.h>
-#endif
#include <linux/rculist_nulls.h>
static bool enable_hooks __read_mostly;
@@ -612,9 +609,6 @@ enum nf_ct_sysctl_index {
NF_SYSCTL_CT_PROTO_TIMEOUT_GRE,
NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM,
#endif
-#ifdef CONFIG_LWTUNNEL
- NF_SYSCTL_CT_LWTUNNEL,
-#endif
NF_SYSCTL_CT_LAST_SYSCTL,
};
@@ -946,15 +940,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.proc_handler = proc_dointvec_jiffies,
},
#endif
-#ifdef CONFIG_LWTUNNEL
- [NF_SYSCTL_CT_LWTUNNEL] = {
- .procname = "nf_hooks_lwtunnel",
- .data = NULL,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = nf_hooks_lwtunnel_sysctl_handler,
- },
-#endif
};
static struct ctl_table nf_ct_netfilter_table[] = {
diff --git a/net/netfilter/nf_hooks_lwtunnel.c b/net/netfilter/nf_hooks_lwtunnel.c
index 00e89ffd78f6..d8ebebc9775d 100644
--- a/net/netfilter/nf_hooks_lwtunnel.c
+++ b/net/netfilter/nf_hooks_lwtunnel.c
@@ -3,6 +3,9 @@
#include <linux/sysctl.h>
#include <net/lwtunnel.h>
#include <net/netfilter/nf_hooks_lwtunnel.h>
+#include <linux/netfilter.h>
+
+#include "nf_internals.h"
static inline int nf_hooks_lwtunnel_get(void)
{
@@ -50,4 +53,71 @@ int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
return ret;
}
EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_sysctl_handler);
+
+static struct ctl_table nf_lwtunnel_sysctl_table[] = {
+ {
+ .procname = "nf_hooks_lwtunnel",
+ .data = NULL,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = nf_hooks_lwtunnel_sysctl_handler,
+ },
+};
+
+static int __net_init nf_lwtunnel_net_init(struct net *net)
+{
+ struct ctl_table_header *hdr;
+ struct ctl_table *table;
+
+ table = nf_lwtunnel_sysctl_table;
+ if (!net_eq(net, &init_net)) {
+ table = kmemdup(nf_lwtunnel_sysctl_table,
+ sizeof(nf_lwtunnel_sysctl_table),
+ GFP_KERNEL);
+ if (!table)
+ goto err_alloc;
+ }
+
+ hdr = register_net_sysctl_sz(net, "net/netfilter", table,
+ ARRAY_SIZE(nf_lwtunnel_sysctl_table));
+ if (!hdr)
+ goto err_reg;
+
+ net->nf.nf_lwtnl_dir_header = hdr;
+
+ return 0;
+err_reg:
+ if (!net_eq(net, &init_net))
+ kfree(table);
+err_alloc:
+ return -ENOMEM;
+}
+
+static void __net_exit nf_lwtunnel_net_exit(struct net *net)
+{
+ const struct ctl_table *table;
+
+ table = net->nf.nf_lwtnl_dir_header->ctl_table_arg;
+ unregister_net_sysctl_table(net->nf.nf_lwtnl_dir_header);
+ if (!net_eq(net, &init_net))
+ kfree(table);
+}
+
+static struct pernet_operations nf_lwtunnel_net_ops = {
+ .init = nf_lwtunnel_net_init,
+ .exit = nf_lwtunnel_net_exit,
+};
+
+int __init netfilter_lwtunnel_init(void)
+{
+ return register_pernet_subsys(&nf_lwtunnel_net_ops);
+}
+
+void netfilter_lwtunnel_fini(void)
+{
+ unregister_pernet_subsys(&nf_lwtunnel_net_ops);
+}
+#else
+int __init netfilter_lwtunnel_init(void) { return 0; }
+void netfilter_lwtunnel_fini(void) {}
#endif /* CONFIG_SYSCTL */
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 832ae64179f0..25403023060b 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -29,6 +29,12 @@ void nf_queue_nf_hook_drop(struct net *net);
/* nf_log.c */
int __init netfilter_log_init(void);
+#ifdef CONFIG_LWTUNNEL
+/* nf_hooks_lwtunnel.c */
+int __init netfilter_lwtunnel_init(void);
+void netfilter_lwtunnel_fini(void);
+#endif
+
/* core.c */
void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
const struct nf_hook_ops *reg);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index be3b4c90d2ed..e8dcf41d360d 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5740,8 +5740,7 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
nft_data_dump(skb, NFTA_SET_ELEM_DATA, nft_set_ext_data(ext),
- set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
- set->dlen) < 0)
+ nft_set_datatype(set), set->dlen) < 0)
goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS) &&
@@ -11073,6 +11072,9 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
return 0;
default:
+ if (type != NFT_DATA_VALUE)
+ return -EINVAL;
+
if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
return -EINVAL;
if (len == 0)
@@ -11081,8 +11083,6 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
sizeof_field(struct nft_regs, data))
return -ERANGE;
- if (data != NULL && type != NFT_DATA_VALUE)
- return -EINVAL;
return 0;
}
}
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 00f4bd21c59b..f1c31757e496 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -169,7 +169,9 @@ instance_destroy_rcu(struct rcu_head *head)
struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
rcu);
+ rcu_read_lock();
nfqnl_flush(inst, NULL, 0);
+ rcu_read_unlock();
kfree(inst);
module_put(THIS_MODULE);
}
diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
index 37cfe6dd712d..b58f62195ff3 100644
--- a/net/netfilter/nft_fib.c
+++ b/net/netfilter/nft_fib.c
@@ -35,11 +35,9 @@ int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
case NFT_FIB_RESULT_OIFNAME:
- hooks = (1 << NF_INET_PRE_ROUTING);
- if (priv->flags & NFTA_FIB_F_IIF) {
- hooks |= (1 << NF_INET_LOCAL_IN) |
- (1 << NF_INET_FORWARD);
- }
+ hooks = (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD);
break;
case NFT_FIB_RESULT_ADDRTYPE:
if (priv->flags & NFTA_FIB_F_IIF)
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index b314ca728a29..f3080fa1b226 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -132,7 +132,8 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
return -EINVAL;
err = nft_parse_register_store(ctx, tb[NFTA_LOOKUP_DREG],
- &priv->dreg, NULL, set->dtype,
+ &priv->dreg, NULL,
+ nft_set_datatype(set),
set->dlen);
if (err < 0)
return err;
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index ba0d3683a45d..9139ce38ea7b 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -839,6 +839,9 @@ static int nft_meta_inner_init(const struct nft_ctx *ctx,
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int len;
+ if (!tb[NFTA_META_KEY] || !tb[NFTA_META_DREG])
+ return -EINVAL;
+
priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
switch (priv->key) {
case NFT_META_PROTOCOL:
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 0a689c8e0295..50429cbd42da 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -45,36 +45,27 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
int mac_off = skb_mac_header(skb) - skb->data;
u8 *vlanh, *dst_u8 = (u8 *) d;
struct vlan_ethhdr veth;
- u8 vlan_hlen = 0;
-
- if ((skb->protocol == htons(ETH_P_8021AD) ||
- skb->protocol == htons(ETH_P_8021Q)) &&
- offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
- vlan_hlen += VLAN_HLEN;
vlanh = (u8 *) &veth;
- if (offset < VLAN_ETH_HLEN + vlan_hlen) {
+ if (offset < VLAN_ETH_HLEN) {
u8 ethlen = len;
- if (vlan_hlen &&
- skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
- return false;
- else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
+ if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
return false;
- if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
- ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
+ if (offset + len > VLAN_ETH_HLEN)
+ ethlen -= offset + len - VLAN_ETH_HLEN;
- memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
+ memcpy(dst_u8, vlanh + offset, ethlen);
len -= ethlen;
if (len == 0)
return true;
dst_u8 += ethlen;
- offset = ETH_HLEN + vlan_hlen;
+ offset = ETH_HLEN;
} else {
- offset -= VLAN_HLEN + vlan_hlen;
+ offset -= VLAN_HLEN;
}
return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
@@ -154,12 +145,12 @@ int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
return pkt->inneroff;
}
-static bool nft_payload_need_vlan_copy(const struct nft_payload *priv)
+static bool nft_payload_need_vlan_adjust(u32 offset, u32 len)
{
- unsigned int len = priv->offset + priv->len;
+ unsigned int boundary = offset + len;
/* data past ether src/dst requested, copy needed */
- if (len > offsetof(struct ethhdr, h_proto))
+ if (boundary > offsetof(struct ethhdr, h_proto))
return true;
return false;
@@ -183,7 +174,7 @@ void nft_payload_eval(const struct nft_expr *expr,
goto err;
if (skb_vlan_tag_present(skb) &&
- nft_payload_need_vlan_copy(priv)) {
+ nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
if (!nft_payload_copy_vlan(dest, skb,
priv->offset, priv->len))
goto err;
@@ -659,6 +650,10 @@ static int nft_payload_inner_init(const struct nft_ctx *ctx,
struct nft_payload *priv = nft_expr_priv(expr);
u32 base;
+ if (!tb[NFTA_PAYLOAD_BASE] || !tb[NFTA_PAYLOAD_OFFSET] ||
+ !tb[NFTA_PAYLOAD_LEN] || !tb[NFTA_PAYLOAD_DREG])
+ return -EINVAL;
+
base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
switch (base) {
case NFT_PAYLOAD_TUN_HEADER:
@@ -810,21 +805,79 @@ struct nft_payload_set {
u8 csum_flags;
};
+/* This is not struct vlan_hdr. */
+struct nft_payload_vlan_hdr {
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+};
+
+static bool
+nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u8 offset, u8 len,
+ int *vlan_hlen)
+{
+ struct nft_payload_vlan_hdr *vlanh;
+ __be16 vlan_proto;
+ u16 vlan_tci;
+
+ if (offset >= offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto)) {
+ *vlan_hlen = VLAN_HLEN;
+ return true;
+ }
+
+ switch (offset) {
+ case offsetof(struct vlan_ethhdr, h_vlan_proto):
+ if (len == 2) {
+ vlan_proto = nft_reg_load_be16(src);
+ skb->vlan_proto = vlan_proto;
+ } else if (len == 4) {
+ vlanh = (struct nft_payload_vlan_hdr *)src;
+ __vlan_hwaccel_put_tag(skb, vlanh->h_vlan_proto,
+ ntohs(vlanh->h_vlan_TCI));
+ } else {
+ return false;
+ }
+ break;
+ case offsetof(struct vlan_ethhdr, h_vlan_TCI):
+ if (len != 2)
+ return false;
+
+ vlan_tci = ntohs(nft_reg_load_be16(src));
+ skb->vlan_tci = vlan_tci;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static void nft_payload_set_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_payload_set *priv = nft_expr_priv(expr);
- struct sk_buff *skb = pkt->skb;
const u32 *src = &regs->data[priv->sreg];
- int offset, csum_offset;
+ int offset, csum_offset, vlan_hlen = 0;
+ struct sk_buff *skb = pkt->skb;
__wsum fsum, tsum;
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
if (!skb_mac_header_was_set(skb))
goto err;
- offset = skb_mac_header(skb) - skb->data;
+
+ if (skb_vlan_tag_present(skb) &&
+ nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
+ if (!nft_payload_set_vlan(src, skb,
+ priv->offset, priv->len,
+ &vlan_hlen))
+ goto err;
+
+ if (!vlan_hlen)
+ return;
+ }
+
+ offset = skb_mac_header(skb) - skb->data - vlan_hlen;
break;
case NFT_PAYLOAD_NETWORK_HEADER:
offset = skb_network_offset(skb);
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index 4e7c968cde2d..5e3ca068f04e 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -121,7 +121,8 @@ static void nr_heartbeat_expiry(struct timer_list *t)
is accepted() it isn't 'dead' so doesn't get removed. */
if (sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
- sock_hold(sk);
+ if (sk->sk_state == TCP_LISTEN)
+ sock_hold(sk);
bh_unlock_sock(sk);
nr_destroy_socket(sk);
goto out;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 2928c142a2dd..3b980bf2770b 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -168,8 +168,13 @@ static u32 ovs_ct_get_mark(const struct nf_conn *ct)
static void ovs_ct_get_labels(const struct nf_conn *ct,
struct ovs_key_ct_labels *labels)
{
- struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
+ struct nf_conn_labels *cl = NULL;
+ if (ct) {
+ if (ct->master && !nf_ct_is_confirmed(ct))
+ ct = ct->master;
+ cl = nf_ct_labels_find(ct);
+ }
if (cl)
memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
else
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 9ee622fb1160..2520708b06a1 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -830,7 +830,6 @@ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
u32 max;
if (*index) {
-again:
rcu_read_lock();
p = idr_find(&idrinfo->action_idr, *index);
@@ -839,7 +838,7 @@ again:
* index but did not assign the pointer yet.
*/
rcu_read_unlock();
- goto again;
+ return -EAGAIN;
}
if (!p) {
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index baac083fd8f1..2a96d9c1db65 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -41,21 +41,26 @@ static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);
+struct zones_ht_key {
+ struct net *net;
+ u16 zone;
+};
+
struct tcf_ct_flow_table {
struct rhash_head node; /* In zones tables */
struct rcu_work rwork;
struct nf_flowtable nf_ft;
refcount_t ref;
- u16 zone;
+ struct zones_ht_key key;
bool dying;
};
static const struct rhashtable_params zones_params = {
.head_offset = offsetof(struct tcf_ct_flow_table, node),
- .key_offset = offsetof(struct tcf_ct_flow_table, zone),
- .key_len = sizeof_field(struct tcf_ct_flow_table, zone),
+ .key_offset = offsetof(struct tcf_ct_flow_table, key),
+ .key_len = sizeof_field(struct tcf_ct_flow_table, key),
.automatic_shrinking = true,
};
@@ -316,11 +321,12 @@ static struct nf_flowtable_type flowtable_ct = {
static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
{
+ struct zones_ht_key key = { .net = net, .zone = params->zone };
struct tcf_ct_flow_table *ct_ft;
int err = -ENOMEM;
mutex_lock(&zones_mutex);
- ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
+ ct_ft = rhashtable_lookup_fast(&zones_ht, &key, zones_params);
if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
goto out_unlock;
@@ -329,7 +335,7 @@ static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
goto err_alloc;
refcount_set(&ct_ft->ref, 1);
- ct_ft->zone = params->zone;
+ ct_ft->key = key;
err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
if (err)
goto err_insert;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2a637a17061b..e22ff003d52e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -676,6 +676,7 @@ struct Qdisc noop_qdisc = {
.qlen = 0,
.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
},
+ .owner = -1,
};
EXPORT_SYMBOL(noop_qdisc);
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 79e93a19d5fa..06e03f5cd7ce 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -185,7 +185,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
- removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
+ removed = kmalloc(sizeof(*removed) * (q->max_bands - qopt->bands),
GFP_KERNEL);
if (!removed)
return -ENOMEM;
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 1ab17e8a7260..b284a06b5a75 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1151,11 +1151,6 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
list_for_each_entry(entry, &new->entries, list)
cycle = ktime_add_ns(cycle, entry->interval);
- if (!cycle) {
- NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
- return -EINVAL;
- }
-
if (cycle < 0 || cycle > INT_MAX) {
NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
return -EINVAL;
@@ -1164,6 +1159,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
new->cycle_time = cycle;
}
+ if (new->cycle_time < new->num_entries * length_to_duration(q, ETH_ZLEN)) {
+ NL_SET_ERR_MSG(extack, "'cycle_time' is too small");
+ return -EINVAL;
+ }
+
taprio_calculate_gate_durations(q, new);
return 0;
@@ -1176,16 +1176,13 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
{
bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);
- if (!qopt && !dev->num_tc) {
- NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
- return -EINVAL;
- }
-
- /* If num_tc is already set, it means that the user already
- * configured the mqprio part
- */
- if (dev->num_tc)
+ if (!qopt) {
+ if (!dev->num_tc) {
+ NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
+ return -EINVAL;
+ }
return 0;
+ }
/* taprio imposes that traffic classes map 1:n to tx queues */
if (qopt->num_tc > dev->num_tx_queues) {
@@ -1848,6 +1845,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
}
q->flags = taprio_flags;
+ /* Needed for length_to_duration() during netlink attribute parsing */
+ taprio_set_picos_per_byte(dev, q);
+
err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
if (err < 0)
return err;
@@ -1907,7 +1907,6 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
if (err < 0)
goto free_sched;
- taprio_set_picos_per_byte(dev, q);
taprio_update_queue_max_sdu(q, new_admin, stab);
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
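
The taprio hunks above are linked: the new lower bound compares cycle_time against num_entries * length_to_duration(q, ETH_ZLEN), and length_to_duration() only gives sensible answers once taprio_set_picos_per_byte() has run, which is why that call moves ahead of the netlink parsing. A hedged sketch of the byte-length to nanoseconds conversion the check relies on; the helper below is illustrative, not the driver's exact code:

#include <linux/math64.h>

static u64 length_to_duration_sketch(u64 len_bytes, u64 picos_per_byte)
{
	/* bytes * (ps / byte) = ps; 1000 ps per ns */
	return div_u64(len_bytes * picos_per_byte, 1000);
}
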
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index e50a286fd0fb..c5f98c6b2561 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -459,29 +459,11 @@ out:
static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
unsigned long mask)
{
- struct net *nnet = sock_net(nsk);
-
nsk->sk_userlocks = osk->sk_userlocks;
- if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) {
+ if (osk->sk_userlocks & SOCK_SNDBUF_LOCK)
nsk->sk_sndbuf = osk->sk_sndbuf;
- } else {
- if (mask == SK_FLAGS_SMC_TO_CLC)
- WRITE_ONCE(nsk->sk_sndbuf,
- READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1]));
- else
- WRITE_ONCE(nsk->sk_sndbuf,
- 2 * READ_ONCE(nnet->smc.sysctl_wmem));
- }
- if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) {
+ if (osk->sk_userlocks & SOCK_RCVBUF_LOCK)
nsk->sk_rcvbuf = osk->sk_rcvbuf;
- } else {
- if (mask == SK_FLAGS_SMC_TO_CLC)
- WRITE_ONCE(nsk->sk_rcvbuf,
- READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1]));
- else
- WRITE_ONCE(nsk->sk_rcvbuf,
- 2 * READ_ONCE(nnet->smc.sysctl_rmem));
- }
}
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index c7af0220f82f..369310909fc9 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1875,8 +1875,10 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
/* slack space should prevent this ever happening: */
- if (unlikely(snd_buf->len > snd_buf->buflen))
+ if (unlikely(snd_buf->len > snd_buf->buflen)) {
+ status = -EIO;
goto wrap_failed;
+ }
/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
* done anyway, so it's safe to put the request on the wire: */
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 96ab50eda9c2..73a90ad873fb 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1069,7 +1069,7 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
goto out_denied_free;
pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
- in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
+ in_token->pages = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
if (!in_token->pages)
goto out_denied_free;
in_token->page_base = 0;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 2b4b1276d4e8..d9cda1e53a01 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1557,9 +1557,11 @@ out_drop:
*/
void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
{
+ struct rpc_timeout timeout = {
+ .to_increment = 0,
+ };
struct rpc_task *task;
int proc_error;
- struct rpc_timeout timeout;
/* Build the svc_rqst used by the common processing routine */
rqstp->rq_xid = req->rq_xid;
@@ -1612,6 +1614,7 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
timeout.to_initval = req->rq_xprt->timeout->to_initval;
timeout.to_retries = req->rq_xprt->timeout->to_retries;
}
+ timeout.to_maxval = timeout.to_initval;
memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
task = rpc_run_bc_task(req, &timeout);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index c1e890a82434..500320e5ca47 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2105,6 +2105,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
} else {
n = tipc_node_find_by_id(net, ehdr->id);
}
+ skb_dst_force(skb);
tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
if (!skb)
return;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e4af6616e1df..142f56770b77 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -221,15 +221,9 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}
-static inline int unix_recvq_full(const struct sock *sk)
-{
- return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
-}
-
static inline int unix_recvq_full_lockless(const struct sock *sk)
{
- return skb_queue_len_lockless(&sk->sk_receive_queue) >
- READ_ONCE(sk->sk_max_ack_backlog);
+ return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
struct sock *unix_peer_get(struct sock *s)
@@ -530,10 +524,10 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
return 0;
}
-static int unix_writable(const struct sock *sk)
+static int unix_writable(const struct sock *sk, unsigned char state)
{
- return sk->sk_state != TCP_LISTEN &&
- (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+ return state != TCP_LISTEN &&
+ (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
}
static void unix_write_space(struct sock *sk)
@@ -541,7 +535,7 @@ static void unix_write_space(struct sock *sk)
struct socket_wq *wq;
rcu_read_lock();
- if (unix_writable(sk)) {
+ if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait,
@@ -570,7 +564,6 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
sk_error_report(other);
}
}
- other->sk_state = TCP_CLOSE;
}
static void unix_sock_destructor(struct sock *sk)
@@ -617,7 +610,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
u->path.dentry = NULL;
u->path.mnt = NULL;
state = sk->sk_state;
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
skpair = unix_peer(sk);
unix_peer(sk) = NULL;
@@ -638,7 +631,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
unix_state_lock(skpair);
/* No more writes */
WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
- if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
WRITE_ONCE(skpair->sk_err, ECONNRESET);
unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
@@ -731,7 +724,7 @@ static int unix_listen(struct socket *sock, int backlog)
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto out; /* Only stream/seqpacket sockets accept */
err = -EINVAL;
- if (!u->addr)
+ if (!READ_ONCE(u->addr))
goto out; /* No listens on an unbound socket */
unix_state_lock(sk);
if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
@@ -739,7 +732,8 @@ static int unix_listen(struct socket *sock, int backlog)
if (backlog > sk->sk_max_ack_backlog)
wake_up_interruptible_all(&u->peer_wait);
sk->sk_max_ack_backlog = backlog;
- sk->sk_state = TCP_LISTEN;
+ WRITE_ONCE(sk->sk_state, TCP_LISTEN);
+
/* set credentials so connect can copy them */
init_peercred(sk);
err = 0;
@@ -976,7 +970,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
sk->sk_hash = unix_unbound_hash(sk);
sk->sk_allocation = GFP_KERNEL_ACCOUNT;
sk->sk_write_space = unix_write_space;
- sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
+ sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen);
sk->sk_destruct = unix_sock_destructor;
u = unix_sk(sk);
u->listener = NULL;
@@ -1131,8 +1125,8 @@ static struct sock *unix_find_other(struct net *net,
static int unix_autobind(struct sock *sk)
{
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct unix_address *addr;
u32 lastnum, ordernum;
@@ -1155,6 +1149,7 @@ static int unix_autobind(struct sock *sk)
addr->name->sun_family = AF_UNIX;
refcount_set(&addr->refcnt, 1);
+ old_hash = sk->sk_hash;
ordernum = get_random_u32();
lastnum = ordernum & 0xFFFFF;
retry:
@@ -1195,8 +1190,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
{
umode_t mode = S_IFSOCK |
(SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct mnt_idmap *idmap;
struct unix_address *addr;
@@ -1234,6 +1229,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
if (u->addr)
goto out_unlock;
+ old_hash = sk->sk_hash;
new_hash = unix_bsd_hash(d_backing_inode(dentry));
unix_table_double_lock(net, old_hash, new_hash);
u->path.mnt = mntget(parent.mnt);
@@ -1261,8 +1257,8 @@ out:
static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
int addr_len)
{
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct unix_address *addr;
int err;
@@ -1280,6 +1276,7 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
goto out_mutex;
}
+ old_hash = sk->sk_hash;
new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
unix_table_double_lock(net, old_hash, new_hash);
@@ -1369,7 +1366,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
- !unix_sk(sk)->addr) {
+ !READ_ONCE(unix_sk(sk)->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
@@ -1399,7 +1396,8 @@ restart:
if (err)
goto out_unlock;
- sk->sk_state = other->sk_state = TCP_ESTABLISHED;
+ WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
+ WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
} else {
/*
* 1003.1g breaking connected state with AF_UNSPEC
@@ -1416,13 +1414,20 @@ restart:
unix_peer(sk) = other;
if (!other)
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
unix_state_double_unlock(sk, other);
- if (other != old_peer)
+ if (other != old_peer) {
unix_dgram_disconnected(sk, old_peer);
+
+ unix_state_lock(old_peer);
+ if (!unix_peer(old_peer))
+ WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
+ unix_state_unlock(old_peer);
+ }
+
sock_put(old_peer);
} else {
unix_peer(sk) = other;
@@ -1470,7 +1475,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
struct sk_buff *skb = NULL;
long timeo;
int err;
- int st;
err = unix_validate_addr(sunaddr, addr_len);
if (err)
@@ -1481,7 +1485,8 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
goto out;
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
+ test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
+ !READ_ONCE(u->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
@@ -1534,7 +1539,7 @@ restart:
if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
- if (unix_recvq_full(other)) {
+ if (unix_recvq_full_lockless(other)) {
err = -EAGAIN;
if (!timeo)
goto out_unlock;
@@ -1559,9 +1564,7 @@ restart:
Well, and we have to recheck the state after socket locked.
*/
- st = sk->sk_state;
-
- switch (st) {
+ switch (READ_ONCE(sk->sk_state)) {
case TCP_CLOSE:
/* This is ok... continue with connect */
break;
@@ -1576,7 +1579,7 @@ restart:
unix_state_lock_nested(sk, U_LOCK_SECOND);
- if (sk->sk_state != st) {
+ if (sk->sk_state != TCP_CLOSE) {
unix_state_unlock(sk);
unix_state_unlock(other);
sock_put(other);
@@ -1629,7 +1632,7 @@ restart:
copy_peercred(sk, other);
sock->state = SS_CONNECTED;
- sk->sk_state = TCP_ESTABLISHED;
+ WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
sock_hold(newsk);
smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
@@ -1701,7 +1704,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock,
goto out;
arg->err = -EINVAL;
- if (sk->sk_state != TCP_LISTEN)
+ if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
goto out;
/* If socket state is TCP_LISTEN it cannot change (for now...),
@@ -1950,14 +1953,15 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
}
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
+ test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
+ !READ_ONCE(u->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
}
err = -EMSGSIZE;
- if (len > sk->sk_sndbuf - 32)
+ if (len > READ_ONCE(sk->sk_sndbuf) - 32)
goto out;
if (len > SKB_MAX_ALLOC) {
@@ -2039,7 +2043,7 @@ restart_locked:
unix_peer(sk) = NULL;
unix_dgram_peer_wake_disconnect_wakeup(sk, other);
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
@@ -2216,7 +2220,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
}
if (msg->msg_namelen) {
- err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
+ err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
goto out_err;
} else {
err = -ENOTCONN;
@@ -2237,7 +2241,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
&err, 0);
} else {
/* Keep two messages in the pipe so it schedules better */
- size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
+ size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
/* allow fallback to order-0 allocations */
size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
@@ -2330,7 +2334,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
if (err)
return err;
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
return -ENOTCONN;
if (msg->msg_namelen)
@@ -2344,7 +2348,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
return -ENOTCONN;
return unix_dgram_recvmsg(sock, msg, size, flags);
@@ -2609,10 +2613,24 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
{
struct unix_sock *u = unix_sk(sk);
- if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
- skb_unlink(skb, &sk->sk_receive_queue);
- consume_skb(skb);
- skb = NULL;
+ if (!unix_skb_len(skb)) {
+ struct sk_buff *unlinked_skb = NULL;
+
+ spin_lock(&sk->sk_receive_queue.lock);
+
+ if (copied && (!u->oob_skb || skb == u->oob_skb)) {
+ skb = NULL;
+ } else if (flags & MSG_PEEK) {
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ } else {
+ unlinked_skb = skb;
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ __skb_unlink(unlinked_skb, &sk->sk_receive_queue);
+ }
+
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+ consume_skb(unlinked_skb);
} else {
struct sk_buff *unlinked_skb = NULL;
@@ -2621,18 +2639,18 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
if (skb == u->oob_skb) {
if (copied) {
skb = NULL;
- } else if (sock_flag(sk, SOCK_URGINLINE)) {
- if (!(flags & MSG_PEEK)) {
+ } else if (!(flags & MSG_PEEK)) {
+ if (sock_flag(sk, SOCK_URGINLINE)) {
WRITE_ONCE(u->oob_skb, NULL);
consume_skb(skb);
+ } else {
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ WRITE_ONCE(u->oob_skb, NULL);
+ unlinked_skb = skb;
+ skb = skb_peek(&sk->sk_receive_queue);
}
- } else if (flags & MSG_PEEK) {
- skb = NULL;
- } else {
- __skb_unlink(skb, &sk->sk_receive_queue);
- WRITE_ONCE(u->oob_skb, NULL);
- unlinked_skb = skb;
- skb = skb_peek(&sk->sk_receive_queue);
+ } else if (!sock_flag(sk, SOCK_URGINLINE)) {
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
}
}
@@ -2649,7 +2667,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
- if (unlikely(sk->sk_state != TCP_ESTABLISHED))
+ if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
return -ENOTCONN;
return unix_read_skb(sk, recv_actor);
@@ -2673,7 +2691,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
size_t size = state->size;
unsigned int last_len;
- if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
+ if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
err = -EINVAL;
goto out;
}
@@ -3004,7 +3022,7 @@ long unix_inq_len(struct sock *sk)
struct sk_buff *skb;
long amount = 0;
- if (sk->sk_state == TCP_LISTEN)
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
return -EINVAL;
spin_lock(&sk->sk_receive_queue.lock);
@@ -3089,12 +3107,23 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
case SIOCATMARK:
{
+ struct unix_sock *u = unix_sk(sk);
struct sk_buff *skb;
int answ = 0;
+ mutex_lock(&u->iolock);
+
skb = skb_peek(&sk->sk_receive_queue);
- if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
- answ = 1;
+ if (skb) {
+ struct sk_buff *oob_skb = READ_ONCE(u->oob_skb);
+
+ if (skb == oob_skb ||
+ (!oob_skb && !unix_skb_len(skb)))
+ answ = 1;
+ }
+
+ mutex_unlock(&u->iolock);
+
err = put_user(answ, (int __user *)arg);
}
break;
@@ -3116,12 +3145,14 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
+ unsigned char state;
__poll_t mask;
u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
shutdown = READ_ONCE(sk->sk_shutdown);
+ state = READ_ONCE(sk->sk_state);
/* exceptional events? */
if (READ_ONCE(sk->sk_err))
@@ -3143,14 +3174,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
/* Connection-based need to check for termination and startup */
if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
- sk->sk_state == TCP_CLOSE)
+ state == TCP_CLOSE)
mask |= EPOLLHUP;
/*
* we set writable also when the other side has shut down the
* connection. This prevents stuck sockets.
*/
- if (unix_writable(sk))
+ if (unix_writable(sk, state))
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
return mask;
@@ -3161,12 +3192,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
{
struct sock *sk = sock->sk, *other;
unsigned int writable;
+ unsigned char state;
__poll_t mask;
u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
shutdown = READ_ONCE(sk->sk_shutdown);
+ state = READ_ONCE(sk->sk_state);
/* exceptional events? */
if (READ_ONCE(sk->sk_err) ||
@@ -3186,19 +3219,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based need to check for termination and startup */
- if (sk->sk_type == SOCK_SEQPACKET) {
- if (sk->sk_state == TCP_CLOSE)
- mask |= EPOLLHUP;
- /* connection hasn't started yet? */
- if (sk->sk_state == TCP_SYN_SENT)
- return mask;
- }
+ if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
+ mask |= EPOLLHUP;
/* No write status requested, avoid expensive OUT tests. */
if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
return mask;
- writable = unix_writable(sk);
+ writable = unix_writable(sk, state);
if (writable) {
unix_state_lock(sk);
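
Most of the af_unix churn above is one pattern applied repeatedly: sk->sk_state, sk->sk_sndbuf and friends are now read without the socket lock in poll, sendmsg and diag paths, so every lockless read gains READ_ONCE() and every locked write gains WRITE_ONCE(). A minimal illustration of the pairing (the helper name is made up for this sketch):

/* lockless reader: READ_ONCE() pairs with the WRITE_ONCE() done under
 * unix_state_lock(), preventing torn or refetched loads */
static bool unix_established_sketch(const struct sock *sk)
{
	return READ_ONCE(sk->sk_state) == TCP_ESTABLISHED;
}
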
diff --git a/net/unix/diag.c b/net/unix/diag.c
index ae39538c5042..937edf4afed4 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -65,7 +65,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
u32 *buf;
int i;
- if (sk->sk_state == TCP_LISTEN) {
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
spin_lock(&sk->sk_receive_queue.lock);
attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
@@ -103,8 +103,8 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
struct unix_diag_rqlen rql;
- if (sk->sk_state == TCP_LISTEN) {
- rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
+ rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
rql.udiag_wqueue = sk->sk_max_ack_backlog;
} else {
rql.udiag_rqueue = (u32) unix_inq_len(sk);
@@ -136,7 +136,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
rep = nlmsg_data(nlh);
rep->udiag_family = AF_UNIX;
rep->udiag_type = sk->sk_type;
- rep->udiag_state = sk->sk_state;
+ rep->udiag_state = READ_ONCE(sk->sk_state);
rep->pad = 0;
rep->udiag_ino = sk_ino;
sock_diag_save_cookie(sk, rep->udiag_cookie);
@@ -165,7 +165,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
goto out_nlmsg_trim;
- if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
+ if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_UID) &&
@@ -215,7 +215,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
sk_for_each(sk, &net->unx.table.buckets[slot]) {
if (num < s_num)
goto next;
- if (!(req->udiag_states & (1 << sk->sk_state)))
+ if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
goto next;
if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
NETLINK_CB(cb->skb).portid,
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3fb1b637352a..4b1f45e3070e 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -431,7 +431,7 @@ static void cfg80211_wiphy_work(struct work_struct *work)
if (wk) {
list_del_init(&wk->entry);
if (!list_empty(&rdev->wiphy_work_list))
- schedule_work(work);
+ queue_work(system_unbound_wq, work);
spin_unlock_irq(&rdev->wiphy_work_lock);
wk->func(&rdev->wiphy, wk);
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
index e106dcea3977..c569c37da317 100644
--- a/net/wireless/pmsr.c
+++ b/net/wireless/pmsr.c
@@ -56,7 +56,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.burst_period = 0;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
out->ftm.burst_period =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
+ nla_get_u16(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
if (out->ftm.asap && !capa->ftm.asap) {
@@ -75,7 +75,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.num_bursts_exp = 0;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
out->ftm.num_bursts_exp =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
+ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
if (capa->ftm.max_bursts_exponent >= 0 &&
out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
@@ -88,7 +88,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.burst_duration = 15;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
out->ftm.burst_duration =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
+ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
out->ftm.ftms_per_burst = 0;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
@@ -107,7 +107,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.ftmr_retries = 3;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
out->ftm.ftmr_retries =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
+ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
if (out->ftm.request_lci && !capa->ftm.request_lci) {
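
The pmsr fixes above all swap nla_get_u32() for the accessor matching the attribute's declared netlink policy width, since a wider accessor can read bytes beyond the attribute payload. A hedged sketch with an invented attribute showing how the policy entry and the accessor are kept in step:

/* The attribute index and default value are hypothetical; the point is
 * that an NLA_U8 policy entry is read back with nla_get_u8(), never
 * nla_get_u32(). */
static const struct nla_policy example_policy[] = {
	[1] = { .type = NLA_U8 },	/* e.g. a retries attribute */
};

static u8 example_get_retries(struct nlattr **tb)
{
	return tb[1] ? nla_get_u8(tb[1]) : 3;	/* 3 = assumed default */
}
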
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 43897a5269b6..755af47b88b9 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018, 2021-2023 Intel Corporation
+ * Copyright (C) 2018, 2021-2024 Intel Corporation
*/
#ifndef __CFG80211_RDEV_OPS
#define __CFG80211_RDEV_OPS
@@ -458,6 +458,10 @@ static inline int rdev_scan(struct cfg80211_registered_device *rdev,
struct cfg80211_scan_request *request)
{
int ret;
+
+ if (WARN_ON_ONCE(!request->n_ssids && request->ssids))
+ return -EINVAL;
+
trace_rdev_scan(&rdev->wiphy, request);
ret = rdev->ops->scan(&rdev->wiphy, request);
trace_rdev_return_int(&rdev->wiphy, ret);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 127853877a0a..0222ede0feb6 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -812,6 +812,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
LIST_HEAD(coloc_ap_list);
bool need_scan_psc = true;
const struct ieee80211_sband_iftype_data *iftd;
+ size_t size, offs_ssids, offs_6ghz_params, offs_ies;
rdev_req->scan_6ghz = true;
@@ -877,10 +878,15 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
spin_unlock_bh(&rdev->bss_lock);
}
- request = kzalloc(struct_size(request, channels, n_channels) +
- sizeof(*request->scan_6ghz_params) * count +
- sizeof(*request->ssids) * rdev_req->n_ssids,
- GFP_KERNEL);
+ size = struct_size(request, channels, n_channels);
+ offs_ssids = size;
+ size += sizeof(*request->ssids) * rdev_req->n_ssids;
+ offs_6ghz_params = size;
+ size += sizeof(*request->scan_6ghz_params) * count;
+ offs_ies = size;
+ size += rdev_req->ie_len;
+
+ request = kzalloc(size, GFP_KERNEL);
if (!request) {
cfg80211_free_coloc_ap_list(&coloc_ap_list);
return -ENOMEM;
@@ -888,8 +894,26 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
*request = *rdev_req;
request->n_channels = 0;
- request->scan_6ghz_params =
- (void *)&request->channels[n_channels];
+ request->n_6ghz_params = 0;
+ if (rdev_req->n_ssids) {
+ /*
+ * Add the ssids from the parent scan request to the new
+ * scan request, so the driver would be able to use them
+ * in its probe requests to discover hidden APs on PSC
+ * channels.
+ */
+ request->ssids = (void *)request + offs_ssids;
+ memcpy(request->ssids, rdev_req->ssids,
+ sizeof(*request->ssids) * request->n_ssids);
+ }
+ request->scan_6ghz_params = (void *)request + offs_6ghz_params;
+
+ if (rdev_req->ie_len) {
+ void *ie = (void *)request + offs_ies;
+
+ memcpy(ie, rdev_req->ie, rdev_req->ie_len);
+ request->ie = ie;
+ }
/*
* PSC channels should not be scanned in case of direct scan with 1 SSID
@@ -978,17 +1002,8 @@ skip:
if (request->n_channels) {
struct cfg80211_scan_request *old = rdev->int_scan_req;
- rdev->int_scan_req = request;
- /*
- * Add the ssids from the parent scan request to the new scan
- * request, so the driver would be able to use them in its
- * probe requests to discover hidden APs on PSC channels.
- */
- request->ssids = (void *)&request->channels[request->n_channels];
- request->n_ssids = rdev_req->n_ssids;
- memcpy(request->ssids, rdev_req->ssids, sizeof(*request->ssids) *
- request->n_ssids);
+ rdev->int_scan_req = request;
/*
* If this scan follows a previous scan, save the scan start
@@ -2128,7 +2143,8 @@ static bool cfg80211_6ghz_power_type_valid(const u8 *ie, size_t ielen,
struct ieee80211_he_operation *he_oper;
tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen);
- if (tmp && tmp->datalen >= sizeof(*he_oper) + 1) {
+ if (tmp && tmp->datalen >= sizeof(*he_oper) + 1 &&
+ tmp->datalen >= ieee80211_he_oper_size(tmp->data + 1)) {
const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
he_oper = (void *)&tmp->data[1];
@@ -3400,10 +3416,14 @@ int cfg80211_wext_siwscan(struct net_device *dev,
wiphy = &rdev->wiphy;
/* Determine number of channels, needed to allocate creq */
- if (wreq && wreq->num_channels)
+ if (wreq && wreq->num_channels) {
+ /* Passed from userspace so should be checked */
+ if (unlikely(wreq->num_channels > IW_MAX_FREQUENCIES))
+ return -EINVAL;
n_channels = wreq->num_channels;
- else
+ } else {
n_channels = ieee80211_get_num_supported_channels(wiphy);
+ }
creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
n_channels * sizeof(void *),
@@ -3477,8 +3497,10 @@ int cfg80211_wext_siwscan(struct net_device *dev,
memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len);
creq->ssids[0].ssid_len = wreq->essid_len;
}
- if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE)
+ if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE) {
+ creq->ssids = NULL;
creq->n_ssids = 0;
+ }
}
for (i = 0; i < NUM_NL80211_BANDS; i++)
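
The cfg80211_scan_6ghz rework above replaces pointer arithmetic off request->channels[] with explicit offsets into a single allocation that now also carries the SSIDs and the IEs. A condensed sketch of the layout arithmetic; variable names follow the hunk, n_ssids and ie_len stand in for the rdev_req fields, and error handling is trimmed:

	size = struct_size(request, channels, n_channels);	/* header + channels[] */
	offs_ssids = size;
	size += sizeof(*request->ssids) * n_ssids;
	offs_ies = size;
	size += ie_len;

	request = kzalloc(size, GFP_KERNEL);			/* one zeroed block */
	if (!request)
		return -ENOMEM;

	request->ssids = (void *)request + offs_ssids;		/* interior pointers */
	request->ie    = (void *)request + offs_ies;
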
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 565511a3f461..62f26618f674 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -5,7 +5,7 @@
*
* Copyright 2005-2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2020-2021, 2023 Intel Corporation
+ * Copyright (C) 2020-2021, 2023-2024 Intel Corporation
*/
#include <linux/device.h>
@@ -137,7 +137,7 @@ static int wiphy_resume(struct device *dev)
if (rdev->wiphy.registered && rdev->ops->resume)
ret = rdev_resume(rdev);
rdev->suspended = false;
- schedule_work(&rdev->wiphy_work);
+ queue_work(system_unbound_wq, &rdev->wiphy_work);
wiphy_unlock(&rdev->wiphy);
if (ret)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 2bde8a354631..082c6f9c5416 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -2549,6 +2549,7 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
{
struct cfg80211_registered_device *rdev;
struct wireless_dev *wdev;
+ int ret;
wdev = dev->ieee80211_ptr;
if (!wdev)
@@ -2560,7 +2561,11 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
memset(sinfo, 0, sizeof(*sinfo));
- return rdev_get_station(rdev, dev, mac_addr, sinfo);
+ wiphy_lock(&rdev->wiphy);
+ ret = rdev_get_station(rdev, dev, mac_addr, sinfo);
+ wiphy_unlock(&rdev->wiphy);
+
+ return ret;
}
EXPORT_SYMBOL(cfg80211_get_station);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 727aa20be4bd..7d1c0986f9bb 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -313,13 +313,10 @@ static bool xsk_is_bound(struct xdp_sock *xs)
static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
- struct net_device *dev = xdp->rxq->dev;
- u32 qid = xdp->rxq->queue_index;
-
if (!xsk_is_bound(xs))
return -ENXIO;
- if (!dev->_rx[qid].pool || xs->umem != dev->_rx[qid].pool->umem)
+ if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
return -EINVAL;
if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 475b904fe68b..66e07de2de35 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3910,15 +3910,10 @@ static void xfrm_link_failure(struct sk_buff *skb)
/* Impossible. Such dst must be popped before reaches point of failure. */
}
-static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
+static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
{
- if (dst) {
- if (dst->obsolete) {
- dst_release(dst);
- dst = NULL;
- }
- }
- return dst;
+ if (dst->obsolete)
+ sk_dst_reset(sk);
}
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
diff --git a/rust/kernel/alloc/vec_ext.rs b/rust/kernel/alloc/vec_ext.rs
index e9a81052728a..1297a4be32e8 100644
--- a/rust/kernel/alloc/vec_ext.rs
+++ b/rust/kernel/alloc/vec_ext.rs
@@ -4,7 +4,6 @@
use super::{AllocError, Flags};
use alloc::vec::Vec;
-use core::ptr;
/// Extensions to [`Vec`].
pub trait VecExt<T>: Sized {
@@ -141,7 +140,11 @@ impl<T> VecExt<T> for Vec<T> {
// `krealloc_aligned`. A `Vec<T>`'s `ptr` value is not guaranteed to be NULL and might be
// dangling after being created with `Vec::new`. Instead, we can rely on `Vec<T>`'s capacity
// to be zero if no memory has been allocated yet.
- let ptr = if cap == 0 { ptr::null_mut() } else { old_ptr };
+ let ptr = if cap == 0 {
+ core::ptr::null_mut()
+ } else {
+ old_ptr
+ };
// SAFETY: `ptr` is valid because it's either NULL or comes from a previous call to
// `krealloc_aligned`. We also verified that the type is not a ZST.
diff --git a/scripts/Makefile.dtbinst b/scripts/Makefile.dtbinst
index 67956f6496a5..9d920419a62c 100644
--- a/scripts/Makefile.dtbinst
+++ b/scripts/Makefile.dtbinst
@@ -17,7 +17,7 @@ include $(srctree)/scripts/Kbuild.include
dst := $(INSTALL_DTBS_PATH)
quiet_cmd_dtb_install = INSTALL $@
- cmd_dtb_install = install -D $< $@
+ cmd_dtb_install = install -D -m 0644 $< $@
$(dst)/%: $(obj)/%
$(call cmd,dtb_install)
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index d35f55e0d141..e85be7721a48 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -146,7 +146,7 @@ $(call multi_depend, $(host-cxxmulti), , -objs -cxxobjs)
# Create .o file from a single .cc (C++) file
quiet_cmd_host-cxxobjs = HOSTCXX $@
cmd_host-cxxobjs = $(HOSTCXX) $(hostcxx_flags) -c -o $@ $<
-$(host-cxxobjs): $(obj)/%.o: $(src)/%.cc FORCE
+$(host-cxxobjs): $(obj)/%.o: $(obj)/%.cc FORCE
$(call if_changed_dep,host-cxxobjs)
# Create executable from a single Rust crate (which may consist of
diff --git a/scripts/Makefile.package b/scripts/Makefile.package
index 38653f3e8108..bf016af8bf8a 100644
--- a/scripts/Makefile.package
+++ b/scripts/Makefile.package
@@ -103,7 +103,7 @@ debian-orig: private version = $(shell dpkg-parsechangelog -S Version | sed 's/-
debian-orig: private orig-name = $(source)_$(version).orig.tar$(debian-orig-suffix)
debian-orig: mkdebian-opts = --need-source
debian-orig: linux.tar$(debian-orig-suffix) debian
- $(Q)if [ "$(df --output=target .. 2>/dev/null)" = "$(df --output=target $< 2>/dev/null)" ]; then \
+ $(Q)if [ "$$(df --output=target .. 2>/dev/null)" = "$$(df --output=target $< 2>/dev/null)" ]; then \
ln -f $< ../$(orig-name); \
else \
cp $< ../$(orig-name); \
diff --git a/scripts/atomic/kerneldoc/sub_and_test b/scripts/atomic/kerneldoc/sub_and_test
index d3760f7749d4..96615e50836b 100644
--- a/scripts/atomic/kerneldoc/sub_and_test
+++ b/scripts/atomic/kerneldoc/sub_and_test
@@ -1,7 +1,7 @@
cat <<EOF
/**
* ${class}${atomicname}() - atomic subtract and test if zero with ${desc_order} ordering
- * @i: ${int} value to add
+ * @i: ${int} value to subtract
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v - @i) with ${desc_order} ordering.
diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile
index a18657072541..b47f4daa4515 100644
--- a/scripts/dtc/Makefile
+++ b/scripts/dtc/Makefile
@@ -3,7 +3,7 @@
# *** Also keep .gitignore in sync when changing ***
hostprogs-always-$(CONFIG_DTC) += dtc fdtoverlay
-hostprogs-always-$(CHECK_DT_BINDING) += dtc
+hostprogs-always-$(CHECK_DTBS) += dtc
dtc-objs := dtc.o flattree.o fstree.o data.o livetree.o treesource.o \
srcpos.o checks.o util.o
diff --git a/scripts/gdb/linux/Makefile b/scripts/gdb/linux/Makefile
index d77ad9079d0f..fcd32fcf3ae0 100644
--- a/scripts/gdb/linux/Makefile
+++ b/scripts/gdb/linux/Makefile
@@ -5,7 +5,7 @@ ifdef building_out_of_srctree
symlinks := $(patsubst $(src)/%,%,$(wildcard $(src)/*.py))
quiet_cmd_symlink = SYMLINK $@
- cmd_symlink = ln -fsn $(patsubst $(obj)/%,$(abspath $(srctree))/$(src)/%,$@) $@
+ cmd_symlink = ln -fsn $(patsubst $(obj)/%,$(abspath $(src))/%,$@) $@
always-y += $(symlinks)
$(addprefix $(obj)/, $(symlinks)): FORCE
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 387503daf0f7..85b53069ba7a 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -533,19 +533,6 @@ int conf_read(const char *name)
*/
if (sym->visible == no && !conf_unsaved)
sym->flags &= ~SYMBOL_DEF_USER;
- switch (sym->type) {
- case S_STRING:
- case S_INT:
- case S_HEX:
- /* Reset a string value if it's out of range */
- if (sym_string_within_range(sym, sym->def[S_DEF_USER].val))
- break;
- sym->flags &= ~SYMBOL_VALID;
- conf_unsaved++;
- break;
- default:
- break;
- }
}
}
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index a290de36307b..fcc190b67b6f 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -397,35 +397,6 @@ static struct expr *expr_eliminate_yn(struct expr *e)
}
/*
- * bool FOO!=n => FOO
- */
-struct expr *expr_trans_bool(struct expr *e)
-{
- if (!e)
- return NULL;
- switch (e->type) {
- case E_AND:
- case E_OR:
- case E_NOT:
- e->left.expr = expr_trans_bool(e->left.expr);
- e->right.expr = expr_trans_bool(e->right.expr);
- break;
- case E_UNEQUAL:
- // FOO!=n -> FOO
- if (e->left.sym->type == S_TRISTATE) {
- if (e->right.sym == &symbol_no) {
- e->type = E_SYMBOL;
- e->right.sym = NULL;
- }
- }
- break;
- default:
- ;
- }
- return e;
-}
-
-/*
* e1 || e2 -> ?
*/
static struct expr *expr_join_or(struct expr *e1, struct expr *e2)
@@ -476,7 +447,7 @@ static struct expr *expr_join_or(struct expr *e1, struct expr *e2)
return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_yes);
}
}
- if (sym1->type == S_BOOLEAN && sym1 == sym2) {
+ if (sym1->type == S_BOOLEAN) {
if ((e1->type == E_NOT && e1->left.expr->type == E_SYMBOL && e2->type == E_SYMBOL) ||
(e2->type == E_NOT && e2->left.expr->type == E_SYMBOL && e1->type == E_SYMBOL))
return expr_alloc_symbol(&symbol_yes);
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index d965e427753e..7c0c242318bc 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -284,7 +284,6 @@ void expr_free(struct expr *e);
void expr_eliminate_eq(struct expr **ep1, struct expr **ep2);
int expr_eq(struct expr *e1, struct expr *e2);
tristate expr_calc_value(struct expr *e);
-struct expr *expr_trans_bool(struct expr *e);
struct expr *expr_eliminate_dups(struct expr *e);
struct expr *expr_transform(struct expr *e);
int expr_contains_symbol(struct expr *dep, struct symbol *sym);
@@ -302,11 +301,6 @@ static inline int expr_is_yes(struct expr *e)
return !e || (e->type == E_SYMBOL && e->left.sym == &symbol_yes);
}
-static inline int expr_is_no(struct expr *e)
-{
- return e && (e->type == E_SYMBOL && e->left.sym == &symbol_no);
-}
-
#ifdef __cplusplus
}
#endif
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index cc400ffe6615..e04dbafd3add 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -1422,7 +1422,6 @@ int main(int ac, char *av[])
conf_parse(name);
fixup_rootmenu(&rootmenu);
- conf_read(NULL);
/* Load the interface and connect signals */
init_main_window(glade_file);
@@ -1430,6 +1429,8 @@ int main(int ac, char *av[])
init_left_tree();
init_right_tree();
+ conf_read(NULL);
+
switch (view_mode) {
case SINGLE_VIEW:
display_tree_part();
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 53151c5a6028..eef9b63cdf11 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -398,8 +398,6 @@ static void _menu_finalize(struct menu *parent, bool inside_choice)
dep = expr_transform(dep);
dep = expr_alloc_and(expr_copy(basedep), dep);
dep = expr_eliminate_dups(dep);
- if (menu->sym && menu->sym->type != S_TRISTATE)
- dep = expr_trans_bool(dep);
prop->visible.expr = dep;
/*
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index aa0e25ee5119..0e439d3d48d1 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -14,6 +14,7 @@
struct symbol symbol_yes = {
.name = "y",
+ .type = S_TRISTATE,
.curr = { "y", yes },
.menus = LIST_HEAD_INIT(symbol_yes.menus),
.flags = SYMBOL_CONST|SYMBOL_VALID,
@@ -21,6 +22,7 @@ struct symbol symbol_yes = {
struct symbol symbol_mod = {
.name = "m",
+ .type = S_TRISTATE,
.curr = { "m", mod },
.menus = LIST_HEAD_INIT(symbol_mod.menus),
.flags = SYMBOL_CONST|SYMBOL_VALID,
@@ -28,6 +30,7 @@ struct symbol symbol_mod = {
struct symbol symbol_no = {
.name = "n",
+ .type = S_TRISTATE,
.curr = { "n", no },
.menus = LIST_HEAD_INIT(symbol_no.menus),
.flags = SYMBOL_CONST|SYMBOL_VALID,
@@ -820,8 +823,7 @@ const char *sym_get_string_value(struct symbol *sym)
case no:
return "n";
case mod:
- sym_calc_value(modules_sym);
- return (modules_sym->curr.tri == no) ? "n" : "m";
+ return "m";
case yes:
return "y";
}
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 7862a8101747..518c70b8db50 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -179,10 +179,10 @@ kallsyms_step()
kallsyms_S=${kallsyms_vmlinux}.S
vmlinux_link ${kallsyms_vmlinux} "${kallsymso_prev}" ${btf_vmlinux_bin_o}
- mksysmap ${kallsyms_vmlinux} ${kallsyms_vmlinux}.syms ${kallsymso_prev}
+ mksysmap ${kallsyms_vmlinux} ${kallsyms_vmlinux}.syms
kallsyms ${kallsyms_vmlinux}.syms ${kallsyms_S}
- info AS ${kallsyms_S}
+ info AS ${kallsymso}
${CC} ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS} \
${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
-c -o ${kallsymso} ${kallsyms_S}
@@ -193,7 +193,7 @@ kallsyms_step()
mksysmap()
{
info NM ${2}
- ${CONFIG_SHELL} "${srctree}/scripts/mksysmap" ${1} ${2} ${3}
+ ${NM} -n "${1}" | sed -f "${srctree}/scripts/mksysmap" > "${2}"
}
sorttable()
@@ -201,7 +201,6 @@ sorttable()
${objtree}/scripts/sorttable ${1}
}
-# Delete output files in case of error
cleanup()
{
rm -f .btf.*
@@ -282,7 +281,7 @@ if is_enabled CONFIG_DEBUG_INFO_BTF && is_enabled CONFIG_BPF; then
${RESOLVE_BTFIDS} vmlinux
fi
-mksysmap vmlinux System.map ${kallsymso}
+mksysmap vmlinux System.map
if is_enabled CONFIG_BUILDTIME_TABLE_SORT; then
info SORTTAB vmlinux
diff --git a/scripts/make_fit.py b/scripts/make_fit.py
index 3de90c5a094b..263147df80a4 100755
--- a/scripts/make_fit.py
+++ b/scripts/make_fit.py
@@ -190,7 +190,7 @@ def output_dtb(fsw, seq, fname, arch, compress):
Args:
fsw (libfdt.FdtSw): Object to use for writing
seq (int): Sequence number (1 for first)
- fmame (str): Filename containing the DTB
+ fname (str): Filename containing the DTB
arch: FIT architecture, e.g. 'arm64'
compress (str): Compressed algorithm, e.g. 'gzip'
@@ -211,7 +211,6 @@ def output_dtb(fsw, seq, fname, arch, compress):
fsw.property_string('type', 'flat_dt')
fsw.property_string('arch', arch)
fsw.property_string('compression', compress)
- fsw.property('compatible', bytes(compat))
with open(fname, 'rb') as inf:
compressed = compress_data(inf, compress)
diff --git a/scripts/mksysmap b/scripts/mksysmap
index 57ff5656d566..c12723a04655 100755
--- a/scripts/mksysmap
+++ b/scripts/mksysmap
@@ -1,22 +1,16 @@
-#!/bin/sh -x
-# Based on the vmlinux file create the System.map file
+#!/bin/sed -f
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# sed script to filter out symbols that are not needed for System.map,
+# or not suitable for kallsyms. The input should be 'nm -n <file>'.
+#
# System.map is used by module-init tools and some debugging
# tools to retrieve the actual addresses of symbols in the kernel.
#
-# Usage
-# mksysmap vmlinux System.map [exclude]
-
-
-#####
-# Generate System.map (actual filename passed as second argument)
-# The following refers to the symbol type as per nm(1).
-
# readprofile starts reading symbols when _stext is found, and
# continue until it finds a symbol which is not either of 'T', 't',
# 'W' or 'w'.
#
-
-${NM} -n ${1} | sed >${2} -e "
# ---------------------------------------------------------------------------
# Ignored symbol types
#
@@ -92,13 +86,3 @@ ${NM} -n ${1} | sed >${2} -e "
# ppc stub
/\.long_branch\./d
/\.plt_branch\./d
-
-# ---------------------------------------------------------------------------
-# Ignored kallsyms symbols
-#
-# If the 3rd parameter exists, symbols from it will be omitted from the output.
-# This makes kallsyms have the identical symbol lists in the step 1 and 2.
-# Without this, the step2 would get new symbols generated by scripts/kallsyms.c
-# when CONFIG_KALLSYMS_ALL is enabled. That might require one more pass.
-$(if [ $# -ge 3 ]; then ${NM} ${3} | sed -n '/ U /!s:.* \([^ ]*\)$:/ \1$/d:p'; fi)
-"
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 937294ff164f..f48d72d22dc2 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1647,10 +1647,11 @@ static void read_symbols(const char *modname)
namespace = get_next_modinfo(&info, "import_ns",
namespace);
}
+
+ if (extra_warn && !get_modinfo(&info, "description"))
+ warn("missing MODULE_DESCRIPTION() in %s\n", modname);
}
- if (extra_warn && !get_modinfo(&info, "description"))
- warn("missing MODULE_DESCRIPTION() in %s\n", modname);
for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
symname = remove_dot(info.strtab + sym->st_name);
diff --git a/scripts/package/kernel.spec b/scripts/package/kernel.spec
index e095eb1e290e..fffc8af8deb1 100644
--- a/scripts/package/kernel.spec
+++ b/scripts/package/kernel.spec
@@ -57,7 +57,8 @@ patch -p1 < %{SOURCE2}
%install
mkdir -p %{buildroot}/lib/modules/%{KERNELRELEASE}
cp $(%{make} %{makeflags} -s image_name) %{buildroot}/lib/modules/%{KERNELRELEASE}/vmlinuz
-%{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} modules_install
+# DEPMOD=true makes depmod no-op. We do not package depmod-generated files.
+%{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} DEPMOD=true modules_install
%{make} %{makeflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
cp System.map %{buildroot}/lib/modules/%{KERNELRELEASE}
cp .config %{buildroot}/lib/modules/%{KERNELRELEASE}/config
@@ -70,10 +71,7 @@ ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEA
%endif
{
- for x in System.map config kernel modules.builtin \
- modules.builtin.modinfo modules.order vmlinuz; do
- echo "/lib/modules/%{KERNELRELEASE}/${x}"
- done
+ echo "/lib/modules/%{KERNELRELEASE}"
for x in alias alias.bin builtin.alias.bin builtin.bin dep dep.bin \
devname softdep symbols symbols.bin; do
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index effbf5982be1..2cff851ebfd7 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -255,21 +255,6 @@ config INIT_ON_FREE_DEFAULT_ON
touching "cold" memory areas. Most cases see 3-5% impact. Some
synthetic workloads have measured as high as 8%.
-config INIT_MLOCKED_ON_FREE_DEFAULT_ON
- bool "Enable mlocked memory zeroing on free"
- depends on !KMSAN
- help
- This config has the effect of setting "init_mlocked_on_free=1"
- on the kernel command line. If it is enabled, all mlocked process
- memory is zeroed when freed. This restriction to mlocked memory
- improves performance over "init_on_free" but can still be used to
- protect confidential data like key material from content exposures
- to other processes, as well as live forensics and cold boot attacks.
- Any non-mlocked memory is not cleared before it is reassigned. This
- configuration can be overwritten by setting "init_mlocked_on_free=0"
- on the command line. The "init_on_free" boot option takes
- precedence over "init_mlocked_on_free".
-
config CC_HAS_ZERO_CALL_USED_REGS
def_bool $(cc-option,-fzero-call-used-regs=used-gpr)
# https://github.com/ClangBuiltLinux/linux/issues/1766
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index 45beb1c5f747..6b5181c668b5 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -217,7 +217,7 @@ void aa_audit_rule_free(void *vrule)
}
}
-int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp)
{
struct aa_audit_rule *rule;
@@ -230,14 +230,14 @@ int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
return -EINVAL;
}
- rule = kzalloc(sizeof(struct aa_audit_rule), GFP_KERNEL);
+ rule = kzalloc(sizeof(struct aa_audit_rule), gfp);
if (!rule)
return -ENOMEM;
/* Currently rules are treated as coming from the root ns */
rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr,
- GFP_KERNEL, true, false);
+ gfp, true, false);
if (IS_ERR(rule->label)) {
int err = PTR_ERR(rule->label);
aa_audit_rule_free(rule);
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index acbb03b9bd25..0c8cc86b417b 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -200,7 +200,7 @@ static inline int complain_error(int error)
}
void aa_audit_rule_free(void *vrule);
-int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule);
+int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp);
int aa_audit_rule_known(struct audit_krule *rule);
int aa_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule);
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 3e568126cd48..c51e24d24d1e 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -546,7 +546,7 @@ static inline void ima_free_modsig(struct modsig *modsig)
#else
static inline int ima_filter_rule_init(u32 field, u32 op, char *rulestr,
- void **lsmrule)
+ void **lsmrule, gfp_t gfp)
{
return -EINVAL;
}
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index c0556907c2e6..09da8e639239 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -401,7 +401,8 @@ static void ima_free_rule(struct ima_rule_entry *entry)
kfree(entry);
}
-static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry,
+ gfp_t gfp)
{
struct ima_rule_entry *nentry;
int i;
@@ -410,7 +411,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
* Immutable elements are copied over as pointers and data; only
* lsm rules can change
*/
- nentry = kmemdup(entry, sizeof(*nentry), GFP_KERNEL);
+ nentry = kmemdup(entry, sizeof(*nentry), gfp);
if (!nentry)
return NULL;
@@ -425,7 +426,8 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
ima_filter_rule_init(nentry->lsm[i].type, Audit_equal,
nentry->lsm[i].args_p,
- &nentry->lsm[i].rule);
+ &nentry->lsm[i].rule,
+ gfp);
if (!nentry->lsm[i].rule)
pr_warn("rule for LSM \'%s\' is undefined\n",
nentry->lsm[i].args_p);
@@ -438,7 +440,7 @@ static int ima_lsm_update_rule(struct ima_rule_entry *entry)
int i;
struct ima_rule_entry *nentry;
- nentry = ima_lsm_copy_rule(entry);
+ nentry = ima_lsm_copy_rule(entry, GFP_KERNEL);
if (!nentry)
return -ENOMEM;
@@ -664,7 +666,7 @@ retry:
}
if (rc == -ESTALE && !rule_reinitialized) {
- lsm_rule = ima_lsm_copy_rule(rule);
+ lsm_rule = ima_lsm_copy_rule(rule, GFP_ATOMIC);
if (lsm_rule) {
rule_reinitialized = true;
goto retry;
@@ -1140,7 +1142,8 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
entry->lsm[lsm_rule].type = audit_type;
result = ima_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal,
entry->lsm[lsm_rule].args_p,
- &entry->lsm[lsm_rule].rule);
+ &entry->lsm[lsm_rule].rule,
+ GFP_KERNEL);
if (!entry->lsm[lsm_rule].rule) {
pr_warn("rule for LSM \'%s\' is undefined\n",
entry->lsm[lsm_rule].args_p);
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
index 22d8b7c28074..7877a64cc6b8 100644
--- a/security/landlock/fs.c
+++ b/security/landlock/fs.c
@@ -1110,6 +1110,7 @@ static int current_check_refer_path(struct dentry *const old_dentry,
bool allow_parent1, allow_parent2;
access_mask_t access_request_parent1, access_request_parent2;
struct path mnt_dir;
+ struct dentry *old_parent;
layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
@@ -1157,9 +1158,17 @@ static int current_check_refer_path(struct dentry *const old_dentry,
mnt_dir.mnt = new_dir->mnt;
mnt_dir.dentry = new_dir->mnt->mnt_root;
+ /*
+ * old_dentry may be the root of the common mount point and
+ * !IS_ROOT(old_dentry) at the same time (e.g. with open_tree() and
+ * OPEN_TREE_CLONE). We do not need to call dget(old_parent) because
+ * we keep a reference to old_dentry.
+ */
+ old_parent = (old_dentry == mnt_dir.dentry) ? old_dentry :
+ old_dentry->d_parent;
+
/* new_dir->dentry is equal to new_dentry->d_parent */
- allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry,
- old_dentry->d_parent,
+ allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent,
&layer_masks_parent1);
allow_parent2 = collect_domain_accesses(
dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);
diff --git a/security/security.c b/security/security.c
index e5da848c50b9..e5ca08789f74 100644
--- a/security/security.c
+++ b/security/security.c
@@ -5332,15 +5332,17 @@ void security_key_post_create_or_update(struct key *keyring, struct key *key,
* @op: rule operator
* @rulestr: rule context
* @lsmrule: receive buffer for audit rule struct
+ * @gfp: GFP flag used for kmalloc
*
* Allocate and initialize an LSM audit rule structure.
*
* Return: Return 0 if @lsmrule has been successfully set, -EINVAL in case of
* an invalid rule.
*/
-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
+int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
+ gfp_t gfp)
{
- return call_int_hook(audit_rule_init, field, op, rulestr, lsmrule);
+ return call_int_hook(audit_rule_init, field, op, rulestr, lsmrule, gfp);
}
/**
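
The security hunks above thread a gfp_t through the audit_rule_init LSM hook so that IMA's -ESTALE retry path, which runs in atomic context (note the GFP_ATOMIC in the ima_policy.c hunk), no longer forces GFP_KERNEL allocations inside the LSMs. A hypothetical caller sketch; the rule string and the atomic_ctx flag are invented for illustration:

	void *lsm_rule = NULL;
	int err;

	/* pick the allocation mask from the caller's context */
	err = security_audit_rule_init(AUDIT_SUBJ_USER, Audit_equal, "unconfined_u",
				       &lsm_rule, atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
	if (err)
		pr_warn("audit rule init failed: %d\n", err);
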
diff --git a/security/selinux/include/audit.h b/security/selinux/include/audit.h
index 52aca71210b4..29c7d4c86f6d 100644
--- a/security/selinux/include/audit.h
+++ b/security/selinux/include/audit.h
@@ -21,12 +21,14 @@
* @op: the operator the rule uses
* @rulestr: the text "target" of the rule
* @rule: pointer to the new rule structure returned via this
+ * @gfp: GFP flag used for kmalloc
*
* Returns 0 if successful, -errno if not. On success, the rule structure
* will be allocated internally. The caller must free this structure with
* selinux_audit_rule_free() after use.
*/
-int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule);
+int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule,
+ gfp_t gfp);
/**
* selinux_audit_rule_free - free an selinux audit rule structure.
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index f20e1968b7f7..e33e55384b75 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -3507,7 +3507,8 @@ void selinux_audit_rule_free(void *vrule)
}
}
-int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
+ gfp_t gfp)
{
struct selinux_state *state = &selinux_state;
struct selinux_policy *policy;
@@ -3548,7 +3549,7 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
return -EINVAL;
}
- tmprule = kzalloc(sizeof(struct selinux_audit_rule), GFP_KERNEL);
+ tmprule = kzalloc(sizeof(struct selinux_audit_rule), gfp);
if (!tmprule)
return -ENOMEM;
context_init(&tmprule->au_ctxt);
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 70ba2841e181..f5cbec1e6a92 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -4693,11 +4693,13 @@ static int smack_post_notification(const struct cred *w_cred,
* @op: required testing operator (=, !=, >, <, ...)
* @rulestr: smack label to be audited
* @vrule: pointer to save our own audit rule representation
+ * @gfp: type of the memory for the allocation
*
* Prepare to audit cases where (@field @op @rulestr) is true.
* The label to be audited is created if necessary.
*/
-static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
+ gfp_t gfp)
{
struct smack_known *skp;
char **rule = (char **)vrule;
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
index fad75be5f381..1e0dd1a6d0b0 100644
--- a/security/tomoyo/Kconfig
+++ b/security/tomoyo/Kconfig
@@ -10,7 +10,7 @@ config SECURITY_TOMOYO
help
This selects TOMOYO Linux, pathname-based access control.
Required userspace tools and further information may be
- found at <https://tomoyo.osdn.jp/>.
+ found at <https://tomoyo.sourceforge.net/>.
If you are unsure how to answer this question, answer N.
config SECURITY_TOMOYO_MAX_ACCEPT_ENTRY
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index ea3140d510ec..5c7b059a332a 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -2787,7 +2787,7 @@ void tomoyo_check_profile(void)
else
continue;
pr_err("Userland tools for TOMOYO 2.6 must be installed and policy must be initialized.\n");
- pr_err("Please see https://tomoyo.osdn.jp/2.6/ for more information.\n");
+ pr_err("Please see https://tomoyo.sourceforge.net/2.6/ for more information.\n");
panic("STOP!");
}
tomoyo_read_unlock(idx);
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index b6684a074a59..39944a859ff6 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -111,6 +111,7 @@ static void report_access(const char *access, struct task_struct *target,
/**
* yama_relation_cleanup - remove invalid entries from the relation list
+ * @work: unused
*
*/
static void yama_relation_cleanup(struct work_struct *work)
diff --git a/sound/core/init.c b/sound/core/init.c
index 4e52bbe32786..b9b708cf980d 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -537,6 +537,11 @@ void snd_card_disconnect(struct snd_card *card)
synchronize_irq(card->sync_irq);
snd_info_card_disconnect(card);
+#ifdef CONFIG_SND_DEBUG
+ debugfs_remove(card->debugfs_root);
+ card->debugfs_root = NULL;
+#endif
+
if (card->registered) {
device_del(&card->card_dev);
card->registered = false;
@@ -586,10 +591,6 @@ static int snd_card_do_free(struct snd_card *card)
dev_warn(card->dev, "unable to free card info\n");
/* Not fatal error */
}
-#ifdef CONFIG_SND_DEBUG
- debugfs_remove(card->debugfs_root);
- card->debugfs_root = NULL;
-#endif
if (card->release_completion)
complete(card->release_completion);
if (!card->managed)
diff --git a/sound/core/jack.c b/sound/core/jack.c
index e08b2c4fbd1a..e4bcecdf89b7 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -37,11 +37,15 @@ static const int jack_switch_types[SND_JACK_SWITCH_TYPES] = {
};
#endif /* CONFIG_SND_JACK_INPUT_DEV */
+static void snd_jack_remove_debugfs(struct snd_jack *jack);
+
static int snd_jack_dev_disconnect(struct snd_device *device)
{
-#ifdef CONFIG_SND_JACK_INPUT_DEV
struct snd_jack *jack = device->device_data;
+ snd_jack_remove_debugfs(jack);
+
+#ifdef CONFIG_SND_JACK_INPUT_DEV
guard(mutex)(&jack->input_dev_lock);
if (!jack->input_dev)
return 0;
@@ -381,10 +385,14 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
return 0;
}
-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
+static void snd_jack_remove_debugfs(struct snd_jack *jack)
{
- debugfs_remove(jack_kctl->jack_debugfs_root);
- jack_kctl->jack_debugfs_root = NULL;
+ struct snd_jack_kctl *jack_kctl;
+
+ list_for_each_entry(jack_kctl, &jack->kctl_list, list) {
+ debugfs_remove(jack_kctl->jack_debugfs_root);
+ jack_kctl->jack_debugfs_root = NULL;
+ }
}
#else /* CONFIG_SND_JACK_INJECTION_DEBUG */
static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
@@ -393,7 +401,7 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
return 0;
}
-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
+static void snd_jack_remove_debugfs(struct snd_jack *jack)
{
}
#endif /* CONFIG_SND_JACK_INJECTION_DEBUG */
@@ -404,7 +412,6 @@ static void snd_jack_kctl_private_free(struct snd_kcontrol *kctl)
jack_kctl = kctl->private_data;
if (jack_kctl) {
- snd_jack_debugfs_clear_inject_node(jack_kctl);
list_del(&jack_kctl->list);
kfree(jack_kctl);
}
@@ -497,8 +504,8 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
.dev_free = snd_jack_dev_free,
#ifdef CONFIG_SND_JACK_INPUT_DEV
.dev_register = snd_jack_dev_register,
- .dev_disconnect = snd_jack_dev_disconnect,
#endif /* CONFIG_SND_JACK_INPUT_DEV */
+ .dev_disconnect = snd_jack_dev_disconnect,
};
if (initial_kctl) {
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
index 12aa1cef11a1..cc5db93b9132 100644
--- a/sound/core/pcm_dmaengine.c
+++ b/sound/core/pcm_dmaengine.c
@@ -349,6 +349,16 @@ int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ dmaengine_synchronize(prtd->dma_chan);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_sync_stop);
+
/**
* snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
* @substream: PCM substream
@@ -358,6 +368,12 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
{
struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+ if (status == DMA_PAUSED)
+ dmaengine_terminate_async(prtd->dma_chan);
dmaengine_synchronize(prtd->dma_chan);
kfree(prtd);
@@ -378,6 +394,12 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
{
struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+ if (status == DMA_PAUSED)
+ dmaengine_terminate_async(prtd->dma_chan);
dmaengine_synchronize(prtd->dma_chan);
dma_release_channel(prtd->dma_chan);
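The two close paths above share one idea: a descriptor left in DMA_PAUSED never completes, so dmaengine_synchronize() on its own could stall; terminating the paused transfer first makes the synchronize safe. A condensed sketch of that pattern (the helper name is ours, not the kernel's):

	static void example_stop_and_sync(struct dma_chan *chan, dma_cookie_t cookie)
	{
		struct dma_tx_state state;

		/* A paused descriptor will not complete; terminate it first. */
		if (dmaengine_tx_status(chan, cookie, &state) == DMA_PAUSED)
			dmaengine_terminate_async(chan);

		/* Waiting for completion callbacks can no longer block forever. */
		dmaengine_synchronize(chan);
	}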
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 521ba56392a0..c152ccf32214 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1775,6 +1775,8 @@ static int snd_pcm_pre_resume(struct snd_pcm_substream *substream,
snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
+ if (runtime->state != SNDRV_PCM_STATE_SUSPENDED)
+ return -EBADFD;
if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
return -ENOSYS;
runtime->trigger_master = substream;
diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
index ee6ac649df83..e90b27a135e6 100644
--- a/sound/core/seq/seq_ump_convert.c
+++ b/sound/core/seq/seq_ump_convert.c
@@ -157,7 +157,7 @@ static void ump_system_to_one_param_ev(const union snd_ump_midi1_msg *val,
static void ump_system_to_songpos_ev(const union snd_ump_midi1_msg *val,
struct snd_seq_event *ev)
{
- ev->data.control.value = (val->system.parm1 << 7) | val->system.parm2;
+ ev->data.control.value = (val->system.parm2 << 7) | val->system.parm1;
}
/* Encoders for 0xf0 - 0xff */
@@ -368,6 +368,7 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest,
struct snd_seq_ump_event ev_cvt;
const union snd_ump_midi1_msg *midi1 = (const union snd_ump_midi1_msg *)event->ump;
union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)ev_cvt.ump;
+ struct snd_seq_ump_midi2_bank *cc;
ev_cvt = *event;
memset(&ev_cvt.ump, 0, sizeof(ev_cvt.ump));
@@ -387,11 +388,29 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest,
midi2->paf.data = upscale_7_to_32bit(midi1->paf.data);
break;
case UMP_MSG_STATUS_CC:
+ cc = &dest_port->midi2_bank[midi1->note.channel];
+ switch (midi1->cc.index) {
+ case UMP_CC_BANK_SELECT:
+ cc->bank_set = 1;
+ cc->cc_bank_msb = midi1->cc.data;
+ return 0; // skip
+ case UMP_CC_BANK_SELECT_LSB:
+ cc->bank_set = 1;
+ cc->cc_bank_lsb = midi1->cc.data;
+ return 0; // skip
+ }
midi2->cc.index = midi1->cc.index;
midi2->cc.data = upscale_7_to_32bit(midi1->cc.data);
break;
case UMP_MSG_STATUS_PROGRAM:
midi2->pg.program = midi1->pg.program;
+ cc = &dest_port->midi2_bank[midi1->note.channel];
+ if (cc->bank_set) {
+ midi2->pg.bank_valid = 1;
+ midi2->pg.bank_msb = cc->cc_bank_msb;
+ midi2->pg.bank_lsb = cc->cc_bank_lsb;
+ cc->bank_set = 0;
+ }
break;
case UMP_MSG_STATUS_CHANNEL_PRESSURE:
midi2->caf.data = upscale_7_to_32bit(midi1->caf.data);
@@ -419,6 +438,7 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
struct snd_seq_ump_event ev_cvt;
union snd_ump_midi1_msg *midi1 = (union snd_ump_midi1_msg *)ev_cvt.ump;
const union snd_ump_midi2_msg *midi2 = (const union snd_ump_midi2_msg *)event->ump;
+ int err;
u16 v;
ev_cvt = *event;
@@ -443,6 +463,24 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
midi1->cc.data = downscale_32_to_7bit(midi2->cc.data);
break;
case UMP_MSG_STATUS_PROGRAM:
+ if (midi2->pg.bank_valid) {
+ midi1->cc.status = UMP_MSG_STATUS_CC;
+ midi1->cc.index = UMP_CC_BANK_SELECT;
+ midi1->cc.data = midi2->pg.bank_msb;
+ err = __snd_seq_deliver_single_event(dest, dest_port,
+ (struct snd_seq_event *)&ev_cvt,
+ atomic, hop);
+ if (err < 0)
+ return err;
+ midi1->cc.index = UMP_CC_BANK_SELECT_LSB;
+ midi1->cc.data = midi2->pg.bank_lsb;
+ err = __snd_seq_deliver_single_event(dest, dest_port,
+ (struct snd_seq_event *)&ev_cvt,
+ atomic, hop);
+ if (err < 0)
+ return err;
+ midi1->note.status = midi2->note.status;
+ }
midi1->pg.program = midi2->pg.program;
break;
case UMP_MSG_STATUS_CHANNEL_PRESSURE:
@@ -691,6 +729,7 @@ static int system_ev_to_ump_midi1(const struct snd_seq_event *event,
union snd_ump_midi1_msg *data,
unsigned char status)
{
+ data->system.type = UMP_MSG_TYPE_SYSTEM; // override
data->system.status = status;
return 1;
}
@@ -701,6 +740,7 @@ static int system_1p_ev_to_ump_midi1(const struct snd_seq_event *event,
union snd_ump_midi1_msg *data,
unsigned char status)
{
+ data->system.type = UMP_MSG_TYPE_SYSTEM; // override
data->system.status = status;
data->system.parm1 = event->data.control.value & 0x7f;
return 1;
@@ -712,9 +752,10 @@ static int system_2p_ev_to_ump_midi1(const struct snd_seq_event *event,
union snd_ump_midi1_msg *data,
unsigned char status)
{
+ data->system.type = UMP_MSG_TYPE_SYSTEM; // override
data->system.status = status;
- data->system.parm1 = (event->data.control.value >> 7) & 0x7f;
- data->system.parm2 = event->data.control.value & 0x7f;
+ data->system.parm1 = event->data.control.value & 0x7f;
+ data->system.parm2 = (event->data.control.value >> 7) & 0x7f;
return 1;
}
@@ -750,7 +791,8 @@ static int paf_ev_to_ump_midi2(const struct snd_seq_event *event,
/* set up the MIDI2 RPN/NRPN packet data from the parsed info */
static void fill_rpn(struct snd_seq_ump_midi2_bank *cc,
- union snd_ump_midi2_msg *data)
+ union snd_ump_midi2_msg *data,
+ unsigned char channel)
{
if (cc->rpn_set) {
data->rpn.status = UMP_MSG_STATUS_RPN;
@@ -767,6 +809,7 @@ static void fill_rpn(struct snd_seq_ump_midi2_bank *cc,
}
data->rpn.data = upscale_14_to_32bit((cc->cc_data_msb << 7) |
cc->cc_data_lsb);
+ data->rpn.channel = channel;
cc->cc_data_msb = cc->cc_data_lsb = 0;
}
@@ -814,7 +857,7 @@ static int cc_ev_to_ump_midi2(const struct snd_seq_event *event,
cc->cc_data_lsb = val;
if (!(cc->rpn_set || cc->nrpn_set))
return 0; // skip
- fill_rpn(cc, data);
+ fill_rpn(cc, data, channel);
return 1;
}
@@ -854,7 +897,6 @@ static int pgm_ev_to_ump_midi2(const struct snd_seq_event *event,
data->pg.bank_msb = cc->cc_bank_msb;
data->pg.bank_lsb = cc->cc_bank_lsb;
cc->bank_set = 0;
- cc->cc_bank_msb = cc->cc_bank_lsb = 0;
}
return 1;
}
@@ -917,7 +959,7 @@ static int ctrl14_ev_to_ump_midi2(const struct snd_seq_event *event,
cc->cc_data_lsb = lsb;
if (!(cc->rpn_set || cc->nrpn_set))
return 0; // skip
- fill_rpn(cc, data);
+ fill_rpn(cc, data, channel);
return 1;
}
@@ -978,7 +1020,7 @@ static int system_2p_ev_to_ump_midi2(const struct snd_seq_event *event,
union snd_ump_midi2_msg *data,
unsigned char status)
{
- return system_1p_ev_to_ump_midi1(event, dest_port,
+ return system_2p_ev_to_ump_midi1(event, dest_port,
(union snd_ump_midi1_msg *)data,
status);
}
@@ -1035,6 +1077,8 @@ static const struct seq_ev_to_ump seq_ev_ump_encoders[] = {
system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
{ SNDRV_SEQ_EVENT_SENSING, UMP_SYSTEM_STATUS_ACTIVE_SENSING,
system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
+ { SNDRV_SEQ_EVENT_RESET, UMP_SYSTEM_STATUS_RESET,
+ system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
};
static const struct seq_ev_to_ump *find_ump_encoder(int type)
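Several hunks above hinge on the MIDI Song Position Pointer byte order: the 14-bit value travels LSB first, so parm1 carries bits 0-6 and parm2 carries bits 7-13, which is what both the decoder and encoder fixes restore. A small illustrative helper pair (names are ours):

	static inline u16 example_songpos_decode(u8 parm1, u8 parm2)
	{
		return ((u16)parm2 << 7) | parm1;	/* MSB: parm2, LSB: parm1 */
	}

	static inline void example_songpos_encode(u16 value, u8 *parm1, u8 *parm2)
	{
		*parm1 = value & 0x7f;			/* LSB first */
		*parm2 = (value >> 7) & 0x7f;		/* then MSB */
	}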
diff --git a/sound/core/ump.c b/sound/core/ump.c
index fd6a68a54278..3f61220c23b4 100644
--- a/sound/core/ump.c
+++ b/sound/core/ump.c
@@ -685,10 +685,17 @@ static void seq_notify_protocol(struct snd_ump_endpoint *ump)
*/
int snd_ump_switch_protocol(struct snd_ump_endpoint *ump, unsigned int protocol)
{
+ unsigned int type;
+
protocol &= ump->info.protocol_caps;
if (protocol == ump->info.protocol)
return 0;
+ type = protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK;
+ if (type != SNDRV_UMP_EP_INFO_PROTO_MIDI1 &&
+ type != SNDRV_UMP_EP_INFO_PROTO_MIDI2)
+ return 0;
+
ump->info.protocol = protocol;
ump_dbg(ump, "New protocol = %x (caps = %x)\n",
protocol, ump->info.protocol_caps);
@@ -960,6 +967,14 @@ int snd_ump_parse_endpoint(struct snd_ump_endpoint *ump)
if (err < 0)
ump_dbg(ump, "Unable to get UMP EP stream config\n");
+ /* If no protocol is set for some reason, assume a valid one */
+ if (!(ump->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK)) {
+ if (ump->info.protocol_caps & SNDRV_UMP_EP_INFO_PROTO_MIDI2)
+ ump->info.protocol |= SNDRV_UMP_EP_INFO_PROTO_MIDI2;
+ else if (ump->info.protocol_caps & SNDRV_UMP_EP_INFO_PROTO_MIDI1)
+ ump->info.protocol |= SNDRV_UMP_EP_INFO_PROTO_MIDI1;
+ }
+
/* Query and create blocks from Function Blocks */
for (blk = 0; blk < ump->info.num_blocks; blk++) {
err = create_block_from_fb_info(ump, blk);
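Both ump.c hunks reduce to the same rule: only a protocol selecting one of the MIDI 1.0 / MIDI 2.0 bits is usable, and MIDI 2.0 is preferred when the endpoint left the field empty. A sketch of that fallback, assuming only the exported SNDRV_UMP_EP_INFO_PROTO_* flags:

	static unsigned int example_pick_protocol(unsigned int caps)
	{
		if (caps & SNDRV_UMP_EP_INFO_PROTO_MIDI2)
			return SNDRV_UMP_EP_INFO_PROTO_MIDI2;	/* prefer MIDI 2.0 */
		if (caps & SNDRV_UMP_EP_INFO_PROTO_MIDI1)
			return SNDRV_UMP_EP_INFO_PROTO_MIDI1;
		return 0;					/* nothing usable */
	}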
diff --git a/sound/core/ump_convert.c b/sound/core/ump_convert.c
index de04799fdb69..f67c44c83fde 100644
--- a/sound/core/ump_convert.c
+++ b/sound/core/ump_convert.c
@@ -404,7 +404,6 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
midi2->pg.bank_msb = cc->cc_bank_msb;
midi2->pg.bank_lsb = cc->cc_bank_lsb;
cc->bank_set = 0;
- cc->cc_bank_msb = cc->cc_bank_lsb = 0;
}
break;
case UMP_MSG_STATUS_CHANNEL_PRESSURE:
diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
index 5cfd0e99a13f..b1b333d1cf39 100644
--- a/sound/drivers/mts64.c
+++ b/sound/drivers/mts64.c
@@ -882,7 +882,6 @@ static struct parport_driver mts64_parport_driver = {
.probe = snd_mts64_dev_probe,
.match_port = snd_mts64_attach,
.detach = snd_mts64_detach,
- .devmodel = true,
};
/*********************************************************************
diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
index 619e3f594477..6fd9e8870021 100644
--- a/sound/drivers/portman2x4.c
+++ b/sound/drivers/portman2x4.c
@@ -668,7 +668,6 @@ static struct parport_driver portman_parport_driver = {
.probe = snd_portman_dev_probe,
.match_port = snd_portman_attach,
.detach = snd_portman_detach,
- .devmodel = true,
};
/*********************************************************************
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index cfdb1b73c88c..478d2b50c571 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -18,7 +18,7 @@
static int dsp_driver;
module_param(dsp_driver, int, 0444);
-MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF)");
+MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF, 4=AVS)");
#define FLAG_SST BIT(0)
#define FLAG_SOF BIT(1)
@@ -668,7 +668,7 @@ int snd_intel_dsp_driver_probe(struct pci_dev *pci)
return SND_INTEL_DSP_DRIVER_LEGACY;
}
- dev_info(&pci->dev, "DSP detected with PCI class/subclass/prog-if info 0x%06x\n", pci->class);
+ dev_dbg(&pci->dev, "DSP detected with PCI class/subclass/prog-if info 0x%06x\n", pci->class);
/* find the configuration for the specific device */
cfg = snd_intel_dsp_find_config(pci, config_table, ARRAY_SIZE(config_table));
@@ -678,12 +678,12 @@ int snd_intel_dsp_driver_probe(struct pci_dev *pci)
if (cfg->flags & FLAG_SOF) {
if (cfg->flags & FLAG_SOF_ONLY_IF_SOUNDWIRE &&
snd_intel_dsp_check_soundwire(pci) > 0) {
- dev_info(&pci->dev, "SoundWire enabled on CannonLake+ platform, using SOF driver\n");
+ dev_info_once(&pci->dev, "SoundWire enabled on CannonLake+ platform, using SOF driver\n");
return SND_INTEL_DSP_DRIVER_SOF;
}
if (cfg->flags & FLAG_SOF_ONLY_IF_DMIC &&
snd_intel_dsp_check_dmic(pci)) {
- dev_info(&pci->dev, "Digital mics found on Skylake+ platform, using SOF driver\n");
+ dev_info_once(&pci->dev, "Digital mics found on Skylake+ platform, using SOF driver\n");
return SND_INTEL_DSP_DRIVER_SOF;
}
if (!(cfg->flags & FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE))
@@ -694,7 +694,7 @@ int snd_intel_dsp_driver_probe(struct pci_dev *pci)
if (cfg->flags & FLAG_SST) {
if (cfg->flags & FLAG_SST_ONLY_IF_DMIC) {
if (snd_intel_dsp_check_dmic(pci)) {
- dev_info(&pci->dev, "Digital mics found on Skylake+ platform, using SST driver\n");
+ dev_info_once(&pci->dev, "Digital mics found on Skylake+ platform, using SST driver\n");
return SND_INTEL_DSP_DRIVER_SST;
}
} else {
diff --git a/sound/oss/dmasound/dmasound_core.c b/sound/oss/dmasound/dmasound_core.c
index 164335d3c200..4b1baf4dd50e 100644
--- a/sound/oss/dmasound/dmasound_core.c
+++ b/sound/oss/dmasound/dmasound_core.c
@@ -204,6 +204,7 @@ module_param(numWriteBufs, int, 0);
static unsigned int writeBufSize = DEFAULT_BUFF_SIZE ; /* in bytes */
module_param(writeBufSize, int, 0);
+MODULE_DESCRIPTION("Atari/Amiga/Q40 core DMA sound driver");
MODULE_LICENSE("GPL");
static int sq_unit = -1;
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 0da625533afc..a3cf0725fc43 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -162,6 +162,7 @@ config SND_HDA_SCODEC_CS35L56_I2C
depends on ACPI || COMPILE_TEST
depends on SND_SOC
select FW_CS_DSP
+ imply SERIAL_MULTI_INSTANTIATE
select SND_HDA_GENERIC
select SND_SOC_CS35L56_SHARED
select SND_HDA_SCODEC_CS35L56
@@ -178,6 +179,7 @@ config SND_HDA_SCODEC_CS35L56_SPI
depends on ACPI || COMPILE_TEST
depends on SND_SOC
select FW_CS_DSP
+ imply SERIAL_MULTI_INSTANTIATE
select SND_HDA_GENERIC
select SND_SOC_CS35L56_SHARED
select SND_HDA_SCODEC_CS35L56
diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
index 6c49e5c6cd20..031703f010be 100644
--- a/sound/pci/hda/cs35l41_hda.c
+++ b/sound/pci/hda/cs35l41_hda.c
@@ -1495,7 +1495,7 @@ static void cs35l41_hda_unbind(struct device *dev, struct device *master, void *
if (comps[cs35l41->index].dev == dev) {
memset(&comps[cs35l41->index], 0, sizeof(*comps));
sleep_flags = lock_system_sleep();
- device_link_remove(&comps->codec->core.dev, cs35l41->dev);
+ device_link_remove(&cs35l41->codec->core.dev, cs35l41->dev);
unlock_system_sleep(sleep_flags);
}
}
@@ -2019,6 +2019,8 @@ void cs35l41_hda_remove(struct device *dev)
{
struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+ component_del(cs35l41->dev, &cs35l41_hda_comp_ops);
+
pm_runtime_get_sync(cs35l41->dev);
pm_runtime_dont_use_autosuspend(cs35l41->dev);
pm_runtime_disable(cs35l41->dev);
@@ -2026,8 +2028,6 @@ void cs35l41_hda_remove(struct device *dev)
if (cs35l41->halo_initialized)
cs35l41_remove_dsp(cs35l41);
- component_del(cs35l41->dev, &cs35l41_hda_comp_ops);
-
acpi_dev_put(cs35l41->dacpi);
pm_runtime_put_noidle(cs35l41->dev);
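This remove() reordering (and the matching cs35l56 and tas2781 hunks below) follows one pattern: drop the component binding before tearing down runtime PM so no codec callback can race with the PM shutdown. A condensed sketch of the ordering, using the driver's own symbols; not a complete remove path:

	void example_hda_remove(struct device *dev)
	{
		struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);

		/* Unbind first: no more component callbacks after this point. */
		component_del(cs35l41->dev, &cs35l41_hda_comp_ops);

		/* Only then wake the device and dismantle runtime PM. */
		pm_runtime_get_sync(cs35l41->dev);
		pm_runtime_dont_use_autosuspend(cs35l41->dev);
		pm_runtime_disable(cs35l41->dev);
	}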
diff --git a/sound/pci/hda/cs35l41_hda_property.c b/sound/pci/hda/cs35l41_hda_property.c
index 6a7a6d486916..51998d1c72ff 100644
--- a/sound/pci/hda/cs35l41_hda_property.c
+++ b/sound/pci/hda/cs35l41_hda_property.c
@@ -128,6 +128,10 @@ static const struct cs35l41_config cs35l41_config_table[] = {
{ "17AA38B5", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
{ "17AA38B6", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
{ "17AA38B7", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
+ { "17AA38C7", 4, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT }, 0, 2, -1, 1000, 4500, 24 },
+ { "17AA38C8", 4, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT }, 0, 2, -1, 1000, 4500, 24 },
+ { "17AA38F9", 2, EXTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, 0, 0 }, 0, 2, -1, 0, 0, 0 },
+ { "17AA38FA", 2, EXTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, 0, 0 }, 0, 2, -1, 0, 0, 0 },
{}
};
@@ -529,6 +533,10 @@ static const struct cs35l41_prop_model cs35l41_prop_model_table[] = {
{ "CSC3551", "17AA38B5", generic_dsd_config },
{ "CSC3551", "17AA38B6", generic_dsd_config },
{ "CSC3551", "17AA38B7", generic_dsd_config },
+ { "CSC3551", "17AA38C7", generic_dsd_config },
+ { "CSC3551", "17AA38C8", generic_dsd_config },
+ { "CSC3551", "17AA38F9", generic_dsd_config },
+ { "CSC3551", "17AA38FA", generic_dsd_config },
{}
};
diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
index 11b0570ff56d..e134ede6c5aa 100644
--- a/sound/pci/hda/cs35l56_hda.c
+++ b/sound/pci/hda/cs35l56_hda.c
@@ -735,6 +735,8 @@ static void cs35l56_hda_unbind(struct device *dev, struct device *master, void *
if (comps[cs35l56->index].dev == dev)
memset(&comps[cs35l56->index], 0, sizeof(*comps));
+ cs35l56->codec = NULL;
+
dev_dbg(cs35l56->base.dev, "Unbound\n");
}
@@ -840,6 +842,9 @@ static int cs35l56_hda_system_resume(struct device *dev)
cs35l56->suspended = false;
+ if (!cs35l56->codec)
+ return 0;
+
ret = cs35l56_is_fw_reload_needed(&cs35l56->base);
dev_dbg(cs35l56->base.dev, "fw_reload_needed: %d\n", ret);
if (ret > 0) {
@@ -1072,12 +1077,12 @@ void cs35l56_hda_remove(struct device *dev)
{
struct cs35l56_hda *cs35l56 = dev_get_drvdata(dev);
+ component_del(cs35l56->base.dev, &cs35l56_hda_comp_ops);
+
pm_runtime_dont_use_autosuspend(cs35l56->base.dev);
pm_runtime_get_sync(cs35l56->base.dev);
pm_runtime_disable(cs35l56->base.dev);
- component_del(cs35l56->base.dev, &cs35l56_hda_comp_ops);
-
cs_dsp_remove(&cs35l56->cs_dsp);
kfree(cs35l56->system_name);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e3c0b9d5552d..811e82474200 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -583,10 +583,14 @@ static void alc_shutup_pins(struct hda_codec *codec)
switch (codec->core.vendor_id) {
case 0x10ec0236:
case 0x10ec0256:
+ case 0x10ec0257:
case 0x19e58326:
case 0x10ec0283:
+ case 0x10ec0285:
case 0x10ec0286:
+ case 0x10ec0287:
case 0x10ec0288:
+ case 0x10ec0295:
case 0x10ec0298:
alc_headset_mic_no_shutup(codec);
break;
@@ -7520,6 +7524,8 @@ enum {
ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1,
ALC287_FIXUP_LENOVO_THKPAD_WH_ALC1318,
ALC256_FIXUP_CHROME_BOOK,
+ ALC287_FIXUP_LENOVO_14ARP8_LEGION_IAH7,
+ ALC287_FIXUP_LENOVO_SSID_17AA3820,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -7559,6 +7565,21 @@ static void alc287_fixup_lenovo_14irp8_duetitl(struct hda_codec *codec,
__snd_hda_apply_fixup(codec, id, action, 0);
}
+/* Similar to above, the Lenovo Yoga Pro 7 14ARP8 PCI SSID matches the codec
+ * SSID of the Legion Y9000X 2022 IAH7. */
+static void alc287_fixup_lenovo_14arp8_legion_iah7(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ int id;
+
+ if (codec->core.subsystem_id == 0x17aa386e)
+ id = ALC287_FIXUP_CS35L41_I2C_2; /* Legion Y9000X 2022 IAH7 */
+ else
+ id = ALC285_FIXUP_SPEAKER2_TO_DAC1; /* Yoga Pro 7 14ARP8 */
+ __snd_hda_apply_fixup(codec, id, action, 0);
+}
+
/* Another hilarious PCI SSID conflict with Lenovo Legion Pro 7 16ARX8H (with
* TAS2781 codec) and Legion 7i 16IAX7 (with CS35L41 codec);
* we apply a corresponding fixup depending on the codec SSID instead
@@ -7576,6 +7597,20 @@ static void alc287_fixup_lenovo_legion_7(struct hda_codec *codec,
__snd_hda_apply_fixup(codec, id, action, 0);
}
+/* Yet more conflicting PCI SSID (17aa:3820) on two Lenovo models */
+static void alc287_fixup_lenovo_ssid_17aa3820(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ int id;
+
+ if (codec->core.subsystem_id == 0x17aa3820)
+ id = ALC269_FIXUP_ASPIRE_HEADSET_MIC; /* IdeaPad 330-17IKB 81DM */
+ else /* 0x17aa3802 */
+ id = ALC287_FIXUP_YOGA7_14ITL_SPEAKERS; /* Yoga Duet 7 13ITL6 */
+ __snd_hda_apply_fixup(codec, id, action, 0);
+}
+
static const struct hda_fixup alc269_fixups[] = {
[ALC269_FIXUP_GPIO2] = {
.type = HDA_FIXUP_FUNC,
@@ -9658,6 +9693,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK,
},
+ [ALC287_FIXUP_LENOVO_14ARP8_LEGION_IAH7] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc287_fixup_lenovo_14arp8_legion_iah7,
+ },
[ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc287_fixup_yoga9_14iap7_bass_spk_pin,
@@ -9808,6 +9847,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC225_FIXUP_HEADSET_JACK
},
+ [ALC287_FIXUP_LENOVO_SSID_17AA3820] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc287_fixup_lenovo_ssid_17aa3820,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -10045,6 +10088,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x87b7, "HP Laptop 14-fq0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x87d3, "HP Laptop 15-gw0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f1, "HP ProBook 630 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
@@ -10194,6 +10238,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c7b, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c7c, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c7d, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c7e, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c7f, "HP EliteBook 645 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c80, "HP EliteBook 645 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c81, "HP EliteBook 665 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8c89, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c8a, "HP EliteBook 630", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
@@ -10310,7 +10361,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
@@ -10502,7 +10553,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7),
SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
- SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3820, "IdeaPad 330 / Yoga Duet 7", ALC287_FIXUP_LENOVO_SSID_17AA3820),
SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
@@ -10516,7 +10567,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3865, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x3866, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
- SND_PCI_QUIRK(0x17aa, 0x386e, "Legion Y9000X 2022 IAH7", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x17aa, 0x386e, "Legion Y9000X 2022 IAH7 / Yoga Pro 7 14ARP8", ALC287_FIXUP_LENOVO_14ARP8_LEGION_IAH7),
SND_PCI_QUIRK(0x17aa, 0x386f, "Legion Pro 7/7i", ALC287_FIXUP_LENOVO_LEGION_7),
SND_PCI_QUIRK(0x17aa, 0x3870, "Lenovo Yoga 7 14ARB7", ALC287_FIXUP_YOGA7_14ARB7_I2C),
SND_PCI_QUIRK(0x17aa, 0x3877, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10527,6 +10578,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3882, "Lenovo Yoga Pro 7 14APH8", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x3884, "Y780 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3886, "Y780 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x3891, "Lenovo Yoga Pro 7 14AHP9", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x38a7, "Y780P AMD YG dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38a8, "Y780P AMD VECO dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
@@ -10540,10 +10592,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x38be, "Yoga S980-14.5 proX YC Dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38bf, "Yoga S980-14.5 proX LX Dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38c3, "Y980 DUAL", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x38c7, "Thinkbook 13x Gen 4", ALC287_FIXUP_CS35L41_I2C_4),
+ SND_PCI_QUIRK(0x17aa, 0x38c8, "Thinkbook 13x Gen 4", ALC287_FIXUP_CS35L41_I2C_4),
SND_PCI_QUIRK(0x17aa, 0x38cb, "Y790 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38cd, "Y790 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38d2, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x38d7, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -10581,6 +10637,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+ SND_PCI_QUIRK(0x1c6c, 0x122a, "Positivo N14AP7", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1c6c, 0x1251, "Positivo N14KP6-TG", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP),
@@ -10605,7 +10662,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0xf111, 0x0005, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
#if 0
diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c
index 75f7674c66ee..fdee6592c502 100644
--- a/sound/pci/hda/tas2781_hda_i2c.c
+++ b/sound/pci/hda/tas2781_hda_i2c.c
@@ -777,11 +777,11 @@ static void tas2781_hda_remove(struct device *dev)
{
struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+ component_del(tas_hda->dev, &tas2781_hda_comp_ops);
+
pm_runtime_get_sync(tas_hda->dev);
pm_runtime_disable(tas_hda->dev);
- component_del(tas_hda->dev, &tas2781_hda_comp_ops);
-
pm_runtime_put_noidle(tas_hda->dev);
tasdevice_remove(tas_hda->priv);
diff --git a/sound/soc/amd/acp/acp-i2s.c b/sound/soc/amd/acp/acp-i2s.c
index 60cbc881be6e..ef12f97ddc69 100644
--- a/sound/soc/amd/acp/acp-i2s.c
+++ b/sound/soc/amd/acp/acp-i2s.c
@@ -588,20 +588,12 @@ static int acp_i2s_probe(struct snd_soc_dai *dai)
{
struct device *dev = dai->component->dev;
struct acp_dev_data *adata = dev_get_drvdata(dev);
- struct acp_resource *rsrc = adata->rsrc;
- unsigned int val;
if (!adata->acp_base) {
dev_err(dev, "I2S base is NULL\n");
return -EINVAL;
}
- val = readl(adata->acp_base + rsrc->i2s_pin_cfg_offset);
- if (val != rsrc->i2s_mode) {
- dev_err(dev, "I2S Mode not supported val %x\n", val);
- return -EINVAL;
- }
-
return 0;
}
diff --git a/sound/soc/amd/acp/acp-pci.c b/sound/soc/amd/acp/acp-pci.c
index ad320b29e87d..777b5a78d8a9 100644
--- a/sound/soc/amd/acp/acp-pci.c
+++ b/sound/soc/amd/acp/acp-pci.c
@@ -100,6 +100,7 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
ret = -EINVAL;
goto release_regions;
}
+ chip->flag = flag;
dmic_dev = platform_device_register_data(dev, "dmic-codec", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(dmic_dev)) {
dev_err(dev, "failed to create DMIC device\n");
@@ -139,7 +140,6 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
}
}
- chip->flag = flag;
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.name = chip->name;
@@ -199,10 +199,12 @@ static int __maybe_unused snd_acp_resume(struct device *dev)
ret = acp_init(chip);
if (ret)
dev_err(dev, "ACP init failed\n");
- child = chip->chip_pdev->dev;
- adata = dev_get_drvdata(&child);
- if (adata)
- acp_enable_interrupts(adata);
+ if (chip->chip_pdev) {
+ child = chip->chip_pdev->dev;
+ adata = dev_get_drvdata(&child);
+ if (adata)
+ acp_enable_interrupts(adata);
+ }
return ret;
}
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index 1760b5d42460..4e3a8ce690a4 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -283,6 +283,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "M5402RA"),
}
},
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M5602RA"),
+ }
+ },
{
.driver_data = &acp6x_card,
.matches = {
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 6aed1ee443b4..ba314b279919 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -473,19 +473,22 @@ static int atmel_classd_asoc_card_init(struct device *dev,
if (!dai_link)
return -ENOMEM;
- comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
+ comp = devm_kzalloc(dev, 2 * sizeof(*comp), GFP_KERNEL);
if (!comp)
return -ENOMEM;
- dai_link->cpus = comp;
+ dai_link->cpus = &comp[0];
dai_link->codecs = &snd_soc_dummy_dlc;
+ dai_link->platforms = &comp[1];
dai_link->num_cpus = 1;
dai_link->num_codecs = 1;
+ dai_link->num_platforms = 1;
dai_link->name = "CLASSD";
dai_link->stream_name = "CLASSD PCM";
dai_link->cpus->dai_name = dev_name(dev);
+ dai_link->platforms->name = dev_name(dev);
card->dai_link = dai_link;
card->num_links = 1;
diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c
index 8af89a263594..30497152e02a 100644
--- a/sound/soc/codecs/cs35l56-shared.c
+++ b/sound/soc/codecs/cs35l56-shared.c
@@ -215,6 +215,10 @@ static const struct reg_sequence cs35l56_asp1_defaults[] = {
REG_SEQ0(CS35L56_ASP1_FRAME_CONTROL5, 0x00020100),
REG_SEQ0(CS35L56_ASP1_DATA_CONTROL1, 0x00000018),
REG_SEQ0(CS35L56_ASP1_DATA_CONTROL5, 0x00000018),
+ REG_SEQ0(CS35L56_ASP1TX1_INPUT, 0x00000000),
+ REG_SEQ0(CS35L56_ASP1TX2_INPUT, 0x00000000),
+ REG_SEQ0(CS35L56_ASP1TX3_INPUT, 0x00000000),
+ REG_SEQ0(CS35L56_ASP1TX4_INPUT, 0x00000000),
};
/*
diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c
index 901b9dbcf585..d9ab003e166b 100644
--- a/sound/soc/codecs/cs42l43-jack.c
+++ b/sound/soc/codecs/cs42l43-jack.c
@@ -121,7 +121,7 @@ int cs42l43_set_jack(struct snd_soc_component *component,
priv->buttons[3] = 735;
}
- ret = cs42l43_find_index(priv, "cirrus,detect-us", 1000, &priv->detect_us,
+ ret = cs42l43_find_index(priv, "cirrus,detect-us", 50000, &priv->detect_us,
cs42l43_accdet_us, ARRAY_SIZE(cs42l43_accdet_us));
if (ret < 0)
goto error;
@@ -433,7 +433,7 @@ irqreturn_t cs42l43_button_press(int irq, void *data)
// Wait for 2 full cycles of comb filter to ensure good reading
queue_delayed_work(system_wq, &priv->button_press_work,
- msecs_to_jiffies(10));
+ msecs_to_jiffies(20));
return IRQ_HANDLED;
}
diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
index 94685449f0f4..92674314227c 100644
--- a/sound/soc/codecs/cs42l43.c
+++ b/sound/soc/codecs/cs42l43.c
@@ -310,8 +310,9 @@ static int cs42l43_startup(struct snd_pcm_substream *substream, struct snd_soc_d
struct snd_soc_component *component = dai->component;
struct cs42l43_codec *priv = snd_soc_component_get_drvdata(component);
struct cs42l43 *cs42l43 = priv->core;
- int provider = !!regmap_test_bits(cs42l43->regmap, CS42L43_ASP_CLK_CONFIG2,
- CS42L43_ASP_MASTER_MODE_MASK);
+ int provider = !dai->id || !!regmap_test_bits(cs42l43->regmap,
+ CS42L43_ASP_CLK_CONFIG2,
+ CS42L43_ASP_MASTER_MODE_MASK);
if (provider)
priv->constraint.mask = CS42L43_PROVIDER_RATE_MASK;
diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
index 03b539ba540f..6a4e42e5e35b 100644
--- a/sound/soc/codecs/es8326.c
+++ b/sound/soc/codecs/es8326.c
@@ -857,12 +857,16 @@ static void es8326_jack_detect_handler(struct work_struct *work)
* set auto-check mode, then restart jack_detect_work after 400ms.
* Don't report jack status.
*/
- regmap_write(es8326->regmap, ES8326_INT_SOURCE,
- (ES8326_INT_SRC_PIN9 | ES8326_INT_SRC_BUTTON));
+ regmap_write(es8326->regmap, ES8326_INT_SOURCE, 0x00);
regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x01);
+ regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x10, 0x00);
es8326_enable_micbias(es8326->component);
usleep_range(50000, 70000);
regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x00);
+ regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x10, 0x10);
+ usleep_range(50000, 70000);
+ regmap_write(es8326->regmap, ES8326_INT_SOURCE,
+ (ES8326_INT_SRC_PIN9 | ES8326_INT_SRC_BUTTON));
regmap_write(es8326->regmap, ES8326_SYS_BIAS, 0x1f);
regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x0f, 0x08);
queue_delayed_work(system_wq, &es8326->jack_detect_work,
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index cdb7ff7020e9..51187b1e0ed2 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -81,7 +81,7 @@ static const struct reg_sequence init_list[] = {
static const struct reg_sequence rt5650_init_list[] = {
{0xf6, 0x0100},
{RT5645_PWR_ANLG1, 0x02},
- {RT5645_IL_CMD3, 0x0018},
+ {RT5645_IL_CMD3, 0x6728},
};
static const struct reg_default rt5645_reg[] = {
@@ -3130,20 +3130,32 @@ static void rt5645_enable_push_button_irq(struct snd_soc_component *component,
bool enable)
{
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
+ int ret;
if (enable) {
snd_soc_dapm_force_enable_pin(dapm, "ADC L power");
snd_soc_dapm_force_enable_pin(dapm, "ADC R power");
snd_soc_dapm_sync(dapm);
+ snd_soc_component_update_bits(component, RT5650_4BTN_IL_CMD2,
+ RT5645_EN_4BTN_IL_MASK | RT5645_RST_4BTN_IL_MASK,
+ RT5645_EN_4BTN_IL_EN | RT5645_RST_4BTN_IL_RST);
+ usleep_range(10000, 15000);
+ snd_soc_component_update_bits(component, RT5650_4BTN_IL_CMD2,
+ RT5645_EN_4BTN_IL_MASK | RT5645_RST_4BTN_IL_MASK,
+ RT5645_EN_4BTN_IL_EN | RT5645_RST_4BTN_IL_NORM);
+ msleep(50);
+ ret = snd_soc_component_read(component, RT5645_INT_IRQ_ST);
+ pr_debug("%s read %x = %x\n", __func__, RT5645_INT_IRQ_ST,
+ snd_soc_component_read(component, RT5645_INT_IRQ_ST));
+ snd_soc_component_write(component, RT5645_INT_IRQ_ST, ret);
+ ret = snd_soc_component_read(component, RT5650_4BTN_IL_CMD1);
+ pr_debug("%s read %x = %x\n", __func__, RT5650_4BTN_IL_CMD1,
+ snd_soc_component_read(component, RT5650_4BTN_IL_CMD1));
+ snd_soc_component_write(component, RT5650_4BTN_IL_CMD1, ret);
snd_soc_component_update_bits(component, RT5650_4BTN_IL_CMD1, 0x3, 0x3);
snd_soc_component_update_bits(component,
RT5645_INT_IRQ_ST, 0x8, 0x8);
- snd_soc_component_update_bits(component,
- RT5650_4BTN_IL_CMD2, 0x8000, 0x8000);
- snd_soc_component_read(component, RT5650_4BTN_IL_CMD1);
- pr_debug("%s read %x = %x\n", __func__, RT5650_4BTN_IL_CMD1,
- snd_soc_component_read(component, RT5650_4BTN_IL_CMD1));
} else {
snd_soc_component_update_bits(component, RT5650_4BTN_IL_CMD2, 0x8000, 0x0);
snd_soc_component_update_bits(component, RT5645_INT_IRQ_ST, 0x8, 0x0);
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 90816b2c5489..bef74b29fd54 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -2011,6 +2011,12 @@
#define RT5645_ZCD_HP_DIS (0x0 << 15)
#define RT5645_ZCD_HP_EN (0x1 << 15)
+/* Buttons Inline Command Function 2 (0xe0) */
+#define RT5645_EN_4BTN_IL_MASK (0x1 << 15)
+#define RT5645_EN_4BTN_IL_EN (0x1 << 15)
+#define RT5645_RST_4BTN_IL_MASK (0x1 << 14)
+#define RT5645_RST_4BTN_IL_RST (0x0 << 14)
+#define RT5645_RST_4BTN_IL_NORM (0x1 << 14)
/* Codec Private Register definition */
/* DAC ADC Digital Volume (0x00) */
diff --git a/sound/soc/codecs/rt722-sdca-sdw.c b/sound/soc/codecs/rt722-sdca-sdw.c
index b33da2215ade..87354bb1564e 100644
--- a/sound/soc/codecs/rt722-sdca-sdw.c
+++ b/sound/soc/codecs/rt722-sdca-sdw.c
@@ -68,6 +68,7 @@ static bool rt722_sdca_mbq_readable_register(struct device *dev, unsigned int re
case 0x200007f:
case 0x2000082 ... 0x200008e:
case 0x2000090 ... 0x2000094:
+ case 0x3110000:
case 0x5300000 ... 0x5300002:
case 0x5400002:
case 0x5600000 ... 0x5600007:
@@ -125,6 +126,7 @@ static bool rt722_sdca_mbq_volatile_register(struct device *dev, unsigned int re
case 0x2000067:
case 0x2000084:
case 0x2000086:
+ case 0x3110000:
return true;
default:
return false;
@@ -350,7 +352,7 @@ static int rt722_sdca_interrupt_callback(struct sdw_slave *slave,
if (status->sdca_cascade && !rt722->disable_irq)
mod_delayed_work(system_power_efficient_wq,
- &rt722->jack_detect_work, msecs_to_jiffies(30));
+ &rt722->jack_detect_work, msecs_to_jiffies(280));
mutex_unlock(&rt722->disable_irq_lock);
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index c9d9a7b28efb..68d2d6444533 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -2085,5 +2085,6 @@ static const struct cs_dsp_client_ops wm_adsp2_client_ops = {
.watchdog_expired = wm_adsp_fatal_error,
};
+MODULE_DESCRIPTION("Cirrus Logic ASoC DSP Support");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(FW_CS_DSP);
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 5ddc0c2fe53f..eb67689dcd6e 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -559,6 +559,8 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->pdev = pdev;
+
cpu_np = of_parse_phandle(np, "audio-cpu", 0);
/* Give a chance to old DT binding */
if (!cpu_np)
@@ -787,7 +789,6 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
}
/* Initialize sound card */
- priv->pdev = pdev;
priv->card.dev = &pdev->dev;
priv->card.owner = THIS_MODULE;
ret = snd_soc_of_parse_card_name(&priv->card, "model");
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index 14e94270911c..4fa208d6a032 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -50,4 +50,5 @@ int imx_pcm_dma_init(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
+MODULE_DESCRIPTION("Freescale i.MX PCM DMA interface");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/topology.c b/sound/soc/intel/avs/topology.c
index 02bae207f6ec..b6c5d94a1554 100644
--- a/sound/soc/intel/avs/topology.c
+++ b/sound/soc/intel/avs/topology.c
@@ -1545,8 +1545,8 @@ static int avs_route_load(struct snd_soc_component *comp, int index,
{
struct snd_soc_acpi_mach *mach = dev_get_platdata(comp->card->dev);
size_t len = SNDRV_CTL_ELEM_ID_NAME_MAXLEN;
- char buf[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
int ssp_port, tdm_slot;
+ char *buf;
/* See parse_link_formatted_string() for dynamic naming when(s). */
if (!avs_mach_singular_ssp(mach))
@@ -1557,13 +1557,24 @@ static int avs_route_load(struct snd_soc_component *comp, int index,
return 0;
tdm_slot = avs_mach_ssp_tdm(mach, ssp_port);
+ buf = devm_kzalloc(comp->card->dev, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
avs_ssp_sprint(buf, len, route->source, ssp_port, tdm_slot);
- strscpy((char *)route->source, buf, len);
+ route->source = buf;
+
+ buf = devm_kzalloc(comp->card->dev, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
avs_ssp_sprint(buf, len, route->sink, ssp_port, tdm_slot);
- strscpy((char *)route->sink, buf, len);
+ route->sink = buf;
+
if (route->control) {
+ buf = devm_kzalloc(comp->card->dev, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
avs_ssp_sprint(buf, len, route->control, ssp_port, tdm_slot);
- strscpy((char *)route->control, buf, len);
+ route->control = buf;
}
return 0;
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 3ed81ab649c5..4e0586034de4 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -652,7 +652,7 @@ if SND_SOC_SOF_INTEL_SOUNDWIRE
config SND_SOC_INTEL_SOUNDWIRE_SOF_MACH
tristate "SoundWire generic machine driver"
- depends on I2C && ACPI
+ depends on I2C && SPI_MASTER && ACPI
depends on MFD_INTEL_LPSS || COMPILE_TEST
depends on SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES || COMPILE_TEST
depends on SOUNDWIRE
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index b41a1147f1c3..a64d1989e28a 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -613,6 +613,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
{
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ARCHOS 101 CESIUM"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_JD_NOT_INV |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ARCHOS 140 CESIUM"),
},
.driver_data = (void *)(BYT_RT5640_IN1_MAP |
diff --git a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
index 48252fa9e39e..8e0ae3635a35 100644
--- a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
@@ -293,7 +293,7 @@ static const struct snd_soc_acpi_adr_device rt1318_1_single_adr[] = {
.adr = 0x000130025D131801,
.num_endpoints = 1,
.endpoints = &single_endpoint,
- .name_prefix = "rt1318"
+ .name_prefix = "rt1318-1"
}
};
diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
index acaf81fd6c9b..f848e14b091a 100644
--- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
+++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
@@ -31,7 +31,7 @@ struct mt8183_da7219_max98357_priv {
static struct snd_soc_jack_pin mt8183_da7219_max98357_jack_pins[] = {
{
- .pin = "Headphone",
+ .pin = "Headphones",
.mask = SND_JACK_HEADPHONE,
},
{
@@ -626,7 +626,7 @@ static struct snd_soc_codec_conf mt6358_codec_conf[] = {
};
static const struct snd_kcontrol_new mt8183_da7219_max98357_snd_controls[] = {
- SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headphones"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Speakers"),
SOC_DAPM_PIN_SWITCH("Line Out"),
@@ -634,7 +634,7 @@ static const struct snd_kcontrol_new mt8183_da7219_max98357_snd_controls[] = {
static const
struct snd_soc_dapm_widget mt8183_da7219_max98357_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_HP("Headphones", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_SPK("Speakers", NULL),
SND_SOC_DAPM_SPK("Line Out", NULL),
@@ -680,7 +680,7 @@ static struct snd_soc_codec_conf mt8183_da7219_rt1015_codec_conf[] = {
};
static const struct snd_kcontrol_new mt8183_da7219_rt1015_snd_controls[] = {
- SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headphones"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Left Spk"),
SOC_DAPM_PIN_SWITCH("Right Spk"),
@@ -689,7 +689,7 @@ static const struct snd_kcontrol_new mt8183_da7219_rt1015_snd_controls[] = {
static const
struct snd_soc_dapm_widget mt8183_da7219_rt1015_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_HP("Headphones", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_SPK("Left Spk", NULL),
SND_SOC_DAPM_SPK("Right Spk", NULL),
diff --git a/sound/soc/mediatek/mt8195/mt8195-mt6359.c b/sound/soc/mediatek/mt8195/mt8195-mt6359.c
index ca8751190520..2832ef78eaed 100644
--- a/sound/soc/mediatek/mt8195/mt8195-mt6359.c
+++ b/sound/soc/mediatek/mt8195/mt8195-mt6359.c
@@ -827,6 +827,7 @@ SND_SOC_DAILINK_DEFS(ETDM2_IN_BE,
SND_SOC_DAILINK_DEFS(ETDM1_OUT_BE,
DAILINK_COMP_ARRAY(COMP_CPU("ETDM1_OUT")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
DAILINK_COMP_ARRAY(COMP_EMPTY()));
SND_SOC_DAILINK_DEFS(ETDM2_OUT_BE,
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c
index df2e4be992d2..9bb08cadeb18 100644
--- a/sound/soc/mxs/mxs-pcm.c
+++ b/sound/soc/mxs/mxs-pcm.c
@@ -43,4 +43,5 @@ int mxs_pcm_platform_register(struct device *dev)
}
EXPORT_SYMBOL_GPL(mxs_pcm_platform_register);
+MODULE_DESCRIPTION("MXS ASoC PCM driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
index 68a38f63a2db..66b911b49e3f 100644
--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
@@ -141,14 +141,17 @@ static void q6apm_lpass_dai_shutdown(struct snd_pcm_substream *substream, struct
struct q6apm_lpass_dai_data *dai_data = dev_get_drvdata(dai->dev);
int rc;
- if (!dai_data->is_port_started[dai->id])
- return;
- rc = q6apm_graph_stop(dai_data->graph[dai->id]);
- if (rc < 0)
- dev_err(dai->dev, "fail to close APM port (%d)\n", rc);
+ if (dai_data->is_port_started[dai->id]) {
+ rc = q6apm_graph_stop(dai_data->graph[dai->id]);
+ dai_data->is_port_started[dai->id] = false;
+ if (rc < 0)
+ dev_err(dai->dev, "fail to close APM port (%d)\n", rc);
+ }
- q6apm_graph_close(dai_data->graph[dai->id]);
- dai_data->is_port_started[dai->id] = false;
+ if (dai_data->graph[dai->id]) {
+ q6apm_graph_close(dai_data->graph[dai->id]);
+ dai_data->graph[dai->id] = NULL;
+ }
}
static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
@@ -163,8 +166,10 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s
q6apm_graph_stop(dai_data->graph[dai->id]);
dai_data->is_port_started[dai->id] = false;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
q6apm_graph_close(dai_data->graph[dai->id]);
+ dai_data->graph[dai->id] = NULL;
+ }
}
/**
@@ -183,26 +188,29 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s
cfg->direction = substream->stream;
rc = q6apm_graph_media_format_pcm(dai_data->graph[dai->id], cfg);
-
if (rc) {
dev_err(dai->dev, "Failed to set media format %d\n", rc);
- return rc;
+ goto err;
}
rc = q6apm_graph_prepare(dai_data->graph[dai->id]);
if (rc) {
dev_err(dai->dev, "Failed to prepare Graph %d\n", rc);
- return rc;
+ goto err;
}
rc = q6apm_graph_start(dai_data->graph[dai->id]);
if (rc < 0) {
dev_err(dai->dev, "fail to start APM port %x\n", dai->id);
- return rc;
+ goto err;
}
dai_data->is_port_started[dai->id] = true;
return 0;
+err:
+ q6apm_graph_close(dai_data->graph[dai->id]);
+ dai_data->graph[dai->id] = NULL;
+ return rc;
}
static int q6apm_lpass_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
diff --git a/sound/soc/qcom/sdw.c b/sound/soc/qcom/sdw.c
index eaa8bb016e50..f2eda2ff46c0 100644
--- a/sound/soc/qcom/sdw.c
+++ b/sound/soc/qcom/sdw.c
@@ -160,4 +160,5 @@ int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
return 0;
}
EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free);
+MODULE_DESCRIPTION("Qualcomm ASoC SoundWire helper functions");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
index 9fa020ef7eab..ee517d7b5b7b 100644
--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
+++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
@@ -655,8 +655,17 @@ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
int err;
if (i2s_tdm->is_master_mode) {
- struct clk *mclk = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
- i2s_tdm->mclk_tx : i2s_tdm->mclk_rx;
+ struct clk *mclk;
+
+ if (i2s_tdm->clk_trcm == TRCM_TX) {
+ mclk = i2s_tdm->mclk_tx;
+ } else if (i2s_tdm->clk_trcm == TRCM_RX) {
+ mclk = i2s_tdm->mclk_rx;
+ } else if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ mclk = i2s_tdm->mclk_tx;
+ } else {
+ mclk = i2s_tdm->mclk_rx;
+ }
err = clk_set_rate(mclk, DEFAULT_MCLK_FS * params_rate(params));
if (err)
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index ea3bc9318412..a63e942fdc0b 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -318,6 +318,12 @@ static int dmaengine_copy(struct snd_soc_component *component,
return 0;
}
+static int dmaengine_pcm_sync_stop(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return snd_dmaengine_pcm_sync_stop(substream);
+}
+
static const struct snd_soc_component_driver dmaengine_pcm_component = {
.name = SND_DMAENGINE_PCM_DRV_NAME,
.probe_order = SND_SOC_COMP_ORDER_LATE,
@@ -327,6 +333,7 @@ static const struct snd_soc_component_driver dmaengine_pcm_component = {
.trigger = dmaengine_pcm_trigger,
.pointer = dmaengine_pcm_pointer,
.pcm_construct = dmaengine_pcm_new,
+ .sync_stop = dmaengine_pcm_sync_stop,
};
static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
@@ -339,6 +346,7 @@ static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
.pointer = dmaengine_pcm_pointer,
.copy = dmaengine_copy,
.pcm_construct = dmaengine_pcm_new,
+ .sync_stop = dmaengine_pcm_sync_stop,
};
static const char * const dmaengine_pcm_dma_channel_names[] = {
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 90ca37e008b3..6951ff7bc61e 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1021,6 +1021,7 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
struct snd_soc_tplg_hdr *hdr)
{
struct snd_soc_dapm_context *dapm = &tplg->comp->dapm;
+ const size_t maxlen = SNDRV_CTL_ELEM_ID_NAME_MAXLEN;
struct snd_soc_tplg_dapm_graph_elem *elem;
struct snd_soc_dapm_route *route;
int count, i;
@@ -1044,31 +1045,27 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
tplg->pos += sizeof(struct snd_soc_tplg_dapm_graph_elem);
/* validate routes */
- if (strnlen(elem->source, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
- SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
+ if ((strnlen(elem->source, maxlen) == maxlen) ||
+ (strnlen(elem->sink, maxlen) == maxlen) ||
+ (strnlen(elem->control, maxlen) == maxlen)) {
ret = -EINVAL;
break;
}
- if (strnlen(elem->sink, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
- SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
- ret = -EINVAL;
- break;
- }
- if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
- SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
- ret = -EINVAL;
+
+ route->source = devm_kstrdup(tplg->dev, elem->source, GFP_KERNEL);
+ route->sink = devm_kstrdup(tplg->dev, elem->sink, GFP_KERNEL);
+ if (!route->source || !route->sink) {
+ ret = -ENOMEM;
break;
}
- route->source = elem->source;
- route->sink = elem->sink;
-
- /* set to NULL atm for tplg users */
- route->connected = NULL;
- if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == 0)
- route->control = NULL;
- else
- route->control = elem->control;
+ if (strnlen(elem->control, maxlen) != 0) {
+ route->control = devm_kstrdup(tplg->dev, elem->control, GFP_KERNEL);
+ if (!route->control) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
/* add route dobj to dobj_list */
route->dobj.type = SND_SOC_DOBJ_GRAPH;
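This topology hunk and the avs route_load change above solve the same lifetime problem: DAPM route strings must outlive the topology buffer they were parsed from, so they are duplicated into device-managed memory instead of being pointed at directly. A minimal sketch of that ownership pattern (the helper name is ours):

	static int example_dup_route(struct device *dev,
				     struct snd_soc_dapm_route *route,
				     const char *source, const char *sink)
	{
		/* devm_kstrdup() ties the copies to the device's lifetime. */
		route->source = devm_kstrdup(dev, source, GFP_KERNEL);
		route->sink = devm_kstrdup(dev, sink, GFP_KERNEL);
		if (!route->source || !route->sink)
			return -ENOMEM;
		return 0;
	}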
diff --git a/sound/soc/sof/amd/acp-common.c b/sound/soc/sof/amd/acp-common.c
index b26fa471b431..81bb93e98358 100644
--- a/sound/soc/sof/amd/acp-common.c
+++ b/sound/soc/sof/amd/acp-common.c
@@ -258,8 +258,8 @@ const struct snd_sof_dsp_ops sof_acp_common_ops = {
};
EXPORT_SYMBOL_NS(sof_acp_common_ops, SND_SOC_SOF_AMD_COMMON);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("ACP SOF COMMON Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SOUNDWIRE_AMD_INIT);
-MODULE_DESCRIPTION("ACP SOF COMMON Driver");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
index c12c7f820529..74fd5f2b148b 100644
--- a/sound/soc/sof/amd/acp.c
+++ b/sound/soc/sof/amd/acp.c
@@ -801,7 +801,7 @@ void amd_sof_acp_remove(struct snd_sof_dev *sdev)
}
EXPORT_SYMBOL_NS(amd_sof_acp_remove, SND_SOC_SOF_AMD_COMMON);
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("AMD ACP sof driver");
MODULE_IMPORT_NS(SOUNDWIRE_AMD_INIT);
MODULE_IMPORT_NS(SND_AMD_SOUNDWIRE_ACPI);
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/amd/acp63.c b/sound/soc/sof/amd/acp63.c
index 9fb645079c3a..9e6eb4bfc805 100644
--- a/sound/soc/sof/amd/acp63.c
+++ b/sound/soc/sof/amd/acp63.c
@@ -140,7 +140,3 @@ int sof_acp63_ops_init(struct snd_sof_dev *sdev)
return 0;
}
-
-MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
-MODULE_DESCRIPTION("ACP63 SOF Driver");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/amd/pci-acp63.c b/sound/soc/sof/amd/pci-acp63.c
index eeaa12cceb23..fc8984447365 100644
--- a/sound/soc/sof/amd/pci-acp63.c
+++ b/sound/soc/sof/amd/pci-acp63.c
@@ -109,5 +109,6 @@ static struct pci_driver snd_sof_pci_amd_acp63_driver = {
module_pci_driver(snd_sof_pci_amd_acp63_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("ACP63 SOF Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/amd/pci-rmb.c b/sound/soc/sof/amd/pci-rmb.c
index 2f288545c426..4bc30951f8b0 100644
--- a/sound/soc/sof/amd/pci-rmb.c
+++ b/sound/soc/sof/amd/pci-rmb.c
@@ -99,5 +99,6 @@ static struct pci_driver snd_sof_pci_amd_rmb_driver = {
module_pci_driver(snd_sof_pci_amd_rmb_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("REMBRANDT SOF Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/amd/pci-rn.c b/sound/soc/sof/amd/pci-rn.c
index a0195e9b400c..e08875bdfa8b 100644
--- a/sound/soc/sof/amd/pci-rn.c
+++ b/sound/soc/sof/amd/pci-rn.c
@@ -103,5 +103,6 @@ static struct pci_driver snd_sof_pci_amd_rn_driver = {
module_pci_driver(snd_sof_pci_amd_rn_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("RENOIR SOF Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/amd/pci-vangogh.c b/sound/soc/sof/amd/pci-vangogh.c
index 5cd3ac84752f..16eb2994fbab 100644
--- a/sound/soc/sof/amd/pci-vangogh.c
+++ b/sound/soc/sof/amd/pci-vangogh.c
@@ -101,5 +101,6 @@ static struct pci_driver snd_sof_pci_amd_vgh_driver = {
module_pci_driver(snd_sof_pci_amd_vgh_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("VANGOGH SOF Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/amd/rembrandt.c b/sound/soc/sof/amd/rembrandt.c
index f1d1ba57ab3a..076f2f05a95c 100644
--- a/sound/soc/sof/amd/rembrandt.c
+++ b/sound/soc/sof/amd/rembrandt.c
@@ -140,7 +140,3 @@ int sof_rembrandt_ops_init(struct snd_sof_dev *sdev)
return 0;
}
-
-MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
-MODULE_DESCRIPTION("REMBRANDT SOF Driver");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/amd/renoir.c b/sound/soc/sof/amd/renoir.c
index 47b863f6258c..aa2d24dac6f5 100644
--- a/sound/soc/sof/amd/renoir.c
+++ b/sound/soc/sof/amd/renoir.c
@@ -115,7 +115,3 @@ int sof_renoir_ops_init(struct snd_sof_dev *sdev)
return 0;
}
-
-MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
-MODULE_DESCRIPTION("RENOIR SOF Driver");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/amd/vangogh.c b/sound/soc/sof/amd/vangogh.c
index bc6ffdb5471a..61372958c09d 100644
--- a/sound/soc/sof/amd/vangogh.c
+++ b/sound/soc/sof/amd/vangogh.c
@@ -161,7 +161,3 @@ int sof_vangogh_ops_init(struct snd_sof_dev *sdev)
return 0;
}
-
-MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
-MODULE_DESCRIPTION("VANGOGH SOF Driver");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
index 0a4917136ff9..83fe0401baf8 100644
--- a/sound/soc/sof/core.c
+++ b/sound/soc/sof/core.c
@@ -769,7 +769,7 @@ void sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata)
EXPORT_SYMBOL(sof_machine_unregister);
MODULE_AUTHOR("Liam Girdwood");
-MODULE_DESCRIPTION("Sound Open Firmware (SOF) Core");
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Sound Open Firmware (SOF) Core");
MODULE_ALIAS("platform:sof-audio");
MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/imx/imx-common.c b/sound/soc/sof/imx/imx-common.c
index 2981aea123d9..fce6d9cf6a6b 100644
--- a/sound/soc/sof/imx/imx-common.c
+++ b/sound/soc/sof/imx/imx-common.c
@@ -75,3 +75,4 @@ void imx8_dump(struct snd_sof_dev *sdev, u32 flags)
EXPORT_SYMBOL(imx8_dump);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF helpers for IMX platforms");
diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
index 3021dc87ab5a..9f24e3c283dd 100644
--- a/sound/soc/sof/imx/imx8.c
+++ b/sound/soc/sof/imx/imx8.c
@@ -667,5 +667,6 @@ static struct platform_driver snd_sof_of_imx8_driver = {
};
module_platform_driver(snd_sof_of_imx8_driver);
-MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for IMX8 platforms");
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c
index 4ed415f04345..1c7019c3cbd3 100644
--- a/sound/soc/sof/imx/imx8m.c
+++ b/sound/soc/sof/imx/imx8m.c
@@ -514,5 +514,6 @@ static struct platform_driver snd_sof_of_imx8m_driver = {
};
module_platform_driver(snd_sof_of_imx8m_driver);
-MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for IMX8M platforms");
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
diff --git a/sound/soc/sof/imx/imx8ulp.c b/sound/soc/sof/imx/imx8ulp.c
index 8adfdd00413a..2585b1beef23 100644
--- a/sound/soc/sof/imx/imx8ulp.c
+++ b/sound/soc/sof/imx/imx8ulp.c
@@ -516,5 +516,6 @@ static struct platform_driver snd_sof_of_imx8ulp_driver = {
};
module_platform_driver(snd_sof_of_imx8ulp_driver);
-MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for IMX8ULP platforms");
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
diff --git a/sound/soc/sof/intel/atom.c b/sound/soc/sof/intel/atom.c
index 86af4e9a716e..3505ac3a1b14 100644
--- a/sound/soc/sof/intel/atom.c
+++ b/sound/soc/sof/intel/atom.c
@@ -418,3 +418,4 @@ void atom_set_mach_params(struct snd_soc_acpi_mach *mach,
EXPORT_SYMBOL_NS(atom_set_mach_params, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for Atom platforms");
diff --git a/sound/soc/sof/intel/bdw.c b/sound/soc/sof/intel/bdw.c
index 3262286a9a9d..7f18080e4e19 100644
--- a/sound/soc/sof/intel/bdw.c
+++ b/sound/soc/sof/intel/bdw.c
@@ -694,6 +694,7 @@ static struct platform_driver snd_sof_acpi_intel_bdw_driver = {
module_platform_driver(snd_sof_acpi_intel_bdw_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for Broadwell platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_SOC_SOF_ACPI_DEV);
diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c
index d78d11d4cfbf..7a57e162fb1c 100644
--- a/sound/soc/sof/intel/byt.c
+++ b/sound/soc/sof/intel/byt.c
@@ -475,6 +475,7 @@ static struct platform_driver snd_sof_acpi_intel_byt_driver = {
module_platform_driver(snd_sof_acpi_intel_byt_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for Baytrail/Cherrytrail");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_SOC_SOF_ACPI_DEV);
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
index da3db3ed379e..dc46888faa0d 100644
--- a/sound/soc/sof/intel/hda-codec.c
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -457,3 +457,4 @@ EXPORT_SYMBOL_NS_GPL(hda_codec_i915_exit, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
#endif
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for HDaudio codecs");
diff --git a/sound/soc/sof/intel/hda-ctrl.c b/sound/soc/sof/intel/hda-ctrl.c
index 262b482dc0a8..b9a02750ce61 100644
--- a/sound/soc/sof/intel/hda-ctrl.c
+++ b/sound/soc/sof/intel/hda-ctrl.c
@@ -328,6 +328,7 @@ void hda_dsp_ctrl_stop_chip(struct snd_sof_dev *sdev)
}
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF helpers for HDaudio platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_HDA_MLINK);
MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC);
MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index ce675c22a5ab..c61d298ea6b3 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -379,7 +379,7 @@ static int non_hda_dai_hw_params_data(struct snd_pcm_substream *substream,
sdev = widget_to_sdev(w);
if (sdev->dspless_mode_selected)
- goto skip_tlv;
+ return 0;
/* get stream_id */
hext_stream = ops->get_hext_stream(sdev, cpu_dai, substream);
@@ -423,7 +423,6 @@ static int non_hda_dai_hw_params_data(struct snd_pcm_substream *substream,
dma_config->dma_stream_channel_map.device_count = 1;
dma_config->dma_priv_config_size = 0;
-skip_tlv:
return 0;
}
@@ -525,6 +524,9 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
return ret;
}
+ if (sdev->dspless_mode_selected)
+ return 0;
+
ipc4_copier = widget_to_copier(w);
dma_config_tlv = &ipc4_copier->dma_config_tlv[cpu_dai_id];
dma_config = &dma_config_tlv->dma_config;
diff --git a/sound/soc/sof/intel/hda-mlink.c b/sound/soc/sof/intel/hda-mlink.c
index 04bbc5c9904c..9a3559c78b62 100644
--- a/sound/soc/sof/intel/hda-mlink.c
+++ b/sound/soc/sof/intel/hda-mlink.c
@@ -972,3 +972,4 @@ EXPORT_SYMBOL_NS(hdac_bus_eml_enable_offload, SND_SOC_SOF_HDA_MLINK);
#endif
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for HDaudio multi-link");
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index e6a38de0a0aa..dead1c19558b 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -1522,6 +1522,7 @@ void hda_unregister_clients(struct snd_sof_dev *sdev)
}
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for HDaudio platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC);
MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
diff --git a/sound/soc/sof/intel/pci-apl.c b/sound/soc/sof/intel/pci-apl.c
index df6d897da290..f006dcf5458a 100644
--- a/sound/soc/sof/intel/pci-apl.c
+++ b/sound/soc/sof/intel/pci-apl.c
@@ -105,6 +105,7 @@ static struct pci_driver snd_sof_pci_intel_apl_driver = {
module_pci_driver(snd_sof_pci_intel_apl_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for ApolloLake platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-cnl.c b/sound/soc/sof/intel/pci-cnl.c
index a39fa3657d55..a8406342f08b 100644
--- a/sound/soc/sof/intel/pci-cnl.c
+++ b/sound/soc/sof/intel/pci-cnl.c
@@ -143,6 +143,7 @@ static struct pci_driver snd_sof_pci_intel_cnl_driver = {
module_pci_driver(snd_sof_pci_intel_cnl_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for CannonLake platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-icl.c b/sound/soc/sof/intel/pci-icl.c
index 9f1fe47475fb..25effca50d9f 100644
--- a/sound/soc/sof/intel/pci-icl.c
+++ b/sound/soc/sof/intel/pci-icl.c
@@ -108,6 +108,7 @@ static struct pci_driver snd_sof_pci_intel_icl_driver = {
module_pci_driver(snd_sof_pci_intel_icl_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for IceLake platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_CNL);
diff --git a/sound/soc/sof/intel/pci-lnl.c b/sound/soc/sof/intel/pci-lnl.c
index 68e5c90151b2..602c574064eb 100644
--- a/sound/soc/sof/intel/pci-lnl.c
+++ b/sound/soc/sof/intel/pci-lnl.c
@@ -70,6 +70,7 @@ static struct pci_driver snd_sof_pci_intel_lnl_driver = {
module_pci_driver(snd_sof_pci_intel_lnl_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for LunarLake platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_MTL);
diff --git a/sound/soc/sof/intel/pci-mtl.c b/sound/soc/sof/intel/pci-mtl.c
index c685cb8d6171..8cb0333c033e 100644
--- a/sound/soc/sof/intel/pci-mtl.c
+++ b/sound/soc/sof/intel/pci-mtl.c
@@ -133,6 +133,7 @@ static struct pci_driver snd_sof_pci_intel_mtl_driver = {
module_pci_driver(snd_sof_pci_intel_mtl_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for MeteorLake platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-skl.c b/sound/soc/sof/intel/pci-skl.c
index 862da8009543..8ca0231d7e4f 100644
--- a/sound/soc/sof/intel/pci-skl.c
+++ b/sound/soc/sof/intel/pci-skl.c
@@ -89,6 +89,7 @@ static struct pci_driver snd_sof_pci_intel_skl_driver = {
module_pci_driver(snd_sof_pci_intel_skl_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for SkyLake platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
index f73bb47cd79e..ebe1a7d16689 100644
--- a/sound/soc/sof/intel/pci-tgl.c
+++ b/sound/soc/sof/intel/pci-tgl.c
@@ -317,6 +317,7 @@ static struct pci_driver snd_sof_pci_intel_tgl_driver = {
module_pci_driver(snd_sof_pci_intel_tgl_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for TigerLake platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_CNL);
diff --git a/sound/soc/sof/intel/pci-tng.c b/sound/soc/sof/intel/pci-tng.c
index 5c3069588bb7..1375c393827e 100644
--- a/sound/soc/sof/intel/pci-tng.c
+++ b/sound/soc/sof/intel/pci-tng.c
@@ -244,6 +244,7 @@ static struct pci_driver snd_sof_pci_intel_tng_driver = {
module_pci_driver(snd_sof_pci_intel_tng_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for Tangier platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c
index 307bee63756b..4df2be3d39eb 100644
--- a/sound/soc/sof/ipc4-pcm.c
+++ b/sound/soc/sof/ipc4-pcm.c
@@ -650,7 +650,7 @@ static int sof_ipc4_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
struct sof_ipc4_audio_format *ipc4_fmt;
struct sof_ipc4_copier *ipc4_copier;
- bool single_fmt = false;
+ bool single_bitdepth = false;
u32 valid_bits = 0;
int dir, ret;
@@ -682,18 +682,18 @@ static int sof_ipc4_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
- if (sof_ipc4_copier_is_single_format(sdev,
+ if (sof_ipc4_copier_is_single_bitdepth(sdev,
available_fmt->output_pin_fmts,
available_fmt->num_output_formats)) {
ipc4_fmt = &available_fmt->output_pin_fmts->audio_fmt;
- single_fmt = true;
+ single_bitdepth = true;
}
} else {
- if (sof_ipc4_copier_is_single_format(sdev,
+ if (sof_ipc4_copier_is_single_bitdepth(sdev,
available_fmt->input_pin_fmts,
available_fmt->num_input_formats)) {
ipc4_fmt = &available_fmt->input_pin_fmts->audio_fmt;
- single_fmt = true;
+ single_bitdepth = true;
}
}
}
@@ -703,7 +703,7 @@ static int sof_ipc4_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
if (ret)
return ret;
- if (single_fmt) {
+ if (single_bitdepth) {
snd_mask_none(fmt);
valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(ipc4_fmt->fmt_cfg);
dev_dbg(component->dev, "Set %s to %d bit format\n", dai->name, valid_bits);
diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
index beff10989324..00987039c972 100644
--- a/sound/soc/sof/ipc4-topology.c
+++ b/sound/soc/sof/ipc4-topology.c
@@ -195,9 +195,10 @@ static void sof_ipc4_dbg_audio_format(struct device *dev, struct sof_ipc4_pin_fo
for (i = 0; i < num_formats; i++) {
struct sof_ipc4_audio_format *fmt = &pin_fmt[i].audio_fmt;
dev_dbg(dev,
- "Pin index #%d: %uHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x) buffer size %d\n",
- pin_fmt[i].pin_index, fmt->sampling_frequency, fmt->bit_depth, fmt->ch_map,
- fmt->ch_cfg, fmt->interleaving_style, fmt->fmt_cfg,
+ "Pin index #%d: %uHz, %ubit, %luch (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x) buffer size %d\n",
+ pin_fmt[i].pin_index, fmt->sampling_frequency, fmt->bit_depth,
+ SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg),
+ fmt->ch_map, fmt->ch_cfg, fmt->interleaving_style, fmt->fmt_cfg,
pin_fmt[i].buffer_size);
}
}
@@ -217,6 +218,14 @@ sof_ipc4_get_input_pin_audio_fmt(struct snd_sof_widget *swidget, int pin_index)
}
process = swidget->private;
+
+ /*
+ * For process modules without a base config extension, the base module
+ * config format is used for all input pins
+ */
+ if (process->init_config != SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG_WITH_EXT)
+ return &process->base_config.audio_fmt;
+
base_cfg_ext = process->base_config_ext;
/*
@@ -1422,7 +1431,7 @@ static int snd_sof_get_hw_config_params(struct snd_sof_dev *sdev, struct snd_sof
static int
snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
- bool single_format,
+ bool single_bitdepth,
struct snd_pcm_hw_params *params, u32 dai_index,
u32 linktype, u8 dir, u32 **dst, u32 *len)
{
@@ -1445,7 +1454,7 @@ snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai
* Look for 32-bit blob first instead of 16-bit if copier
* supports multiple formats
*/
- if (bit_depth == 16 && !single_format) {
+ if (bit_depth == 16 && !single_bitdepth) {
dev_dbg(sdev->dev, "Looking for 32-bit blob first for DMIC\n");
format_change = true;
bit_depth = 32;
@@ -1483,6 +1492,8 @@ snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai
dir, dev_type);
if (!cfg) {
+ bool get_new_blob = false;
+
if (format_change) {
/*
* The 32-bit blob was not found in NHLT table, try to
@@ -1490,7 +1501,20 @@ snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai
*/
bit_depth = params_width(params);
format_change = false;
+ get_new_blob = true;
+ } else if (linktype == SOF_DAI_INTEL_DMIC && !single_bitdepth) {
+ /*
+ * The requested 32-bit blob (no format change for the
+ * blob request) was not found in the NHLT table; try to
+ * look for a 16-bit blob if the copier supports multiple
+ * formats
+ */
+ bit_depth = 16;
+ format_change = true;
+ get_new_blob = true;
+ }
+ if (get_new_blob) {
cfg = intel_nhlt_get_endpoint_blob(sdev->dev, ipc4_data->nhlt,
dai_index, nhlt_type,
bit_depth, bit_depth,
@@ -1513,8 +1537,8 @@ out:
if (format_change) {
/*
- * Update the params to reflect that we have loaded 32-bit blob
- * instead of the 16-bit.
+ * Update the params to reflect that a different blob was loaded
+ * instead of one with the requested bit depth (16 -> 32 or 32 -> 16).
* This information is going to be used by the caller to find
* matching copier format on the dai side.
*/
@@ -1522,7 +1546,11 @@ out:
m = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
snd_mask_none(m);
- snd_mask_set_format(m, SNDRV_PCM_FORMAT_S32_LE);
+ if (bit_depth == 16)
+ snd_mask_set_format(m, SNDRV_PCM_FORMAT_S16_LE);
+ else
+ snd_mask_set_format(m, SNDRV_PCM_FORMAT_S32_LE);
+
}
return 0;
@@ -1530,7 +1558,7 @@ out:
#else
static int
snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
- bool single_format,
+ bool single_bitdepth,
struct snd_pcm_hw_params *params, u32 dai_index,
u32 linktype, u8 dir, u32 **dst, u32 *len)
{
@@ -1538,9 +1566,9 @@ snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai
}
#endif
-bool sof_ipc4_copier_is_single_format(struct snd_sof_dev *sdev,
- struct sof_ipc4_pin_format *pin_fmts,
- u32 pin_fmts_size)
+bool sof_ipc4_copier_is_single_bitdepth(struct snd_sof_dev *sdev,
+ struct sof_ipc4_pin_format *pin_fmts,
+ u32 pin_fmts_size)
{
struct sof_ipc4_audio_format *fmt;
u32 valid_bits;
@@ -1564,14 +1592,65 @@ bool sof_ipc4_copier_is_single_format(struct snd_sof_dev *sdev,
}
static int
+sof_ipc4_adjust_params_to_dai_format(struct snd_sof_dev *sdev,
+ struct snd_pcm_hw_params *params,
+ struct sof_ipc4_pin_format *pin_fmts,
+ u32 pin_fmts_size)
+{
+ u32 params_mask = BIT(SNDRV_PCM_HW_PARAM_RATE) |
+ BIT(SNDRV_PCM_HW_PARAM_CHANNELS) |
+ BIT(SNDRV_PCM_HW_PARAM_FORMAT);
+ struct sof_ipc4_audio_format *fmt;
+ u32 rate, channels, valid_bits;
+ int i;
+
+ fmt = &pin_fmts[0].audio_fmt;
+ rate = fmt->sampling_frequency;
+ channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg);
+ valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(fmt->fmt_cfg);
+
+ /* check if parameters in topology defined formats are the same */
+ for (i = 1; i < pin_fmts_size; i++) {
+ u32 val;
+
+ fmt = &pin_fmts[i].audio_fmt;
+
+ if (params_mask & BIT(SNDRV_PCM_HW_PARAM_RATE)) {
+ val = fmt->sampling_frequency;
+ if (val != rate)
+ params_mask &= ~BIT(SNDRV_PCM_HW_PARAM_RATE);
+ }
+ if (params_mask & BIT(SNDRV_PCM_HW_PARAM_CHANNELS)) {
+ val = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg);
+ if (val != channels)
+ params_mask &= ~BIT(SNDRV_PCM_HW_PARAM_CHANNELS);
+ }
+ if (params_mask & BIT(SNDRV_PCM_HW_PARAM_FORMAT)) {
+ val = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(fmt->fmt_cfg);
+ if (val != valid_bits)
+ params_mask &= ~BIT(SNDRV_PCM_HW_PARAM_FORMAT);
+ }
+ }
+
+ if (params_mask)
+ return sof_ipc4_update_hw_params(sdev, params,
+ &pin_fmts[0].audio_fmt,
+ params_mask);
+
+ return 0;
+}
+
+static int
sof_ipc4_prepare_dai_copier(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
struct snd_pcm_hw_params *params, int dir)
{
struct sof_ipc4_available_audio_format *available_fmt;
struct snd_pcm_hw_params dai_params = *params;
struct sof_ipc4_copier_data *copier_data;
+ struct sof_ipc4_pin_format *pin_fmts;
struct sof_ipc4_copier *ipc4_copier;
- bool single_format;
+ bool single_bitdepth;
+ u32 num_pin_fmts;
int ret;
ipc4_copier = dai->private;
@@ -1579,40 +1658,26 @@ sof_ipc4_prepare_dai_copier(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
available_fmt = &ipc4_copier->available_fmt;
/*
- * If the copier on the DAI side supports only single bit depth then
- * this depth (format) should be used to look for the NHLT blob (if
- * needed) and in case of capture this should be used for the input
- * format lookup
+ * Fix up the params based on the format parameters of the DAI. If any
+ * of RATE, CHANNELS, or bit depth is constant across the formats, then
+ * narrow the params to allow only that specific parameter value.
*/
if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
- single_format = sof_ipc4_copier_is_single_format(sdev,
- available_fmt->output_pin_fmts,
- available_fmt->num_output_formats);
-
- /* Update the dai_params with the only supported format */
- if (single_format) {
- ret = sof_ipc4_update_hw_params(sdev, &dai_params,
- &available_fmt->output_pin_fmts[0].audio_fmt,
- BIT(SNDRV_PCM_HW_PARAM_FORMAT));
- if (ret)
- return ret;
- }
+ pin_fmts = available_fmt->output_pin_fmts;
+ num_pin_fmts = available_fmt->num_output_formats;
} else {
- single_format = sof_ipc4_copier_is_single_format(sdev,
- available_fmt->input_pin_fmts,
- available_fmt->num_input_formats);
-
- /* Update the dai_params with the only supported format */
- if (single_format) {
- ret = sof_ipc4_update_hw_params(sdev, &dai_params,
- &available_fmt->input_pin_fmts[0].audio_fmt,
- BIT(SNDRV_PCM_HW_PARAM_FORMAT));
- if (ret)
- return ret;
- }
+ pin_fmts = available_fmt->input_pin_fmts;
+ num_pin_fmts = available_fmt->num_input_formats;
}
- ret = snd_sof_get_nhlt_endpoint_data(sdev, dai, single_format,
+ ret = sof_ipc4_adjust_params_to_dai_format(sdev, &dai_params, pin_fmts,
+ num_pin_fmts);
+ if (ret)
+ return ret;
+
+ single_bitdepth = sof_ipc4_copier_is_single_bitdepth(sdev, pin_fmts,
+ num_pin_fmts);
+ ret = snd_sof_get_nhlt_endpoint_data(sdev, dai, single_bitdepth,
&dai_params,
ipc4_copier->dai_index,
ipc4_copier->dai_type, dir,
@@ -1647,7 +1712,7 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
u32 out_ref_rate, out_ref_channels;
u32 deep_buffer_dma_ms = 0;
int output_fmt_index;
- bool single_output_format;
+ bool single_output_bitdepth;
int i;
dev_dbg(sdev->dev, "copier %s, type %d", swidget->widget->name, swidget->id);
@@ -1784,9 +1849,9 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
return ret;
/* set the reference params for output format selection */
- single_output_format = sof_ipc4_copier_is_single_format(sdev,
- available_fmt->output_pin_fmts,
- available_fmt->num_output_formats);
+ single_output_bitdepth = sof_ipc4_copier_is_single_bitdepth(sdev,
+ available_fmt->output_pin_fmts,
+ available_fmt->num_output_formats);
switch (swidget->id) {
case snd_soc_dapm_aif_in:
case snd_soc_dapm_dai_out:
@@ -1798,7 +1863,7 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
out_ref_rate = in_fmt->sampling_frequency;
out_ref_channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(in_fmt->fmt_cfg);
- if (!single_output_format)
+ if (!single_output_bitdepth)
out_ref_valid_bits =
SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(in_fmt->fmt_cfg);
break;
@@ -1807,7 +1872,7 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
case snd_soc_dapm_dai_in:
out_ref_rate = params_rate(fe_params);
out_ref_channels = params_channels(fe_params);
- if (!single_output_format) {
+ if (!single_output_bitdepth) {
out_ref_valid_bits = sof_ipc4_get_valid_bits(sdev, fe_params);
if (out_ref_valid_bits < 0)
return out_ref_valid_bits;
@@ -1825,7 +1890,7 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
* if the output format is the same across all available output formats, choose
* that as the reference.
*/
- if (single_output_format) {
+ if (single_output_bitdepth) {
struct sof_ipc4_audio_format *out_fmt;
out_fmt = &available_fmt->output_pin_fmts[0].audio_fmt;
diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h
index 4488762f6a71..f4dc499c0ffe 100644
--- a/sound/soc/sof/ipc4-topology.h
+++ b/sound/soc/sof/ipc4-topology.h
@@ -476,7 +476,7 @@ struct sof_ipc4_process {
u32 init_config;
};
-bool sof_ipc4_copier_is_single_format(struct snd_sof_dev *sdev,
- struct sof_ipc4_pin_format *pin_fmts,
- u32 pin_fmts_size);
+bool sof_ipc4_copier_is_single_bitdepth(struct snd_sof_dev *sdev,
+ struct sof_ipc4_pin_format *pin_fmts,
+ u32 pin_fmts_size);
#endif
diff --git a/sound/soc/sof/mediatek/mt8186/mt8186.c b/sound/soc/sof/mediatek/mt8186/mt8186.c
index c63e0d2f4b96..bea1b9d9ca28 100644
--- a/sound/soc/sof/mediatek/mt8186/mt8186.c
+++ b/sound/soc/sof/mediatek/mt8186/mt8186.c
@@ -666,6 +666,7 @@ static struct platform_driver snd_sof_of_mt8186_driver = {
};
module_platform_driver(snd_sof_of_mt8186_driver);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for MT8186/MT8188 platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_SOC_SOF_MTK_COMMON);
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/mediatek/mt8195/mt8195.c b/sound/soc/sof/mediatek/mt8195/mt8195.c
index fc1c016104ae..31dc98d1b1d8 100644
--- a/sound/soc/sof/mediatek/mt8195/mt8195.c
+++ b/sound/soc/sof/mediatek/mt8195/mt8195.c
@@ -619,6 +619,7 @@ static struct platform_driver snd_sof_of_mt8195_driver = {
};
module_platform_driver(snd_sof_of_mt8195_driver);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for MTL 8195 platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_SOC_SOF_MTK_COMMON);
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/mediatek/mtk-adsp-common.c b/sound/soc/sof/mediatek/mtk-adsp-common.c
index de8dbe27cd0d..20bcf5590eb8 100644
--- a/sound/soc/sof/mediatek/mtk-adsp-common.c
+++ b/sound/soc/sof/mediatek/mtk-adsp-common.c
@@ -82,3 +82,4 @@ void mtk_adsp_dump(struct snd_sof_dev *sdev, u32 flags)
EXPORT_SYMBOL(mtk_adsp_dump);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF helpers for MTK ADSP platforms");
diff --git a/sound/soc/sof/nocodec.c b/sound/soc/sof/nocodec.c
index fdcbe33d3dcf..b12b3d865ae3 100644
--- a/sound/soc/sof/nocodec.c
+++ b/sound/soc/sof/nocodec.c
@@ -110,7 +110,7 @@ static struct platform_driver sof_nocodec_audio = {
};
module_platform_driver(sof_nocodec_audio)
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("ASoC sof nocodec");
MODULE_AUTHOR("Liam Girdwood");
-MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:sof-nocodec");
diff --git a/sound/soc/sof/sof-acpi-dev.c b/sound/soc/sof/sof-acpi-dev.c
index 2d96d00f1c44..b196b2b74c26 100644
--- a/sound/soc/sof/sof-acpi-dev.c
+++ b/sound/soc/sof/sof-acpi-dev.c
@@ -100,3 +100,4 @@ void sof_acpi_remove(struct platform_device *pdev)
EXPORT_SYMBOL_NS(sof_acpi_remove, SND_SOC_SOF_ACPI_DEV);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for ACPI platforms");
diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
index b3ac040811e7..ef9318947d74 100644
--- a/sound/soc/sof/sof-audio.c
+++ b/sound/soc/sof/sof-audio.c
@@ -485,7 +485,7 @@ sink_prepare:
if (ret < 0) {
/* unprepare the source widget */
if (widget_ops[widget->id].ipc_unprepare &&
- swidget && swidget->prepared) {
+ swidget && swidget->prepared && swidget->use_count == 0) {
widget_ops[widget->id].ipc_unprepare(swidget);
swidget->prepared = false;
}
diff --git a/sound/soc/sof/sof-client-ipc-flood-test.c b/sound/soc/sof/sof-client-ipc-flood-test.c
index 435614926092..e7d2001140e8 100644
--- a/sound/soc/sof/sof-client-ipc-flood-test.c
+++ b/sound/soc/sof/sof-client-ipc-flood-test.c
@@ -394,6 +394,6 @@ static struct auxiliary_driver sof_ipc_flood_client_drv = {
module_auxiliary_driver(sof_ipc_flood_client_drv);
-MODULE_DESCRIPTION("SOF IPC Flood Test Client Driver");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SOF IPC Flood Test Client Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/sof-client-ipc-kernel-injector.c b/sound/soc/sof/sof-client-ipc-kernel-injector.c
index 6973b6690df4..d3f541069b24 100644
--- a/sound/soc/sof/sof-client-ipc-kernel-injector.c
+++ b/sound/soc/sof/sof-client-ipc-kernel-injector.c
@@ -157,6 +157,6 @@ static struct auxiliary_driver sof_msg_inject_client_drv = {
module_auxiliary_driver(sof_msg_inject_client_drv);
-MODULE_DESCRIPTION("SOF IPC Kernel Injector Client Driver");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SOF IPC Kernel Injector Client Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/sof-client-ipc-msg-injector.c b/sound/soc/sof/sof-client-ipc-msg-injector.c
index af22e6421029..d0f8beb9d000 100644
--- a/sound/soc/sof/sof-client-ipc-msg-injector.c
+++ b/sound/soc/sof/sof-client-ipc-msg-injector.c
@@ -335,6 +335,6 @@ static struct auxiliary_driver sof_msg_inject_client_drv = {
module_auxiliary_driver(sof_msg_inject_client_drv);
-MODULE_DESCRIPTION("SOF IPC Message Injector Client Driver");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SOF IPC Message Injector Client Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/sof-client-probes.c b/sound/soc/sof/sof-client-probes.c
index b8f297307565..ccc7d38ddc38 100644
--- a/sound/soc/sof/sof-client-probes.c
+++ b/sound/soc/sof/sof-client-probes.c
@@ -540,6 +540,6 @@ static struct auxiliary_driver sof_probes_client_drv = {
module_auxiliary_driver(sof_probes_client_drv);
-MODULE_DESCRIPTION("SOF Probes Client Driver");
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SOF Probes Client Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/sof-of-dev.c b/sound/soc/sof/sof-of-dev.c
index b9a499e92b9a..71f7153cf79c 100644
--- a/sound/soc/sof/sof-of-dev.c
+++ b/sound/soc/sof/sof-of-dev.c
@@ -93,3 +93,4 @@ void sof_of_shutdown(struct platform_device *pdev)
EXPORT_SYMBOL(sof_of_shutdown);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for OF/DT platforms");
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
index 4365405783e6..38f2187da5de 100644
--- a/sound/soc/sof/sof-pci-dev.c
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -304,3 +304,4 @@ void sof_pci_shutdown(struct pci_dev *pci)
EXPORT_SYMBOL_NS(sof_pci_shutdown, SND_SOC_SOF_PCI_DEV);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for PCI platforms");
diff --git a/sound/soc/sof/sof-utils.c b/sound/soc/sof/sof-utils.c
index cad041bf56cc..44608682e9f8 100644
--- a/sound/soc/sof/sof-utils.c
+++ b/sound/soc/sof/sof-utils.c
@@ -73,3 +73,4 @@ int snd_sof_create_page_table(struct device *dev,
EXPORT_SYMBOL(snd_sof_create_page_table);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF utils");
diff --git a/sound/soc/sof/stream-ipc.c b/sound/soc/sof/stream-ipc.c
index eb71303aa24c..794c7bbccbaf 100644
--- a/sound/soc/sof/stream-ipc.c
+++ b/sound/soc/sof/stream-ipc.c
@@ -125,5 +125,3 @@ int sof_stream_pcm_close(struct snd_sof_dev *sdev,
return 0;
}
EXPORT_SYMBOL(sof_stream_pcm_close);
-
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/xtensa/core.c b/sound/soc/sof/xtensa/core.c
index ccbc3fcdadd5..3cf8c84beff9 100644
--- a/sound/soc/sof/xtensa/core.c
+++ b/sound/soc/sof/xtensa/core.c
@@ -151,5 +151,5 @@ const struct dsp_arch_ops sof_xtensa_arch_ops = {
};
EXPORT_SYMBOL_NS(sof_xtensa_arch_ops, SND_SOC_SOF_XTENSA);
-MODULE_DESCRIPTION("SOF Xtensa DSP support");
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF Xtensa DSP support");
diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
index 1e760c315521..2b1ed91a736c 100644
--- a/sound/soc/ti/davinci-mcasp.c
+++ b/sound/soc/ti/davinci-mcasp.c
@@ -1472,10 +1472,11 @@ static int davinci_mcasp_hw_rule_min_periodsize(
{
struct snd_interval *period_size = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
+ u8 numevt = *((u8 *)rule->private);
struct snd_interval frames;
snd_interval_any(&frames);
- frames.min = 64;
+ frames.min = numevt;
frames.integer = 1;
return snd_interval_refine(period_size, &frames);
@@ -1490,6 +1491,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
u32 max_channels = 0;
int i, dir, ret;
int tdm_slots = mcasp->tdm_slots;
+ u8 *numevt;
/* Do not allow more then one stream per direction */
if (mcasp->substreams[substream->stream])
@@ -1589,9 +1591,12 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
return ret;
}
+ numevt = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+ &mcasp->txnumevt :
+ &mcasp->rxnumevt;
snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
- davinci_mcasp_hw_rule_min_periodsize, NULL,
+ davinci_mcasp_hw_rule_min_periodsize, numevt,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
return 0;
diff --git a/sound/soc/ti/omap-hdmi.c b/sound/soc/ti/omap-hdmi.c
index 639bc83f4263..cf43ac19c4a6 100644
--- a/sound/soc/ti/omap-hdmi.c
+++ b/sound/soc/ti/omap-hdmi.c
@@ -354,11 +354,7 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev)
if (!card)
return -ENOMEM;
- card->name = devm_kasprintf(dev, GFP_KERNEL,
- "HDMI %s", dev_name(ad->dssdev));
- if (!card->name)
- return -ENOMEM;
-
+ card->name = "HDMI";
card->owner = THIS_MODULE;
card->dai_link =
devm_kzalloc(dev, sizeof(*(card->dai_link)), GFP_KERNEL);
diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h
index 52f076afeb96..7b32b99023a2 100644
--- a/tools/arch/arm64/include/asm/cputype.h
+++ b/tools/arch/arm64/include/asm/cputype.h
@@ -86,6 +86,9 @@
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
+#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
+#define ARM_CPU_PART_CORTEX_X4 0xD82
+#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
@@ -159,6 +162,9 @@
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
+#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
+#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
+#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index e72c2b872957..e022e6eb766c 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -170,6 +170,10 @@
* CPU is not affected by Branch
* History Injection.
*/
+#define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
+ * IA32_XAPIC_DISABLE_STATUS MSR
+ * supported
+ */
#define ARCH_CAP_PBRSB_NO BIT(24) /*
* Not susceptible to Post-Barrier
* Return Stack Buffer Predictions.
@@ -192,11 +196,6 @@
* File.
*/
-#define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
- * IA32_XAPIC_DISABLE_STATUS MSR
- * supported
- */
-
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
* Writeback and invalidate the
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index ef11aa4cab42..9fae1b73b529 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -457,8 +457,13 @@ struct kvm_sync_regs {
#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001
-/* attributes for system fd (group 0) */
-#define KVM_X86_XCOMP_GUEST_SUPP 0
+/* vendor-independent attributes for system fd (group 0) */
+#define KVM_X86_GRP_SYSTEM 0
+# define KVM_X86_XCOMP_GUEST_SUPP 0
+
+/* vendor-specific groups and attributes for system fd */
+#define KVM_X86_GRP_SEV 1
+# define KVM_X86_SEV_VMSA_FEATURES 0
struct kvm_vmx_nested_state_data {
__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
@@ -689,6 +694,9 @@ enum sev_cmd_id {
/* Guest Migration Extension */
KVM_SEV_SEND_CANCEL,
+ /* Second time is the charm; improved versions of the above ioctls. */
+ KVM_SEV_INIT2,
+
KVM_SEV_NR_MAX,
};
@@ -700,6 +708,14 @@ struct kvm_sev_cmd {
__u32 sev_fd;
};
+struct kvm_sev_init {
+ __u64 vmsa_features;
+ __u32 flags;
+ __u16 ghcb_version;
+ __u16 pad1;
+ __u32 pad2[8];
+};
+
struct kvm_sev_launch_start {
__u32 handle;
__u32 policy;
@@ -856,5 +872,7 @@ struct kvm_hyperv_eventfd {
#define KVM_X86_DEFAULT_VM 0
#define KVM_X86_SW_PROTECTED_VM 1
+#define KVM_X86_SEV_VM 2
+#define KVM_X86_SEV_ES_VM 3
#endif /* _ASM_X86_KVM_H */
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index d9520cb826b3..af393c7dee1f 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -728,7 +728,7 @@ static int sets_patch(struct object *obj)
static int symbols_patch(struct object *obj)
{
- int err;
+ off_t err;
if (__symbols_patch(obj, &obj->structs) ||
__symbols_patch(obj, &obj->unions) ||
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index bb52871da341..2e60e2c212cd 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -17,6 +17,7 @@ endif
MAKEFLAGS += -r
override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
+override CFLAGS += -Wno-address-of-packed-member
ALL_TARGETS := hv_kvp_daemon hv_vss_daemon
ifneq ($(ARCH), aarch64)
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 75f00965ab15..d983c48a3b6a 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -842,8 +842,11 @@ __SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
#define __NR_lsm_list_modules 461
__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
+#define __NR_mseal 462
+__SYSCALL(__NR_mseal, sys_mseal)
+
#undef __NR_syscalls
-#define __NR_syscalls 462
+#define __NR_syscalls 463
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 2ee338860b7e..d4d86e566e07 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -806,6 +806,12 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_PXP_STATUS 58
+/*
+ * Query whether the kernel allows marking a context to send a Freq hint to
+ * SLPC. This enables use of the strategies allowed by the SLPC algorithm.
+ */
+#define I915_PARAM_HAS_CONTEXT_FREQ_HINT 59
+
/* Must be kept compact -- no holes and well documented */
/**
@@ -2148,6 +2154,15 @@ struct drm_i915_gem_context_param {
* -EIO: The firmware did not succeed in creating the protected context.
*/
#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
+
+/*
+ * I915_CONTEXT_PARAM_LOW_LATENCY:
+ *
+ * Mark this context as a low-latency workload that requires aggressive GT
+ * frequency scaling. Use I915_PARAM_HAS_CONTEXT_FREQ_HINT to check whether
+ * the kernel supports this per-context flag.
+ */
+#define I915_CONTEXT_PARAM_LOW_LATENCY 0xe
/* Must be kept compact -- no holes and well documented */
/** @value: Context parameter value to be set or queried */
@@ -2623,19 +2638,29 @@ struct drm_i915_reg_read {
*
*/
+/*
+ * struct drm_i915_reset_stats - Return global reset and other context stats
+ *
+ * The driver keeps a few stats for each context and also a global reset count.
+ * This struct can be used to query those stats.
+ */
struct drm_i915_reset_stats {
+ /** @ctx_id: ID of the requested context */
__u32 ctx_id;
+
+ /** @flags: MBZ */
__u32 flags;
- /* All resets since boot/module reload, for all contexts */
+ /** @reset_count: All resets since boot/module reload, for all contexts */
__u32 reset_count;
- /* Number of batches lost when active in GPU, for this context */
+ /** @batch_active: Number of batches lost when active in GPU, for this context */
__u32 batch_active;
- /* Number of batches lost pending for execution, for this context */
+ /** @batch_pending: Number of batches lost pending for execution, for this context */
__u32 batch_pending;
+ /** @pad: MBZ */
__u32 pad;
};
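
The i915_drm.h hunk above only adds kernel-doc to struct drm_i915_reset_stats; the ioctl itself is unchanged. For context, a hedged userspace sketch of querying those counters is shown here; the header path, the helper name and the error handling are assumptions, not part of the patch.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* may be <libdrm/i915_drm.h> depending on the install */

/* Hedged sketch: read the documented reset counters for one context. */
static int print_reset_stats(int drm_fd, __u32 ctx_id)
{
	struct drm_i915_reset_stats stats;

	memset(&stats, 0, sizeof(stats));
	stats.ctx_id = ctx_id;		/* flags and pad are MBZ */

	if (ioctl(drm_fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
		return -1;

	printf("resets=%u active=%u pending=%u\n",
	       stats.reset_count, stats.batch_active, stats.batch_pending);
	return 0;
}
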
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index ea32b101b999..d03842abae57 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -1221,9 +1221,9 @@ struct kvm_vfio_spapr_tce {
/* Available with KVM_CAP_SPAPR_RESIZE_HPT */
#define KVM_PPC_RESIZE_HPT_PREPARE _IOR(KVMIO, 0xad, struct kvm_ppc_resize_hpt)
#define KVM_PPC_RESIZE_HPT_COMMIT _IOR(KVMIO, 0xae, struct kvm_ppc_resize_hpt)
-/* Available with KVM_CAP_PPC_RADIX_MMU or KVM_CAP_PPC_MMU_HASH_V3 */
+/* Available with KVM_CAP_PPC_MMU_RADIX or KVM_CAP_PPC_MMU_HASH_V3 */
#define KVM_PPC_CONFIGURE_V3_MMU _IOW(KVMIO, 0xaf, struct kvm_ppc_mmuv3_cfg)
-/* Available with KVM_CAP_PPC_RADIX_MMU */
+/* Available with KVM_CAP_PPC_MMU_RADIX */
#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info)
/* Available with KVM_CAP_PPC_GET_CPU_CHAR */
#define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char)
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
index a8188202413e..43742ac5b00d 100644
--- a/tools/include/uapi/linux/netdev.h
+++ b/tools/include/uapi/linux/netdev.h
@@ -148,6 +148,7 @@ enum {
NETDEV_A_QSTATS_RX_ALLOC_FAIL,
NETDEV_A_QSTATS_RX_HW_DROPS,
NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS,
+ NETDEV_A_QSTATS_RX_CSUM_COMPLETE,
NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY,
NETDEV_A_QSTATS_RX_CSUM_NONE,
NETDEV_A_QSTATS_RX_CSUM_BAD,
diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h
index 2f2ee82d5517..67626d535316 100644
--- a/tools/include/uapi/linux/stat.h
+++ b/tools/include/uapi/linux/stat.h
@@ -126,8 +126,9 @@ struct statx {
__u64 stx_mnt_id;
__u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
__u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
+ __u64 stx_subvol; /* Subvolume identifier */
/* 0xa0 */
- __u64 __spare3[12]; /* Spare space for future expansion */
+ __u64 __spare3[11]; /* Spare space for future expansion */
/* 0x100 */
};
@@ -155,6 +156,7 @@ struct statx {
#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
+#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
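
The statx hunk above adds stx_subvol and the STATX_SUBVOL mask bit. A hedged userspace sketch of requesting the new field follows; it assumes uapi headers that already carry this change, and the helper name is illustrative.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

/* Hedged sketch: ask statx(2) for the subvolume id. On kernels or
 * filesystems without support the STATX_SUBVOL bit simply stays clear
 * in stx_mask.
 */
int print_subvol(const char *path)
{
	struct statx stx;

	if (statx(AT_FDCWD, path, 0, STATX_BASIC_STATS | STATX_SUBVOL, &stx))
		return -1;

	if (stx.stx_mask & STATX_SUBVOL)
		printf("%s: subvolume %llu\n", path,
		       (unsigned long long)stx.stx_subvol);
	else
		printf("%s: no subvolume id reported\n", path);
	return 0;
}
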
diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c
index a336786a22a3..50befe125ddc 100644
--- a/tools/lib/bpf/features.c
+++ b/tools/lib/bpf/features.c
@@ -392,11 +392,41 @@ static int probe_uprobe_multi_link(int token_fd)
link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
err = -errno; /* close() can clobber errno */
+ if (link_fd >= 0 || err != -EBADF) {
+ if (link_fd >= 0)
+ close(link_fd);
+ close(prog_fd);
+ return 0;
+ }
+
+ /* Initial multi-uprobe support in kernel didn't handle PID filtering
+ * correctly (it was doing thread filtering, not process filtering).
+ * So now we'll detect if PID filtering logic was fixed, and, if not,
+ * we'll pretend multi-uprobes are not supported.
+ * Multi-uprobes are used in USDT attachment logic, and we need to be
+ * conservative here, because multi-uprobe selection happens early at
+ * load time, while the use of PID filtering is known late at
+ * attachment time, at which point it's too late to undo multi-uprobe
+ * selection.
+ *
+ * Creating a uprobe with pid == -1 for the (invalid) '/' binary will fail
+ * early with -EINVAL on kernels with fixed PID filtering logic;
+ * otherwise -ESRCH would be returned if passed a correct binary path
+ * (but we'll just get -EBADF, of course).
+ */
+ link_opts.uprobe_multi.pid = -1; /* invalid PID */
+ link_opts.uprobe_multi.path = "/"; /* invalid path */
+ link_opts.uprobe_multi.offsets = &offset;
+ link_opts.uprobe_multi.cnt = 1;
+
+ link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
+ err = -errno; /* close() can clobber errno */
+
if (link_fd >= 0)
close(link_fd);
close(prog_fd);
- return link_fd < 0 && err == -EBADF;
+ return link_fd < 0 && err == -EINVAL;
}
static int probe_kern_bpf_cookie(int token_fd)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 5c35c0d89306..e6d56b555369 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -214,6 +214,7 @@ NON_CONFIG_TARGETS := clean python-clean TAGS tags cscope help
ifdef MAKECMDGOALS
ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
+ VMLINUX_H=$(src-perf)/util/bpf_skel/vmlinux/vmlinux.h
config := 0
endif
endif
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index 532b855df589..1464c6be6eb3 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -376,3 +376,4 @@
459 n64 lsm_get_self_attr sys_lsm_get_self_attr
460 n64 lsm_set_self_attr sys_lsm_set_self_attr
461 n64 lsm_list_modules sys_lsm_list_modules
+462 n64 mseal sys_mseal
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 17173b82ca21..3656f1ca7a21 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -548,3 +548,4 @@
459 common lsm_get_self_attr sys_lsm_get_self_attr
460 common lsm_set_self_attr sys_lsm_set_self_attr
461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index 095bb86339a7..bd0fee24ad10 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -464,3 +464,4 @@
459 common lsm_get_self_attr sys_lsm_get_self_attr sys_lsm_get_self_attr
460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr
461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal sys_mseal
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 7e8d46f4147f..a396f6e6ab5b 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -374,7 +374,7 @@
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
452 common fchmodat2 sys_fchmodat2
-453 64 map_shadow_stack sys_map_shadow_stack
+453 common map_shadow_stack sys_map_shadow_stack
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
@@ -383,6 +383,7 @@
459 common lsm_get_self_attr sys_lsm_get_self_attr
460 common lsm_set_self_attr sys_lsm_set_self_attr
461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
#
# Due to a historical design error, certain syscalls are numbered differently
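
The syscall-table hunks above wire up the new mseal() syscall as number 462. A hedged sketch of invoking it directly via syscall(2) follows, for kernels where libc has no wrapper yet; the flags value 0 and the expected EPERM from a later mprotect() reflect the documented semantics, not anything in this patch.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_mseal
#define __NR_mseal 462		/* from the tables above */
#endif

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	if (syscall(__NR_mseal, p, len, 0UL))
		perror("mseal");	/* ENOSYS on older kernels */
	if (mprotect(p, len, PROT_READ | PROT_WRITE))
		perror("mprotect on sealed mapping");	/* expected: EPERM */
	return 0;
}
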
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 66a3de8ac661..0a8ba1323d64 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1956,8 +1956,7 @@ static void record__read_lost_samples(struct record *rec)
if (count.lost) {
if (!lost) {
- lost = zalloc(sizeof(*lost) +
- session->machines.host.id_hdr_size);
+ lost = zalloc(PERF_SAMPLE_MAX_SIZE);
if (!lost) {
pr_debug("Memory allocation failed\n");
return;
@@ -1973,8 +1972,7 @@ static void record__read_lost_samples(struct record *rec)
lost_count = perf_bpf_filter__lost_count(evsel);
if (lost_count) {
if (!lost) {
- lost = zalloc(sizeof(*lost) +
- session->machines.host.id_hdr_size);
+ lost = zalloc(PERF_SAMPLE_MAX_SIZE);
if (!lost) {
pr_debug("Memory allocation failed\n");
return;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 51eca671c797..08a3a6effac1 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -765,7 +765,7 @@ static const char *fcntl_cmds[] = {
static DEFINE_STRARRAY(fcntl_cmds, "F_");
static const char *fcntl_linux_specific_cmds[] = {
- "SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
+ "SETLEASE", "GETLEASE", "NOTIFY", "DUPFD_QUERY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};
diff --git a/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h b/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h
index d18bfb238f66..13aea8fc3d45 100644
--- a/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h
+++ b/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h
@@ -97,10 +97,16 @@
#define LOCAL_TIMER_VECTOR 0xec
+/*
+ * Posted interrupt notification vector for all device MSIs delivered to
+ * the host kernel.
+ */
+#define POSTED_MSI_NOTIFICATION_VECTOR 0xeb
+
#define NR_VECTORS 256
#ifdef CONFIG_X86_LOCAL_APIC
-#define FIRST_SYSTEM_VECTOR LOCAL_TIMER_VECTOR
+#define FIRST_SYSTEM_VECTOR POSTED_MSI_NOTIFICATION_VECTOR
#else
#define FIRST_SYSTEM_VECTOR NR_VECTORS
#endif
diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
index 139c330ccf2c..89d16b90370b 100644
--- a/tools/perf/trace/beauty/include/linux/socket.h
+++ b/tools/perf/trace/beauty/include/linux/socket.h
@@ -16,6 +16,7 @@ struct cred;
struct socket;
struct sock;
struct sk_buff;
+struct proto_accept_arg;
#define __sockaddr_check_size(size) \
BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
@@ -433,7 +434,7 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
extern int __sys_sendto(int fd, void __user *buff, size_t len,
unsigned int flags, struct sockaddr __user *addr,
int addr_len);
-extern struct file *do_accept(struct file *file, unsigned file_flags,
+extern struct file *do_accept(struct file *file, struct proto_accept_arg *arg,
struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
diff --git a/tools/perf/trace/beauty/include/uapi/linux/fcntl.h b/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
index 282e90aeb163..c0bcc185fa48 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
@@ -9,6 +9,14 @@
#define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1)
/*
+ * Request notifications on a directory.
+ * See below for events that may be notified.
+ */
+#define F_NOTIFY (F_LINUX_SPECIFIC_BASE + 2)
+
+#define F_DUPFD_QUERY (F_LINUX_SPECIFIC_BASE + 3)
+
+/*
* Cancel a blocking posix lock; internal use only until we expose an
* asynchronous lock api to userspace:
*/
@@ -18,12 +26,6 @@
#define F_DUPFD_CLOEXEC (F_LINUX_SPECIFIC_BASE + 6)
/*
- * Request nofications on a directory.
- * See below for events that may be notified.
- */
-#define F_NOTIFY (F_LINUX_SPECIFIC_BASE+2)
-
-/*
* Set and get of pipe page size array
*/
#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
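
F_DUPFD_QUERY, added above, lets a process ask whether two file descriptors refer to the same open file description. A hedged sketch of using it follows; the fallback define mirrors F_LINUX_SPECIFIC_BASE + 3 from the hunk, and the 1/0 return convention is the documented behaviour rather than something this patch changes.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#ifndef F_DUPFD_QUERY
#define F_DUPFD_QUERY (1024 + 3)	/* F_LINUX_SPECIFIC_BASE + 3 */
#endif

int main(void)
{
	int fd = dup(STDOUT_FILENO);
	/* returns 1 if both fds share an open file description, 0 if not,
	 * and -1 with EINVAL on kernels without the command */
	int same = fcntl(STDOUT_FILENO, F_DUPFD_QUERY, fd);

	printf("dup'ed fd shares description: %d\n", same);
	close(fd);
	return 0;
}
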
diff --git a/tools/perf/trace/beauty/include/uapi/linux/prctl.h b/tools/perf/trace/beauty/include/uapi/linux/prctl.h
index 370ed14b1ae0..35791791a879 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/prctl.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/prctl.h
@@ -306,4 +306,26 @@ struct prctl_mm_map {
# define PR_RISCV_V_VSTATE_CTRL_NEXT_MASK 0xc
# define PR_RISCV_V_VSTATE_CTRL_MASK 0x1f
+#define PR_RISCV_SET_ICACHE_FLUSH_CTX 71
+# define PR_RISCV_CTX_SW_FENCEI_ON 0
+# define PR_RISCV_CTX_SW_FENCEI_OFF 1
+# define PR_RISCV_SCOPE_PER_PROCESS 0
+# define PR_RISCV_SCOPE_PER_THREAD 1
+
+/* PowerPC Dynamic Execution Control Register (DEXCR) controls */
+#define PR_PPC_GET_DEXCR 72
+#define PR_PPC_SET_DEXCR 73
+/* DEXCR aspect to act on */
+# define PR_PPC_DEXCR_SBHE 0 /* Speculative branch hint enable */
+# define PR_PPC_DEXCR_IBRTPD 1 /* Indirect branch recurrent target prediction disable */
+# define PR_PPC_DEXCR_SRAPD 2 /* Subroutine return address prediction disable */
+# define PR_PPC_DEXCR_NPHIE 3 /* Non-privileged hash instruction enable */
+/* Action to apply / return */
+# define PR_PPC_DEXCR_CTRL_EDITABLE 0x1 /* Aspect can be modified with PR_PPC_SET_DEXCR */
+# define PR_PPC_DEXCR_CTRL_SET 0x2 /* Set the aspect for this process */
+# define PR_PPC_DEXCR_CTRL_CLEAR 0x4 /* Clear the aspect for this process */
+# define PR_PPC_DEXCR_CTRL_SET_ONEXEC 0x8 /* Set the aspect on exec */
+# define PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC 0x10 /* Clear the aspect on exec */
+# define PR_PPC_DEXCR_CTRL_MASK 0x1f
+
#endif /* _LINUX_PRCTL_H */
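
The prctl.h hunk above introduces, among others, PR_RISCV_SET_ICACHE_FLUSH_CTX. A hedged sketch of a userspace call follows; the constants come from the hunk, but the argument ordering (context switch mode, then scope) is an assumption about the interface and is not stated in this diff.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_RISCV_SET_ICACHE_FLUSH_CTX
#define PR_RISCV_SET_ICACHE_FLUSH_CTX	71
#define PR_RISCV_CTX_SW_FENCEI_ON	0
#define PR_RISCV_SCOPE_PER_PROCESS	0
#endif

int main(void)
{
	/* Assumed calling convention: prctl(op, ctx_mode, scope, 0, 0). */
	if (prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_ON,
		  PR_RISCV_SCOPE_PER_PROCESS, 0, 0))
		perror("prctl");	/* fails off RISC-V or on older kernels */
	return 0;
}
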
diff --git a/tools/perf/trace/beauty/include/uapi/linux/stat.h b/tools/perf/trace/beauty/include/uapi/linux/stat.h
index 2f2ee82d5517..67626d535316 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/stat.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/stat.h
@@ -126,8 +126,9 @@ struct statx {
__u64 stx_mnt_id;
__u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
__u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
+ __u64 stx_subvol; /* Subvolume identifier */
/* 0xa0 */
- __u64 __spare3[12]; /* Spare space for future expansion */
+ __u64 __spare3[11]; /* Spare space for future expansion */
/* 0x100 */
};
@@ -155,6 +156,7 @@ struct statx {
#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
+#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
diff --git a/tools/power/cpupower/utils/helpers/amd.c b/tools/power/cpupower/utils/helpers/amd.c
index c519cc89c97f..0a56e22240fc 100644
--- a/tools/power/cpupower/utils/helpers/amd.c
+++ b/tools/power/cpupower/utils/helpers/amd.c
@@ -41,6 +41,16 @@ union core_pstate {
unsigned res1:31;
unsigned en:1;
} pstatedef;
+ /* since fam 1Ah: */
+ struct {
+ unsigned fid:12;
+ unsigned res1:2;
+ unsigned vid:8;
+ unsigned iddval:8;
+ unsigned idddiv:2;
+ unsigned res2:31;
+ unsigned en:1;
+ } pstatedef2;
unsigned long long val;
};
@@ -48,6 +58,10 @@ static int get_did(union core_pstate pstate)
{
int t;
+ /* Family 1Ah and onward do not use did */
+ if (cpupower_cpu_info.family >= 0x1A)
+ return 0;
+
if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF)
t = pstate.pstatedef.did;
else if (cpupower_cpu_info.family == 0x12)
@@ -61,12 +75,18 @@ static int get_did(union core_pstate pstate)
static int get_cof(union core_pstate pstate)
{
int t;
- int fid, did, cof;
+ int fid, did, cof = 0;
did = get_did(pstate);
if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF) {
- fid = pstate.pstatedef.fid;
- cof = 200 * fid / did;
+ if (cpupower_cpu_info.family >= 0x1A) {
+ fid = pstate.pstatedef2.fid;
+ if (fid > 0x0f)
+ cof = (fid * 5);
+ } else {
+ fid = pstate.pstatedef.fid;
+ cof = 200 * fid / did;
+ }
} else {
t = 0x10;
fid = pstate.pstate.fid;
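Summarizing the frequency derivation this hunk introduces as a standalone helper: the family 1Ah rule of 5 MHz per FID step and the older 200 * fid / did formula come from the change itself; the helper name and structure are illustrative only.

/* Illustrative only: mirrors the core-frequency math used above. */
static int core_freq_mhz(unsigned int family, unsigned int fid, unsigned int did)
{
	if (family >= 0x1A)			/* fam 1Ah: FID in 5 MHz steps, no DID */
		return fid > 0x0f ? fid * 5 : 0;
	return did ? 200 * fid / did : 0;	/* older PSTATEDEF layout */
}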
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index 2d6dce2c8f77..b1e6817f1e54 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -14,6 +14,7 @@ turbostat : turbostat.c
override CFLAGS += -O2 -Wall -Wextra -I../../../include
override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
+override CFLAGS += -DBUILD_BUG_HEADER='"../../../../include/linux/build_bug.h"'
override CFLAGS += -D_FILE_OFFSET_BITS=64
override CFLAGS += -D_FORTIFY_SOURCE=2
@@ -44,10 +45,13 @@ snapshot: turbostat
@echo "#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))" >> $(SNAPSHOT)/bits.h
@echo "#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (sizeof(long long) * 8 - 1 - (h))))" >> $(SNAPSHOT)/bits.h
+ @echo '#define BUILD_BUG_ON(cond) do { enum { compile_time_check ## __COUNTER__ = 1/(!(cond)) }; } while (0)' > $(SNAPSHOT)/build_bug.h
+
@echo PWD=. > $(SNAPSHOT)/Makefile
@echo "CFLAGS += -DMSRHEADER='\"msr-index.h\"'" >> $(SNAPSHOT)/Makefile
@echo "CFLAGS += -DINTEL_FAMILY_HEADER='\"intel-family.h\"'" >> $(SNAPSHOT)/Makefile
- @sed -e's/.*MSRHEADER.*//' -e's/.*INTEL_FAMILY_HEADER.*//' Makefile >> $(SNAPSHOT)/Makefile
+ @echo "CFLAGS += -DBUILD_BUG_HEADER='\"build_bug.h\"'" >> $(SNAPSHOT)/Makefile
+ @sed -e's/.*MSRHEADER.*//' -e's/.*INTEL_FAMILY_HEADER.*//' -e's/.*BUILD_BUG_HEADER.*//' Makefile >> $(SNAPSHOT)/Makefile
@rm -f $(SNAPSHOT).tar.gz
tar cvzf $(SNAPSHOT).tar.gz $(SNAPSHOT)
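The BUILD_BUG_ON replacement written into the snapshot's build_bug.h turns a true condition into a compile-time error by forcing a division by zero inside an enum initializer. A small illustration, with the sizeof checks chosen purely as examples:

#define BUILD_BUG_ON(cond) do { enum { compile_time_check ## __COUNTER__ = 1/(!(cond)) }; } while (0)

void example(void)
{
	BUILD_BUG_ON(sizeof(int) < 2);		/* false condition: compiles cleanly */
	/* BUILD_BUG_ON(sizeof(int) > 2);	   true on typical targets: 1/0 -> compile error */
}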
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 8cdf41906e98..9f5d053d4bc6 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -10,6 +10,7 @@
#define _GNU_SOURCE
#include MSRHEADER
#include INTEL_FAMILY_HEADER
+#include BUILD_BUG_HEADER
#include <stdarg.h>
#include <stdio.h>
#include <err.h>
@@ -38,7 +39,6 @@
#include <stdbool.h>
#include <assert.h>
#include <linux/kernel.h>
-#include <linux/build_bug.h>
#define UNUSED(x) (void)(x)
@@ -5695,9 +5695,6 @@ static void probe_intel_uncore_frequency_cluster(void)
if (access("/sys/devices/system/cpu/intel_uncore_frequency/uncore00/current_freq_khz", R_OK))
return;
- if (quiet)
- return;
-
for (uncore_max_id = 0;; ++uncore_max_id) {
sprintf(path_base, "/sys/devices/system/cpu/intel_uncore_frequency/uncore%02d", uncore_max_id);
@@ -5727,6 +5724,14 @@ static void probe_intel_uncore_frequency_cluster(void)
sprintf(path, "%s/fabric_cluster_id", path_base);
cluster_id = read_sysfs_int(path);
+ sprintf(path, "%s/current_freq_khz", path_base);
+ sprintf(name_buf, "UMHz%d.%d", domain_id, cluster_id);
+
+ add_counter(0, path, name_buf, 0, SCOPE_PACKAGE, COUNTER_K2M, FORMAT_AVERAGE, 0, package_id);
+
+ if (quiet)
+ continue;
+
sprintf(path, "%s/min_freq_khz", path_base);
k = read_sysfs_int(path);
sprintf(path, "%s/max_freq_khz", path_base);
@@ -5743,11 +5748,6 @@ static void probe_intel_uncore_frequency_cluster(void)
sprintf(path, "%s/current_freq_khz", path_base);
k = read_sysfs_int(path);
fprintf(outf, " %d MHz\n", k / 1000);
-
- sprintf(path, "%s/current_freq_khz", path_base);
- sprintf(name_buf, "UMHz%d.%d", domain_id, cluster_id);
-
- add_counter(0, path, name_buf, 0, SCOPE_PACKAGE, COUNTER_K2M, FORMAT_AVERAGE, 0, package_id);
}
}
@@ -8424,7 +8424,7 @@ void cmdline(int argc, char **argv)
* Parse some options early, because they may make other options invalid,
* like adding the MSR counter with --add and at the same time using --no-msr.
*/
- while ((opt = getopt_long_only(argc, argv, "MP", long_options, &option_index)) != -1) {
+ while ((opt = getopt_long_only(argc, argv, "MPn:", long_options, &option_index)) != -1) {
switch (opt) {
case 'M':
no_msr = 1;
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 6584443144de..eaf091a3d331 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -3,6 +3,7 @@
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
+#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sizes.h>
diff --git a/tools/testing/selftests/alsa/Makefile b/tools/testing/selftests/alsa/Makefile
index 5af9ba8a4645..c1ce39874e2b 100644
--- a/tools/testing/selftests/alsa/Makefile
+++ b/tools/testing/selftests/alsa/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
#
-CFLAGS += $(shell pkg-config --cflags alsa)
+CFLAGS += $(shell pkg-config --cflags alsa) $(KHDR_INCLUDES)
LDLIBS += $(shell pkg-config --libs alsa)
ifeq ($(LDLIBS),)
LDLIBS += -lasound
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index e0b3887b3d2d..dd49c1d23a60 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -457,7 +457,7 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
LSKELS := fentry_test.c fexit_test.c fexit_sleep.c atomics.c \
trace_printk.c trace_vprintk.c map_ptr_kern.c \
core_kern.c core_kern_overflow.c test_ringbuf.c \
- test_ringbuf_n.c test_ringbuf_map_key.c
+ test_ringbuf_n.c test_ringbuf_map_key.c test_ringbuf_write.c
# Generate both light skeleton and libbpf skeleton for these
LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test.c \
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
index 4c6f42dae409..da430df45aa4 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
@@ -12,9 +12,11 @@
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
+
#include "test_ringbuf.lskel.h"
#include "test_ringbuf_n.lskel.h"
#include "test_ringbuf_map_key.lskel.h"
+#include "test_ringbuf_write.lskel.h"
#define EDONE 7777
@@ -84,6 +86,58 @@ static void *poll_thread(void *input)
return (void *)(long)ring_buffer__poll(ringbuf, timeout);
}
+static void ringbuf_write_subtest(void)
+{
+ struct test_ringbuf_write_lskel *skel;
+ int page_size = getpagesize();
+ size_t *mmap_ptr;
+ int err, rb_fd;
+
+ skel = test_ringbuf_write_lskel__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->maps.ringbuf.max_entries = 0x4000;
+
+ err = test_ringbuf_write_lskel__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ rb_fd = skel->maps.ringbuf.map_fd;
+
+ mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
+ if (!ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos"))
+ goto cleanup;
+ *mmap_ptr = 0x3000;
+ ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");
+
+ skel->bss->pid = getpid();
+
+ ringbuf = ring_buffer__new(rb_fd, process_sample, NULL, NULL);
+ if (!ASSERT_OK_PTR(ringbuf, "ringbuf_new"))
+ goto cleanup;
+
+ err = test_ringbuf_write_lskel__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup_ringbuf;
+
+ skel->bss->discarded = 0;
+ skel->bss->passed = 0;
+
+ /* trigger exactly two samples */
+ syscall(__NR_getpgid);
+ syscall(__NR_getpgid);
+
+ ASSERT_EQ(skel->bss->discarded, 2, "discarded");
+ ASSERT_EQ(skel->bss->passed, 0, "passed");
+
+ test_ringbuf_write_lskel__detach(skel);
+cleanup_ringbuf:
+ ring_buffer__free(ringbuf);
+cleanup:
+ test_ringbuf_write_lskel__destroy(skel);
+}
+
static void ringbuf_subtest(void)
{
const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
@@ -451,4 +505,6 @@ void test_ringbuf(void)
ringbuf_n_subtest();
if (test__start_subtest("ringbuf_map_key"))
ringbuf_map_key_subtest();
+ if (test__start_subtest("ringbuf_write"))
+ ringbuf_write_subtest();
}
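For orientation, the page the new subtest maps read-write at offset 0 is, to the author's understanding of the BPF ring buffer mmap contract (not spelled out in the patch), the consumer metadata page, whose first word is the consumer position that the test deliberately overwrites with 0x3000. A hedged sketch of that mapping:

/* Sketch: the word the subtest overwrites is assumed to be the consumer position. */
#include <stdint.h>
#include <sys/mman.h>

static volatile uint64_t *map_consumer_pos(int rb_fd, long page_size)
{
	/* Offset 0 of the ringbuf map fd is the user-writable consumer page. */
	void *p = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);

	return p == MAP_FAILED ? NULL : (volatile uint64_t *)p;
}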
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_netkit.c b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c
index 15ee7b2fc410..b9135720024c 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_netkit.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c
@@ -73,6 +73,16 @@ static int create_netkit(int mode, int policy, int peer_policy, int *ifindex,
"up primary");
ASSERT_OK(system("ip addr add dev " netkit_name " 10.0.0.1/24"),
"addr primary");
+
+ if (mode == NETKIT_L3) {
+ ASSERT_EQ(system("ip link set dev " netkit_name
+ " addr ee:ff:bb:cc:aa:dd 2> /dev/null"), 512,
+ "set hwaddress");
+ } else {
+ ASSERT_OK(system("ip link set dev " netkit_name
+ " addr ee:ff:bb:cc:aa:dd"),
+ "set hwaddress");
+ }
if (same_netns) {
ASSERT_OK(system("ip link set dev " netkit_peer " up"),
"up peer");
@@ -89,6 +99,16 @@ static int create_netkit(int mode, int policy, int peer_policy, int *ifindex,
return err;
}
+static void move_netkit(void)
+{
+ ASSERT_OK(system("ip link set " netkit_peer " netns foo"),
+ "move peer");
+ ASSERT_OK(system("ip netns exec foo ip link set dev "
+ netkit_peer " up"), "up peer");
+ ASSERT_OK(system("ip netns exec foo ip addr add dev "
+ netkit_peer " 10.0.0.2/24"), "addr peer");
+}
+
static void destroy_netkit(void)
{
ASSERT_OK(system("ip link del dev " netkit_name), "del primary");
@@ -685,3 +705,77 @@ void serial_test_tc_netkit_neigh_links(void)
serial_test_tc_netkit_neigh_links_target(NETKIT_L2, BPF_NETKIT_PRIMARY);
serial_test_tc_netkit_neigh_links_target(NETKIT_L3, BPF_NETKIT_PRIMARY);
}
+
+static void serial_test_tc_netkit_pkt_type_mode(int mode)
+{
+ LIBBPF_OPTS(bpf_netkit_opts, optl_nk);
+ LIBBPF_OPTS(bpf_tcx_opts, optl_tcx);
+ int err, ifindex, ifindex2;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+
+ err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS,
+ &ifindex, true);
+ if (err)
+ return;
+
+ ifindex2 = if_nametoindex(netkit_peer);
+ ASSERT_NEQ(ifindex, ifindex2, "ifindex_1_2");
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1,
+ BPF_NETKIT_PRIMARY), 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc7,
+ BPF_TCX_INGRESS), 0, "tc7_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 0);
+
+ link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl_nk);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 0);
+
+ link = bpf_program__attach_tcx(skel->progs.tc7, ifindex2, &optl_tcx);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc7 = link;
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 1);
+
+ move_netkit();
+
+ tc_skel_reset_all_seen(skel);
+ skel->bss->set_type = true;
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc7, true, "seen_tc7");
+
+ ASSERT_EQ(skel->bss->seen_host, true, "seen_host");
+ ASSERT_EQ(skel->bss->seen_mcast, true, "seen_mcast");
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ destroy_netkit();
+}
+
+void serial_test_tc_netkit_pkt_type(void)
+{
+ serial_test_tc_netkit_pkt_type_mode(NETKIT_L2);
+ serial_test_tc_netkit_pkt_type_mode(NETKIT_L3);
+}
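One detail worth making explicit: system() returns a raw wait status, so the 512 expected in the NETKIT_L3 branch above corresponds to the ip command exiting with status 2, consistent with the kernel refusing the MAC address change on an L3 netkit device. A tiny illustration of the encoding:

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

int main(void)
{
	int status = system("exit 2");	/* the shell exits with status 2 */

	/* On Linux the exit code sits in bits 8-15, so the raw status is 2 << 8 == 512. */
	printf("raw=%d exit=%d\n", status, WEXITSTATUS(status));
	return 0;
}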
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
index 8269cdee33ae..bf6ca8e3eb13 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
#include <unistd.h>
+#include <pthread.h>
#include <test_progs.h>
#include "uprobe_multi.skel.h"
#include "uprobe_multi_bench.skel.h"
#include "uprobe_multi_usdt.skel.h"
#include "bpf/libbpf_internal.h"
#include "testing_helpers.h"
+#include "../sdt.h"
static char test_data[] = "test_data";
@@ -25,9 +27,17 @@ noinline void uprobe_multi_func_3(void)
asm volatile ("");
}
+noinline void usdt_trigger(void)
+{
+ STAP_PROBE(test, pid_filter_usdt);
+}
+
struct child {
int go[2];
+ int c2p[2]; /* child -> parent channel */
int pid;
+ int tid;
+ pthread_t thread;
};
static void release_child(struct child *child)
@@ -38,6 +48,10 @@ static void release_child(struct child *child)
return;
close(child->go[1]);
close(child->go[0]);
+ if (child->thread)
+ pthread_join(child->thread, NULL);
+ close(child->c2p[0]);
+ close(child->c2p[1]);
if (child->pid > 0)
waitpid(child->pid, &child_status, 0);
}
@@ -63,7 +77,7 @@ static struct child *spawn_child(void)
if (pipe(child.go))
return NULL;
- child.pid = fork();
+ child.pid = child.tid = fork();
if (child.pid < 0) {
release_child(&child);
errno = EINVAL;
@@ -82,6 +96,7 @@ static struct child *spawn_child(void)
uprobe_multi_func_1();
uprobe_multi_func_2();
uprobe_multi_func_3();
+ usdt_trigger();
exit(errno);
}
@@ -89,6 +104,67 @@ static struct child *spawn_child(void)
return &child;
}
+static void *child_thread(void *ctx)
+{
+ struct child *child = ctx;
+ int c = 0, err;
+
+ child->tid = syscall(SYS_gettid);
+
+ /* let parent know we are ready */
+ err = write(child->c2p[1], &c, 1);
+ if (err != 1)
+ pthread_exit(&err);
+
+ /* wait for parent's kick */
+ err = read(child->go[0], &c, 1);
+ if (err != 1)
+ pthread_exit(&err);
+
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+ usdt_trigger();
+
+ err = 0;
+ pthread_exit(&err);
+}
+
+static struct child *spawn_thread(void)
+{
+ static struct child child;
+ int c, err;
+
+ /* pipe to notify child to execute the trigger functions */
+ if (pipe(child.go))
+ return NULL;
+ /* pipe to notify parent that child thread is ready */
+ if (pipe(child.c2p)) {
+ close(child.go[0]);
+ close(child.go[1]);
+ return NULL;
+ }
+
+ child.pid = getpid();
+
+ err = pthread_create(&child.thread, NULL, child_thread, &child);
+ if (err) {
+ err = -errno;
+ close(child.go[0]);
+ close(child.go[1]);
+ close(child.c2p[0]);
+ close(child.c2p[1]);
+ errno = -err;
+ return NULL;
+ }
+
+ err = read(child.c2p[0], &c, 1);
+ if (!ASSERT_EQ(err, 1, "child_thread_ready"))
+ return NULL;
+
+ return &child;
+}
+
static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child)
{
skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
@@ -103,15 +179,23 @@ static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child
* passed at the probe attach.
*/
skel->bss->pid = child ? 0 : getpid();
+ skel->bss->expect_pid = child ? child->pid : 0;
+
+	/* Trigger all probes if we are testing a child *process*, just to make
+	 * sure that PID filtering doesn't let through activations from the
+	 * wrong PIDs; when we test a child *thread*, we skip this to avoid
+	 * double-counting the number of triggering events.
+	 */
+ if (!child || !child->thread) {
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+ usdt_trigger();
+ }
if (child)
kick_child(child);
- /* trigger all probes */
- uprobe_multi_func_1();
- uprobe_multi_func_2();
- uprobe_multi_func_3();
-
/*
* There are 2 entry and 2 exit probes called for each uprobe_multi_func_[123]
* function, and each sleepable probe (6) increments uprobe_multi_sleep_result.
@@ -126,8 +210,12 @@ static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child
ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result");
- if (child)
+ ASSERT_FALSE(skel->bss->bad_pid_seen, "bad_pid_seen");
+
+ if (child) {
ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid");
+ ASSERT_EQ(skel->bss->child_tid, child->tid, "uprobe_multi_child_tid");
+ }
}
static void test_skel_api(void)
@@ -190,8 +278,24 @@ __test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_mul
if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi"))
goto cleanup;
+ /* Attach (uprobe-backed) USDTs */
+ skel->links.usdt_pid = bpf_program__attach_usdt(skel->progs.usdt_pid, pid, binary,
+ "test", "pid_filter_usdt", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt_pid, "attach_usdt_pid"))
+ goto cleanup;
+
+ skel->links.usdt_extra = bpf_program__attach_usdt(skel->progs.usdt_extra, -1, binary,
+ "test", "pid_filter_usdt", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt_extra, "attach_usdt_extra"))
+ goto cleanup;
+
uprobe_multi_test_run(skel, child);
+ ASSERT_FALSE(skel->bss->bad_pid_seen_usdt, "bad_pid_seen_usdt");
+ if (child) {
+ ASSERT_EQ(skel->bss->child_pid_usdt, child->pid, "usdt_multi_child_pid");
+ ASSERT_EQ(skel->bss->child_tid_usdt, child->tid, "usdt_multi_child_tid");
+ }
cleanup:
uprobe_multi__destroy(skel);
}
@@ -210,6 +314,13 @@ test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi
return;
__test_attach_api(binary, pattern, opts, child);
+
+ /* pid filter (thread) */
+ child = spawn_thread();
+ if (!ASSERT_OK_PTR(child, "spawn_thread"))
+ return;
+
+ __test_attach_api(binary, pattern, opts, child);
}
static void test_attach_api_pattern(void)
@@ -397,7 +508,7 @@ static void test_attach_api_fails(void)
link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
if (!ASSERT_ERR(link_fd, "link_fd"))
goto cleanup;
- ASSERT_EQ(link_fd, -ESRCH, "pid_is_wrong");
+ ASSERT_EQ(link_fd, -EINVAL, "pid_is_wrong");
cleanup:
if (link_fd >= 0)
@@ -495,6 +606,13 @@ static void test_link_api(void)
return;
__test_link_api(child);
+
+ /* pid filter (thread) */
+ child = spawn_thread();
+ if (!ASSERT_OK_PTR(child, "spawn_thread"))
+ return;
+
+ __test_link_api(child);
}
static void test_bench_attach_uprobe(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index c60db8beeb73..98ef39efa77e 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -53,6 +53,7 @@
#include "verifier_movsx.skel.h"
#include "verifier_netfilter_ctx.skel.h"
#include "verifier_netfilter_retcode.skel.h"
+#include "verifier_or_jmp32_k.skel.h"
#include "verifier_precision.skel.h"
#include "verifier_prevent_map_lookup.skel.h"
#include "verifier_raw_stack.skel.h"
@@ -67,6 +68,7 @@
#include "verifier_search_pruning.skel.h"
#include "verifier_sock.skel.h"
#include "verifier_sock_addr.skel.h"
+#include "verifier_sockmap_mutate.skel.h"
#include "verifier_spill_fill.skel.h"
#include "verifier_spin_lock.skel.h"
#include "verifier_stack_ptr.skel.h"
@@ -169,6 +171,7 @@ void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
void test_verifier_movsx(void) { RUN(verifier_movsx); }
void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); }
void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); }
+void test_verifier_or_jmp32_k(void) { RUN(verifier_or_jmp32_k); }
void test_verifier_precision(void) { RUN(verifier_precision); }
void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); }
void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }
@@ -183,6 +186,7 @@ void test_verifier_sdiv(void) { RUN(verifier_sdiv); }
void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); }
void test_verifier_sock(void) { RUN(verifier_sock); }
void test_verifier_sock_addr(void) { RUN(verifier_sock_addr); }
+void test_verifier_sockmap_mutate(void) { RUN(verifier_sockmap_mutate); }
void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
new file mode 100644
index 000000000000..350513c0e4c9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+} ringbuf SEC(".maps");
+
+/* inputs */
+int pid = 0;
+
+/* outputs */
+long passed = 0;
+long discarded = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int test_ringbuf_write(void *ctx)
+{
+ int *foo, cur_pid = bpf_get_current_pid_tgid() >> 32;
+ void *sample1, *sample2;
+
+ if (cur_pid != pid)
+ return 0;
+
+ sample1 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0);
+ if (!sample1)
+ return 0;
+ /* first one can pass */
+ sample2 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0);
+ if (!sample2) {
+ bpf_ringbuf_discard(sample1, 0);
+ __sync_fetch_and_add(&discarded, 1);
+ return 0;
+ }
+ /* second one must not */
+ __sync_fetch_and_add(&passed, 1);
+ foo = sample2 + 4084;
+ *foo = 256;
+ bpf_ringbuf_discard(sample1, 0);
+ bpf_ringbuf_discard(sample2, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
index 02e718f06e0f..40531e56776e 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
@@ -84,7 +84,7 @@ int BPF_PROG(trace_tcp_connect, struct sock *sk)
}
SEC("fexit/inet_csk_accept")
-int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern,
+int BPF_PROG(inet_csk_accept, struct sock *sk, struct proto_accept_arg *arg,
struct sock *accepted_sk)
{
set_task_info(accepted_sk);
diff --git a/tools/testing/selftests/bpf/progs/test_tc_link.c b/tools/testing/selftests/bpf/progs/test_tc_link.c
index 992400acb957..ab3eae3d6af8 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_link.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_link.c
@@ -4,7 +4,8 @@
#include <linux/bpf.h>
#include <linux/if_ether.h>
-
+#include <linux/stddef.h>
+#include <linux/if_packet.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
@@ -16,7 +17,13 @@ bool seen_tc3;
bool seen_tc4;
bool seen_tc5;
bool seen_tc6;
+bool seen_tc7;
+
+bool set_type;
+
bool seen_eth;
+bool seen_host;
+bool seen_mcast;
SEC("tc/ingress")
int tc1(struct __sk_buff *skb)
@@ -28,8 +35,16 @@ int tc1(struct __sk_buff *skb)
if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)))
goto out;
seen_eth = eth.h_proto == bpf_htons(ETH_P_IP);
+ seen_host = skb->pkt_type == PACKET_HOST;
+ if (seen_host && set_type) {
+ eth.h_dest[0] = 4;
+ if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0))
+ goto fail;
+ bpf_skb_change_type(skb, PACKET_MULTICAST);
+ }
out:
seen_tc1 = true;
+fail:
return TCX_NEXT;
}
@@ -67,3 +82,21 @@ int tc6(struct __sk_buff *skb)
seen_tc6 = true;
return TCX_PASS;
}
+
+SEC("tc/ingress")
+int tc7(struct __sk_buff *skb)
+{
+ struct ethhdr eth = {};
+
+ if (skb->protocol != __bpf_constant_htons(ETH_P_IP))
+ goto out;
+ if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)))
+ goto out;
+ if (eth.h_dest[0] == 4 && set_type) {
+ seen_mcast = skb->pkt_type == PACKET_MULTICAST;
+ bpf_skb_change_type(skb, PACKET_HOST);
+ }
+out:
+ seen_tc7 = true;
+ return TCX_PASS;
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi.c b/tools/testing/selftests/bpf/progs/uprobe_multi.c
index 419d9aa28fce..44190efcdba2 100644
--- a/tools/testing/selftests/bpf/progs/uprobe_multi.c
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include <stdbool.h>
+#include <bpf/usdt.bpf.h>
char _license[] SEC("license") = "GPL";
@@ -22,6 +22,13 @@ __u64 uprobe_multi_sleep_result = 0;
int pid = 0;
int child_pid = 0;
+int child_tid = 0;
+int child_pid_usdt = 0;
+int child_tid_usdt = 0;
+
+int expect_pid = 0;
+bool bad_pid_seen = false;
+bool bad_pid_seen_usdt = false;
bool test_cookie = false;
void *user_ptr = 0;
@@ -36,11 +43,19 @@ static __always_inline bool verify_sleepable_user_copy(void)
static void uprobe_multi_check(void *ctx, bool is_return, bool is_sleep)
{
- child_pid = bpf_get_current_pid_tgid() >> 32;
+ __u64 cur_pid_tgid = bpf_get_current_pid_tgid();
+ __u32 cur_pid;
- if (pid && child_pid != pid)
+ cur_pid = cur_pid_tgid >> 32;
+ if (pid && cur_pid != pid)
return;
+ if (expect_pid && cur_pid != expect_pid)
+ bad_pid_seen = true;
+
+ child_pid = cur_pid_tgid >> 32;
+ child_tid = (__u32)cur_pid_tgid;
+
__u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0;
__u64 addr = bpf_get_func_ip(ctx);
@@ -97,5 +112,32 @@ int uretprobe_sleep(struct pt_regs *ctx)
SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*")
int uprobe_extra(struct pt_regs *ctx)
{
+ /* we need this one just to mix PID-filtered and global uprobes */
+ return 0;
+}
+
+SEC("usdt")
+int usdt_pid(struct pt_regs *ctx)
+{
+ __u64 cur_pid_tgid = bpf_get_current_pid_tgid();
+ __u32 cur_pid;
+
+ cur_pid = cur_pid_tgid >> 32;
+ if (pid && cur_pid != pid)
+ return 0;
+
+ if (expect_pid && cur_pid != expect_pid)
+ bad_pid_seen_usdt = true;
+
+ child_pid_usdt = cur_pid_tgid >> 32;
+ child_tid_usdt = (__u32)cur_pid_tgid;
+
+ return 0;
+}
+
+SEC("usdt")
+int usdt_extra(struct pt_regs *ctx)
+{
+ /* we need this one just to mix PID-filtered and global USDT probes */
return 0;
}
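The PID/TID bookkeeping that both the uprobe and USDT programs above rely on is the standard decomposition of bpf_get_current_pid_tgid(): the upper 32 bits carry the tgid (what userspace calls the process PID), the lower 32 bits the thread id. A compact restatement, with the helper name here purely illustrative:

/* Illustrative helper: split the return value the same way the programs above do. */
static void split_pid_tgid(unsigned long long pid_tgid, unsigned int *pid, unsigned int *tid)
{
	*pid = pid_tgid >> 32;		/* tgid: the userspace-visible process PID */
	*tid = (unsigned int)pid_tgid;	/* lower 32 bits: the thread id */
}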
diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
index bd676d7e615f..80c737b6d340 100644
--- a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
+++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
@@ -274,6 +274,58 @@ static __naked void iter_limit_bug_cb(void)
);
}
+int tmp_var;
+SEC("socket")
+__failure __msg("infinite loop detected at insn 2")
+__naked void jgt_imm64_and_may_goto(void)
+{
+ asm volatile (" \
+ r0 = %[tmp_var] ll; \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short -3; /* off -3 */ \
+ .long 0; /* imm */ \
+ if r0 > 10 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" :: __imm_addr(tmp_var)
+ : __clobber_all);
+}
+
+SEC("socket")
+__failure __msg("infinite loop detected at insn 1")
+__naked void may_goto_self(void)
+{
+ asm volatile (" \
+ r0 = *(u32 *)(r10 - 4); \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short -1; /* off -1 */ \
+ .long 0; /* imm */ \
+ if r0 > 10 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__success __retval(0)
+__naked void may_goto_neg_off(void)
+{
+ asm volatile (" \
+ r0 = *(u32 *)(r10 - 4); \
+ goto l0_%=; \
+ goto l1_%=; \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short -2; /* off -2 */ \
+ .long 0; /* imm */ \
+ if r0 > 10 goto l0_%=; \
+l1_%=: r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
SEC("tc")
__failure
__flag(BPF_F_TEST_STATE_FREQ)
@@ -307,6 +359,100 @@ int iter_limit_bug(struct __sk_buff *skb)
return 0;
}
+SEC("socket")
+__success __retval(0)
+__naked void ja_and_may_goto(void)
+{
+ asm volatile (" \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_common);
+}
+
+SEC("socket")
+__success __retval(0)
+__naked void ja_and_may_goto2(void)
+{
+ asm volatile (" \
+l0_%=: r0 = 0; \
+ .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_common);
+}
+
+SEC("socket")
+__success __retval(0)
+__naked void jlt_and_may_goto(void)
+{
+ asm volatile (" \
+l0_%=: call %[bpf_jiffies64]; \
+ .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ if r0 < 10 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" :: __imm(bpf_jiffies64)
+ : __clobber_all);
+}
+
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+ defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
+SEC("socket")
+__success __retval(0)
+__naked void gotol_and_may_goto(void)
+{
+ asm volatile (" \
+l0_%=: r0 = 0; \
+ .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ gotol l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_common);
+}
+#endif
+
+SEC("socket")
+__success __retval(0)
+__naked void ja_and_may_goto_subprog(void)
+{
+ asm volatile (" \
+ call subprog_with_may_goto; \
+ exit; \
+" ::: __clobber_all);
+}
+
+static __naked __noinline __used
+void subprog_with_may_goto(void)
+{
+ asm volatile (" \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
#define ARR_SZ 1000000
int zero;
char arr[ARR_SZ];
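The .byte/.short/.long runs in the tests above hand-assemble may_goto instructions. As a reminder of what those eight bytes are, the layout below mirrors struct bpf_insn; reading 0xe5 as BPF_JMP | BPF_JCOND is the editor's interpretation of the encoding, not something stated in the hunk.

/* Illustrative view of one hand-assembled may_goto instruction. */
struct raw_may_goto {
	unsigned char  code;	/* 0xe5: BPF_JMP | BPF_JCOND */
	unsigned char  regs;	/* dst/src register nibbles, both zero */
	short          off;	/* signed jump offset, e.g. -1 to target the insn itself */
	int            imm;	/* zero */
};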
diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c
index cbb9d6714f53..028ec855587b 100644
--- a/tools/testing/selftests/bpf/progs/verifier_movsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c
@@ -224,6 +224,69 @@ l0_%=: \
: __clobber_all);
}
+SEC("socket")
+__description("MOV32SX, S8, var_off u32_max")
+__failure __msg("infinite loop detected")
+__failure_unpriv __msg_unpriv("back-edge from insn 2 to 0")
+__naked void mov64sx_s32_varoff_1(void)
+{
+ asm volatile (" \
+l0_%=: \
+ r3 = *(u8 *)(r10 -387); \
+ w7 = (s8)w3; \
+ if w7 >= 0x2533823b goto l0_%=; \
+ w0 = 0; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S8, var_off not u32_max, positive after s8 extension")
+__success __retval(0)
+__failure_unpriv __msg_unpriv("frame pointer is read only")
+__naked void mov64sx_s32_varoff_2(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r3 = r0; \
+ r3 &= 0xf; \
+ w7 = (s8)w3; \
+ if w7 s>= 16 goto l0_%=; \
+ w0 = 0; \
+ exit; \
+l0_%=: \
+ r10 = 1; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S8, var_off not u32_max, negative after s8 extension")
+__success __retval(0)
+__failure_unpriv __msg_unpriv("frame pointer is read only")
+__naked void mov64sx_s32_varoff_3(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r3 = r0; \
+ r3 &= 0xf; \
+ r3 |= 0x80; \
+ w7 = (s8)w3; \
+ if w7 s>= -5 goto l0_%=; \
+ w0 = 0; \
+ exit; \
+l0_%=: \
+ r10 = 1; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
#else
SEC("socket")
diff --git a/tools/testing/selftests/bpf/progs/verifier_or_jmp32_k.c b/tools/testing/selftests/bpf/progs/verifier_or_jmp32_k.c
new file mode 100644
index 000000000000..f37713a265ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_or_jmp32_k.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("or_jmp32_k: bit ops + branch on unknown value")
+__failure
+__msg("R0 invalid mem access 'scalar'")
+__naked void or_jmp32_k(void)
+{
+ asm volatile (" \
+ r0 = 0xffffffff; \
+ r0 /= 1; \
+ r1 = 0; \
+ w1 = -1; \
+ w1 >>= 1; \
+ w0 &= w1; \
+ w0 |= 2; \
+ if w0 != 0x7ffffffd goto l1; \
+ r0 = 1; \
+ exit; \
+l3: \
+ r0 = 5; \
+ *(u64*)(r0 - 8) = r0; \
+ exit; \
+l2: \
+ w0 -= 0xe; \
+ if w0 == 1 goto l3; \
+ r0 = 4; \
+ exit; \
+l1: \
+ w0 -= 0x7ffffff0; \
+ if w0 s>= 0xe goto l2; \
+ r0 = 3; \
+ exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c b/tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c
new file mode 100644
index 000000000000..fe4b123187b8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "bpf_misc.h"
+
+#define __always_unused __attribute__((unused))
+
+char _license[] SEC("license") = "GPL";
+
+struct sock {
+} __attribute__((preserve_access_index));
+
+struct bpf_iter__sockmap {
+ union {
+ struct sock *sk;
+ };
+} __attribute__((preserve_access_index));
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKHASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} sockhash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} sockmap SEC(".maps");
+
+enum { CG_OK = 1 };
+
+int zero = 0;
+
+static __always_inline void test_sockmap_delete(void)
+{
+ bpf_map_delete_elem(&sockmap, &zero);
+ bpf_map_delete_elem(&sockhash, &zero);
+}
+
+static __always_inline void test_sockmap_update(void *sk)
+{
+ if (sk) {
+ bpf_map_update_elem(&sockmap, &zero, sk, BPF_ANY);
+ bpf_map_update_elem(&sockhash, &zero, sk, BPF_ANY);
+ }
+}
+
+static __always_inline void test_sockmap_lookup_and_update(void)
+{
+ struct bpf_sock *sk = bpf_map_lookup_elem(&sockmap, &zero);
+
+ if (sk) {
+ test_sockmap_update(sk);
+ bpf_sk_release(sk);
+ }
+}
+
+static __always_inline void test_sockmap_mutate(void *sk)
+{
+ test_sockmap_delete();
+ test_sockmap_update(sk);
+}
+
+static __always_inline void test_sockmap_lookup_and_mutate(void)
+{
+ test_sockmap_delete();
+ test_sockmap_lookup_and_update();
+}
+
+SEC("action")
+__success
+int test_sched_act(struct __sk_buff *skb)
+{
+ test_sockmap_mutate(skb->sk);
+ return 0;
+}
+
+SEC("classifier")
+__success
+int test_sched_cls(struct __sk_buff *skb)
+{
+ test_sockmap_mutate(skb->sk);
+ return 0;
+}
+
+SEC("flow_dissector")
+__success
+int test_flow_dissector_delete(struct __sk_buff *skb __always_unused)
+{
+ test_sockmap_delete();
+ return 0;
+}
+
+SEC("flow_dissector")
+__failure __msg("program of this type cannot use helper bpf_sk_release")
+int test_flow_dissector_update(struct __sk_buff *skb __always_unused)
+{
+ test_sockmap_lookup_and_update(); /* no access to skb->sk */
+ return 0;
+}
+
+SEC("iter/sockmap")
+__success
+int test_trace_iter(struct bpf_iter__sockmap *ctx)
+{
+ test_sockmap_mutate(ctx->sk);
+ return 0;
+}
+
+SEC("raw_tp/kfree")
+__failure __msg("cannot update sockmap in this context")
+int test_raw_tp_delete(const void *ctx __always_unused)
+{
+ test_sockmap_delete();
+ return 0;
+}
+
+SEC("raw_tp/kfree")
+__failure __msg("cannot update sockmap in this context")
+int test_raw_tp_update(const void *ctx __always_unused)
+{
+ test_sockmap_lookup_and_update();
+ return 0;
+}
+
+SEC("sk_lookup")
+__success
+int test_sk_lookup(struct bpf_sk_lookup *ctx)
+{
+ test_sockmap_mutate(ctx->sk);
+ return 0;
+}
+
+SEC("sk_reuseport")
+__success
+int test_sk_reuseport(struct sk_reuseport_md *ctx)
+{
+ test_sockmap_mutate(ctx->sk);
+ return 0;
+}
+
+SEC("socket")
+__success
+int test_socket_filter(struct __sk_buff *skb)
+{
+ test_sockmap_mutate(skb->sk);
+ return 0;
+}
+
+SEC("sockops")
+__success
+int test_sockops_delete(struct bpf_sock_ops *ctx __always_unused)
+{
+ test_sockmap_delete();
+ return CG_OK;
+}
+
+SEC("sockops")
+__failure __msg("cannot update sockmap in this context")
+int test_sockops_update(struct bpf_sock_ops *ctx)
+{
+ test_sockmap_update(ctx->sk);
+ return CG_OK;
+}
+
+SEC("sockops")
+__success
+int test_sockops_update_dedicated(struct bpf_sock_ops *ctx)
+{
+ bpf_sock_map_update(ctx, &sockmap, &zero, BPF_ANY);
+ bpf_sock_hash_update(ctx, &sockhash, &zero, BPF_ANY);
+ return CG_OK;
+}
+
+SEC("xdp")
+__success
+int test_xdp(struct xdp_md *ctx __always_unused)
+{
+ test_sockmap_lookup_and_mutate();
+ return XDP_PASS;
+}
diff --git a/tools/testing/selftests/cachestat/test_cachestat.c b/tools/testing/selftests/cachestat/test_cachestat.c
index b171fd53b004..632ab44737ec 100644
--- a/tools/testing/selftests/cachestat/test_cachestat.c
+++ b/tools/testing/selftests/cachestat/test_cachestat.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__ // Use ll64
#include <stdio.h>
#include <stdbool.h>
diff --git a/tools/testing/selftests/drivers/net/virtio_net/config b/tools/testing/selftests/drivers/net/virtio_net/config
index f35de0542b60..bcf7555eaffe 100644
--- a/tools/testing/selftests/drivers/net/virtio_net/config
+++ b/tools/testing/selftests/drivers/net/virtio_net/config
@@ -1,2 +1,8 @@
-CONFIG_VIRTIO_NET=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_CGROUP_BPF=y
+CONFIG_IPV6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_VRF=m
CONFIG_VIRTIO_DEBUG=y
+CONFIG_VIRTIO_NET=y
diff --git a/tools/testing/selftests/fchmodat2/Makefile b/tools/testing/selftests/fchmodat2/Makefile
index 71ec34bf1501..4373cea79b79 100644
--- a/tools/testing/selftests/fchmodat2/Makefile
+++ b/tools/testing/selftests/fchmodat2/Makefile
@@ -1,6 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-or-later
-CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined -static-libasan $(KHDR_INCLUDES)
+CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined $(KHDR_INCLUDES)
+
+# gcc requires -static-libasan in order to ensure that Address Sanitizer's
+# library is the first one loaded. However, clang already statically links the
+# Address Sanitizer if -fsanitize is specified. Therefore, simply omit
+# -static-libasan for clang builds.
+ifeq ($(LLVM),)
+ CFLAGS += -static-libasan
+endif
+
TEST_GEN_PROGS := fchmodat2_test
include ../lib.mk
diff --git a/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c b/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
index 759f86e7d263..2862aae58b79 100644
--- a/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
+++ b/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__ // Use ll64
#include <inttypes.h>
#include <unistd.h>
diff --git a/tools/testing/selftests/filesystems/statmount/statmount_test.c b/tools/testing/selftests/filesystems/statmount/statmount_test.c
index e6d7c4f1c85b..e8c019d72cbf 100644
--- a/tools/testing/selftests/filesystems/statmount/statmount_test.c
+++ b/tools/testing/selftests/filesystems/statmount/statmount_test.c
@@ -125,8 +125,16 @@ static uint32_t old_root_id, old_parent_id;
static void cleanup_namespace(void)
{
- fchdir(orig_root);
- chroot(".");
+ int ret;
+
+ ret = fchdir(orig_root);
+ if (ret == -1)
+ ksft_perror("fchdir to original root");
+
+ ret = chroot(".");
+ if (ret == -1)
+ ksft_perror("chroot to original root");
+
umount2(root_mntpoint, MNT_DETACH);
rmdir(root_mntpoint);
}
diff --git a/tools/testing/selftests/ftrace/config b/tools/testing/selftests/ftrace/config
index e59d985eeff0..048a312abf40 100644
--- a/tools/testing/selftests/ftrace/config
+++ b/tools/testing/selftests/ftrace/config
@@ -1,16 +1,28 @@
-CONFIG_KPROBES=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_EPROBE_EVENTS=y
+CONFIG_FPROBE=y
+CONFIG_FPROBE_EVENTS=y
CONFIG_FTRACE=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_FUNCTION_GRAPH_RETVAL=y
CONFIG_FUNCTION_PROFILER=y
-CONFIG_TRACER_SNAPSHOT=y
-CONFIG_STACK_TRACER=y
CONFIG_HIST_TRIGGERS=y
-CONFIG_SCHED_TRACER=y
-CONFIG_PREEMPT_TRACER=y
CONFIG_IRQSOFF_TRACER=y
-CONFIG_PREEMPTIRQ_DELAY_TEST=m
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KPROBES=y
+CONFIG_KPROBE_EVENTS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_PREEMPTIRQ_DELAY_TEST=m
+CONFIG_PREEMPT_TRACER=y
+CONFIG_PROBE_EVENTS_BTF_ARGS=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_FTRACE_DIRECT=m
CONFIG_SAMPLE_TRACE_PRINTK=m
-CONFIG_KALLSYMS_ALL=y
+CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
+CONFIG_TRACER_SNAPSHOT=y
+CONFIG_UPROBES=y
+CONFIG_UPROBE_EVENTS=y
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
index d3a79da215c8..5f72abe6fa79 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: Generic dynamic event - check if duplicate events are caught
-# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README
+# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README events/syscalls/sys_enter_openat
echo 0 > events/enable
diff --git a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
index 3f74c09c56b6..118247b8dd84 100644
--- a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
+++ b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
@@ -10,7 +10,6 @@ fail() { #msg
}
sample_events() {
- echo > trace
echo 1 > events/kmem/kmem_cache_free/enable
echo 1 > tracing_on
ls > /dev/null
@@ -22,6 +21,7 @@ echo 0 > tracing_on
echo 0 > events/enable
echo "Get the most frequently calling function"
+echo > trace
sample_events
target_func=`cat trace | grep -o 'call_site=\([^+]*\)' | sed 's/call_site=//' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'`
@@ -32,7 +32,16 @@ echo > trace
echo "Test event filter function name"
echo "call_site.function == $target_func" > events/kmem/kmem_cache_free/filter
+
+sample_events
+max_retry=10
+while [ `grep kmem_cache_free trace| wc -l` -eq 0 ]; do
sample_events
+max_retry=$((max_retry - 1))
+if [ $max_retry -eq 0 ]; then
+ exit_fail
+fi
+done
hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l`
misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l`
@@ -49,7 +58,16 @@ address=`grep " ${target_func}\$" /proc/kallsyms | cut -d' ' -f1`
echo "Test event filter function address"
echo "call_site.function == 0x$address" > events/kmem/kmem_cache_free/filter
+echo > trace
+sample_events
+max_retry=10
+while [ `grep kmem_cache_free trace| wc -l` -eq 0 ]; do
sample_events
+max_retry=$((max_retry - 1))
+if [ $max_retry -eq 0 ]; then
+ exit_fail
+fi
+done
hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l`
misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l`
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
index 1f6981ef7afa..ba19b81cef39 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
@@ -30,7 +30,8 @@ find_dot_func() {
fi
grep " [tT] .*\.isra\..*" /proc/kallsyms | cut -f 3 -d " " | while read f; do
- if grep -s $f available_filter_functions; then
+ cnt=`grep -s $f available_filter_functions | wc -l`;
+ if [ $cnt -eq 1 ]; then
echo $f
break
fi
diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile
index 11e157d7533b..78ab2cd111f6 100644
--- a/tools/testing/selftests/futex/Makefile
+++ b/tools/testing/selftests/futex/Makefile
@@ -3,8 +3,6 @@ SUBDIRS := functional
TEST_PROGS := run.sh
-.PHONY: all clean
-
include ../lib.mk
all:
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index a392d0917b4e..994fa3468f17 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
INCLUDES := -I../include -I../../ $(KHDR_INCLUDES)
-CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES) $(KHDR_INCLUDES)
+CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE= -pthread $(INCLUDES) $(KHDR_INCLUDES)
LDLIBS := -lpthread -lrt
LOCAL_HDRS := \
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi.c b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
index 7f3ca5c78df1..215c6cb539b4 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
@@ -360,7 +360,7 @@ out:
int main(int argc, char *argv[])
{
- const char *test_name;
+ char *test_name;
int c, ret;
while ((c = getopt(argc, argv, "bchlot:v:")) != -1) {
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index ce8ff8e8ce3a..ac280dcba996 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -183,6 +183,7 @@ TEST_GEN_PROGS_s390x += s390x/sync_regs_test
TEST_GEN_PROGS_s390x += s390x/tprot
TEST_GEN_PROGS_s390x += s390x/cmma_test
TEST_GEN_PROGS_s390x += s390x/debug_test
+TEST_GEN_PROGS_s390x += s390x/shared_zeropage_test
TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += guest_print_test
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 8eb57de0b587..c0c7c1fe93f9 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -277,6 +277,7 @@ struct kvm_x86_cpu_property {
#define X86_PROPERTY_MAX_EXT_LEAF KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
#define X86_PROPERTY_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
#define X86_PROPERTY_MAX_VIRT_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
+#define X86_PROPERTY_GUEST_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23)
#define X86_PROPERTY_SEV_C_BIT KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
#define X86_PROPERTY_PHYS_ADDR_REDUCTION KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
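The X86_PROPERTY_GUEST_MAX_PHY_ADDR property added above reads CPUID leaf 0x80000008, EAX bits 16-23. A hedged host-side sketch of extracting the same fields; on CPUs that do not report a guest size the field simply reads back as zero, which is why the library code below falls back to the raw MAXPHYADDR.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x80000008: EAX[7:0] = MAXPHYADDR, EAX[23:16] = guest MAXPHYADDR. */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("MAXPHYADDR=%u guest MAXPHYADDR=%u\n", eax & 0xff, (eax >> 16) & 0xff);
	return 0;
}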
diff --git a/tools/testing/selftests/kvm/lib/riscv/ucall.c b/tools/testing/selftests/kvm/lib/riscv/ucall.c
index 14ee17151a59..b5035c63d516 100644
--- a/tools/testing/selftests/kvm/lib/riscv/ucall.c
+++ b/tools/testing/selftests/kvm/lib/riscv/ucall.c
@@ -9,6 +9,7 @@
#include "kvm_util.h"
#include "processor.h"
+#include "sbi.h"
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index c664e446136b..594b061aef52 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1247,9 +1247,20 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
{
const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
unsigned long ht_gfn, max_gfn, max_pfn;
- uint8_t maxphyaddr;
+ uint8_t maxphyaddr, guest_maxphyaddr;
- max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
+ /*
+ * Use "guest MAXPHYADDR" from KVM if it's available. Guest MAXPHYADDR
+ * enumerates the max _mappable_ GPA, which can be less than the raw
+ * MAXPHYADDR, e.g. if MAXPHYADDR=52, KVM is using TDP, and the CPU
+ * doesn't support 5-level TDP.
+ */
+ guest_maxphyaddr = kvm_cpu_property(X86_PROPERTY_GUEST_MAX_PHY_ADDR);
+ guest_maxphyaddr = guest_maxphyaddr ?: vm->pa_bits;
+ TEST_ASSERT(guest_maxphyaddr <= vm->pa_bits,
+ "Guest MAXPHYADDR should never be greater than raw MAXPHYADDR");
+
+ max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1;
/* Avoid reserved HyperTransport region on AMD processors. */
if (!host_cpu_is_amd)
diff --git a/tools/testing/selftests/kvm/riscv/ebreak_test.c b/tools/testing/selftests/kvm/riscv/ebreak_test.c
index 823c132069b4..0e0712854953 100644
--- a/tools/testing/selftests/kvm/riscv/ebreak_test.c
+++ b/tools/testing/selftests/kvm/riscv/ebreak_test.c
@@ -6,6 +6,7 @@
*
*/
#include "kvm_util.h"
+#include "ucall_common.h"
#define LABEL_ADDRESS(v) ((uint64_t)&(v))
diff --git a/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c b/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
index 69bb94e6b227..f299cbfd23ca 100644
--- a/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
+++ b/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
@@ -15,6 +15,7 @@
#include "processor.h"
#include "sbi.h"
#include "arch_timer.h"
+#include "ucall_common.h"
/* Maximum counters(firmware + hardware) */
#define RISCV_MAX_PMU_COUNTERS 64
diff --git a/tools/testing/selftests/kvm/s390x/shared_zeropage_test.c b/tools/testing/selftests/kvm/s390x/shared_zeropage_test.c
new file mode 100644
index 000000000000..bba0d9a6dcc8
--- /dev/null
+++ b/tools/testing/selftests/kvm/s390x/shared_zeropage_test.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test shared zeropage handling (with/without storage keys)
+ *
+ * Copyright (C) 2024, Red Hat, Inc.
+ */
+#include <sys/mman.h>
+
+#include <linux/fs.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "kselftest.h"
+#include "ucall_common.h"
+
+static void set_storage_key(void *addr, uint8_t skey)
+{
+ asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
+}
+
+static void guest_code(void)
+{
+ /* Issue some storage key instruction. */
+ set_storage_key((void *)0, 0x98);
+ GUEST_DONE();
+}
+
+/*
+ * Returns 1 if the shared zeropage is mapped, 0 if something else is mapped.
+ * Returns < 0 on error or if nothing is mapped.
+ */
+static int maps_shared_zeropage(int pagemap_fd, void *addr)
+{
+ struct page_region region;
+ struct pm_scan_arg arg = {
+ .start = (uintptr_t)addr,
+ .end = (uintptr_t)addr + 4096,
+ .vec = (uintptr_t)&region,
+ .vec_len = 1,
+ .size = sizeof(struct pm_scan_arg),
+ .category_mask = PAGE_IS_PFNZERO,
+ .category_anyof_mask = PAGE_IS_PRESENT,
+ .return_mask = PAGE_IS_PFNZERO,
+ };
+ return ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
+}
+
+int main(int argc, char *argv[])
+{
+ char *mem, *page0, *page1, *page2, tmp;
+ const size_t pagesize = getpagesize();
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ struct ucall uc;
+ int pagemap_fd;
+
+ ksft_print_header();
+ ksft_set_plan(3);
+
+ /*
+ * We'll use memory that is not mapped into the VM for simplicity.
+ * Shared zeropages are enabled/disabled per-process.
+ */
+ mem = mmap(0, 3 * pagesize, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
+ TEST_ASSERT(mem != MAP_FAILED, "mmap() failed");
+
+ /* Disable THP. Ignore errors on older kernels. */
+ madvise(mem, 3 * pagesize, MADV_NOHUGEPAGE);
+
+ page0 = mem;
+ page1 = page0 + pagesize;
+ page2 = page1 + pagesize;
+
+ /* Can we even detect shared zeropages? */
+ pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+ TEST_REQUIRE(pagemap_fd >= 0);
+
+ tmp = *page0;
+ asm volatile("" : "+r" (tmp));
+ TEST_REQUIRE(maps_shared_zeropage(pagemap_fd, page0) == 1);
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+ /* Verify that we get the shared zeropage after VM creation. */
+ tmp = *page1;
+ asm volatile("" : "+r" (tmp));
+ ksft_test_result(maps_shared_zeropage(pagemap_fd, page1) == 1,
+ "Shared zeropages should be enabled\n");
+
+ /*
+ * Let our VM execute a storage key instruction that should
+ * unshare all shared zeropages.
+ */
+ vcpu_run(vcpu);
+ get_ucall(vcpu, &uc);
+ TEST_ASSERT_EQ(uc.cmd, UCALL_DONE);
+
+ /* Verify that we don't have a shared zeropage anymore. */
+ ksft_test_result(!maps_shared_zeropage(pagemap_fd, page1),
+ "Shared zeropage should be gone\n");
+
+ /* Verify that we don't get any new shared zeropages. */
+ tmp = *page2;
+ asm volatile("" : "+r" (tmp));
+ ksft_test_result(!maps_shared_zeropage(pagemap_fd, page2),
+ "Shared zeropages should be disabled\n");
+
+ kvm_vm_free(vm);
+
+ ksft_finished();
+}
diff --git a/tools/testing/selftests/kvm/x86_64/sev_init2_tests.c b/tools/testing/selftests/kvm/x86_64/sev_init2_tests.c
index 7a4a61be119b..3fb967f40c6a 100644
--- a/tools/testing/selftests/kvm/x86_64/sev_init2_tests.c
+++ b/tools/testing/selftests/kvm/x86_64/sev_init2_tests.c
@@ -105,11 +105,11 @@ void test_features(uint32_t vm_type, uint64_t supported_features)
int i;
for (i = 0; i < 64; i++) {
- if (!(supported_features & (1u << i)))
+ if (!(supported_features & BIT_ULL(i)))
test_init2_invalid(vm_type,
&(struct kvm_sev_init){ .vmsa_features = BIT_ULL(i) },
"unknown feature");
- else if (KNOWN_FEATURES & (1u << i))
+ else if (KNOWN_FEATURES & BIT_ULL(i))
test_init2(vm_type,
&(struct kvm_sev_init){ .vmsa_features = BIT_ULL(i) });
}
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
index 6b5a9ff88c3d..7d063c652be1 100644
--- a/tools/testing/selftests/landlock/fs_test.c
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -35,6 +35,7 @@
* See https://sourceware.org/glibc/wiki/Synchronizing_Headers.
*/
#include <linux/fs.h>
+#include <linux/mount.h>
#include "common.h"
@@ -47,6 +48,13 @@ int renameat2(int olddirfd, const char *oldpath, int newdirfd,
}
#endif
+#ifndef open_tree
+int open_tree(int dfd, const char *filename, unsigned int flags)
+{
+ return syscall(__NR_open_tree, dfd, filename, flags);
+}
+#endif
+
#ifndef RENAME_EXCHANGE
#define RENAME_EXCHANGE (1 << 1)
#endif
@@ -2400,6 +2408,43 @@ TEST_F_FORK(layout1, refer_denied_by_default4)
layer_dir_s1d1_refer);
}
+/*
+ * Tests walking through a denied root mount.
+ */
+TEST_F_FORK(layout1, refer_mount_root_deny)
+{
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = LANDLOCK_ACCESS_FS_MAKE_DIR,
+ };
+ int root_fd, ruleset_fd;
+
+ /* Creates a mount object from a non-mount point. */
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ root_fd =
+ open_tree(AT_FDCWD, dir_s1d1,
+ AT_EMPTY_PATH | OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_LE(0, root_fd);
+
+ ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+ ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
+ EXPECT_EQ(0, close(ruleset_fd));
+
+ /* Link denied by Landlock: EACCES. */
+ EXPECT_EQ(-1, linkat(root_fd, ".", root_fd, "does_not_exist", 0));
+ EXPECT_EQ(EACCES, errno);
+
+ /* renameat2() always returns EBUSY. */
+ EXPECT_EQ(-1, renameat2(root_fd, ".", root_fd, "does_not_exist", 0));
+ EXPECT_EQ(EBUSY, errno);
+
+ EXPECT_EQ(0, close(root_fd));
+}
+
TEST_F_FORK(layout1, reparent_link)
{
const struct rule layer1[] = {
diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c
index 37de82da9be7..b61803e36d1c 100644
--- a/tools/testing/selftests/mm/ksm_functional_tests.c
+++ b/tools/testing/selftests/mm/ksm_functional_tests.c
@@ -656,12 +656,33 @@ unmap:
munmap(map, size);
}
+static void init_global_file_handles(void)
+{
+ mem_fd = open("/proc/self/mem", O_RDWR);
+ if (mem_fd < 0)
+ ksft_exit_fail_msg("opening /proc/self/mem failed\n");
+ ksm_fd = open("/sys/kernel/mm/ksm/run", O_RDWR);
+ if (ksm_fd < 0)
+ ksft_exit_skip("open(\"/sys/kernel/mm/ksm/run\") failed\n");
+ ksm_full_scans_fd = open("/sys/kernel/mm/ksm/full_scans", O_RDONLY);
+ if (ksm_full_scans_fd < 0)
+ ksft_exit_skip("open(\"/sys/kernel/mm/ksm/full_scans\") failed\n");
+ pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+ if (pagemap_fd < 0)
+ ksft_exit_skip("open(\"/proc/self/pagemap\") failed\n");
+ proc_self_ksm_stat_fd = open("/proc/self/ksm_stat", O_RDONLY);
+ proc_self_ksm_merging_pages_fd = open("/proc/self/ksm_merging_pages",
+ O_RDONLY);
+ ksm_use_zero_pages_fd = open("/sys/kernel/mm/ksm/use_zero_pages", O_RDWR);
+}
+
int main(int argc, char **argv)
{
unsigned int tests = 8;
int err;
if (argc > 1 && !strcmp(argv[1], FORK_EXEC_CHILD_PRG_NAME)) {
+ init_global_file_handles();
exit(test_child_ksm());
}
@@ -674,22 +695,7 @@ int main(int argc, char **argv)
pagesize = getpagesize();
- mem_fd = open("/proc/self/mem", O_RDWR);
- if (mem_fd < 0)
- ksft_exit_fail_msg("opening /proc/self/mem failed\n");
- ksm_fd = open("/sys/kernel/mm/ksm/run", O_RDWR);
- if (ksm_fd < 0)
- ksft_exit_skip("open(\"/sys/kernel/mm/ksm/run\") failed\n");
- ksm_full_scans_fd = open("/sys/kernel/mm/ksm/full_scans", O_RDONLY);
- if (ksm_full_scans_fd < 0)
- ksft_exit_skip("open(\"/sys/kernel/mm/ksm/full_scans\") failed\n");
- pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
- if (pagemap_fd < 0)
- ksft_exit_skip("open(\"/proc/self/pagemap\") failed\n");
- proc_self_ksm_stat_fd = open("/proc/self/ksm_stat", O_RDONLY);
- proc_self_ksm_merging_pages_fd = open("/proc/self/ksm_merging_pages",
- O_RDONLY);
- ksm_use_zero_pages_fd = open("/sys/kernel/mm/ksm/use_zero_pages", O_RDWR);
+ init_global_file_handles();
test_unmerge();
test_unmerge_zero_pages();
diff --git a/tools/testing/selftests/mm/map_fixed_noreplace.c b/tools/testing/selftests/mm/map_fixed_noreplace.c
index b74813fdc951..d53de2486080 100644
--- a/tools/testing/selftests/mm/map_fixed_noreplace.c
+++ b/tools/testing/selftests/mm/map_fixed_noreplace.c
@@ -67,7 +67,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error: munmap failed!?\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() 5*PAGE_SIZE at base\n");
addr = base_addr + page_size;
size = 3 * page_size;
@@ -76,7 +77,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error: first mmap() failed unexpectedly\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() 3*PAGE_SIZE at base+PAGE_SIZE\n");
/*
* Exact same mapping again:
@@ -93,7 +95,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error:1: mmap() succeeded when it shouldn't have\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() 5*PAGE_SIZE at base\n");
/*
* Second mapping contained within first:
@@ -111,7 +114,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error:2: mmap() succeeded when it shouldn't have\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() 2*PAGE_SIZE at base+PAGE_SIZE\n");
/*
* Overlap end of existing mapping:
@@ -128,7 +132,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error:3: mmap() succeeded when it shouldn't have\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() 2*PAGE_SIZE at base+(3*PAGE_SIZE)\n");
/*
* Overlap start of existing mapping:
@@ -145,7 +150,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error:4: mmap() succeeded when it shouldn't have\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() 2*PAGE_SIZE bytes at base\n");
/*
* Adjacent to start of existing mapping:
@@ -162,7 +168,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error:5: mmap() failed when it shouldn't have\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() PAGE_SIZE at base\n");
/*
* Adjacent to end of existing mapping:
@@ -179,7 +186,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error:6: mmap() failed when it shouldn't have\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() PAGE_SIZE at base+(4*PAGE_SIZE)\n");
addr = base_addr;
size = 5 * page_size;
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 49a56eb5d036..666ab7d9390b 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -43,7 +43,6 @@ tap
tcp_fastopen_backup_key
tcp_inq
tcp_mmap
-test_unix_oob
timestamping
tls
toeplitz
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index bd01e4a0be2c..d9393569d03a 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -43,6 +43,8 @@ TEST_PROGS += srv6_hl2encap_red_l2vpn_test.sh
TEST_PROGS += srv6_end_next_csid_l3vpn_test.sh
TEST_PROGS += srv6_end_x_next_csid_l3vpn_test.sh
TEST_PROGS += srv6_end_flavors_test.sh
+TEST_PROGS += srv6_end_dx4_netfilter_test.sh
+TEST_PROGS += srv6_end_dx6_netfilter_test.sh
TEST_PROGS += vrf_strict_mode_test.sh
TEST_PROGS += arp_ndisc_evict_nocarrier.sh
TEST_PROGS += ndisc_unsolicited_na_test.sh
diff --git a/tools/testing/selftests/net/af_unix/Makefile b/tools/testing/selftests/net/af_unix/Makefile
index 3b83c797650d..50584479540b 100644
--- a/tools/testing/selftests/net/af_unix/Makefile
+++ b/tools/testing/selftests/net/af_unix/Makefile
@@ -1,4 +1,4 @@
CFLAGS += $(KHDR_INCLUDES)
-TEST_GEN_PROGS := diag_uid test_unix_oob unix_connect scm_pidfd scm_rights
+TEST_GEN_PROGS := diag_uid msg_oob scm_pidfd scm_rights unix_connect
include ../../lib.mk
diff --git a/tools/testing/selftests/net/af_unix/config b/tools/testing/selftests/net/af_unix/config
new file mode 100644
index 000000000000..37368567768c
--- /dev/null
+++ b/tools/testing/selftests/net/af_unix/config
@@ -0,0 +1,3 @@
+CONFIG_UNIX=y
+CONFIG_AF_UNIX_OOB=y
+CONFIG_UNIX_DIAG=m
diff --git a/tools/testing/selftests/net/af_unix/msg_oob.c b/tools/testing/selftests/net/af_unix/msg_oob.c
new file mode 100644
index 000000000000..16d0c172eaeb
--- /dev/null
+++ b/tools/testing/selftests/net/af_unix/msg_oob.c
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <netinet/in.h>
+#include <sys/epoll.h>
+#include <sys/ioctl.h>
+#include <sys/signalfd.h>
+#include <sys/socket.h>
+
+#include "../../kselftest_harness.h"
+
+#define BUF_SZ 32
+
+FIXTURE(msg_oob)
+{
+ int fd[4]; /* 0: AF_UNIX sender
+ * 1: AF_UNIX receiver
+ * 2: TCP sender
+ * 3: TCP receiver
+ */
+ int signal_fd;
+ int epoll_fd[2]; /* 0: AF_UNIX receiver
+ * 1: TCP receiver
+ */
+ bool tcp_compliant;
+};
+
+FIXTURE_VARIANT(msg_oob)
+{
+ bool peek;
+};
+
+FIXTURE_VARIANT_ADD(msg_oob, no_peek)
+{
+ .peek = false,
+};
+
+FIXTURE_VARIANT_ADD(msg_oob, peek)
+{
+ .peek = true
+};
+
+static void create_unix_socketpair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self)
+{
+ int ret;
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_NONBLOCK, 0, self->fd);
+ ASSERT_EQ(ret, 0);
+}
+
+static void create_tcp_socketpair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self)
+{
+ struct sockaddr_in addr;
+ socklen_t addrlen;
+ int listen_fd;
+ int ret;
+
+ listen_fd = socket(AF_INET, SOCK_STREAM, 0);
+ ASSERT_GE(listen_fd, 0);
+
+ ret = listen(listen_fd, -1);
+ ASSERT_EQ(ret, 0);
+
+ addrlen = sizeof(addr);
+ ret = getsockname(listen_fd, (struct sockaddr *)&addr, &addrlen);
+ ASSERT_EQ(ret, 0);
+
+ self->fd[2] = socket(AF_INET, SOCK_STREAM, 0);
+ ASSERT_GE(self->fd[2], 0);
+
+ ret = connect(self->fd[2], (struct sockaddr *)&addr, addrlen);
+ ASSERT_EQ(ret, 0);
+
+ self->fd[3] = accept(listen_fd, (struct sockaddr *)&addr, &addrlen);
+ ASSERT_GE(self->fd[3], 0);
+
+ ret = fcntl(self->fd[3], F_SETFL, O_NONBLOCK);
+ ASSERT_EQ(ret, 0);
+}
+
+static void setup_sigurg(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self)
+{
+ struct signalfd_siginfo siginfo;
+ int pid = getpid();
+ sigset_t mask;
+ int i, ret;
+
+ for (i = 0; i < 2; i++) {
+ ret = ioctl(self->fd[i * 2 + 1], FIOSETOWN, &pid);
+ ASSERT_EQ(ret, 0);
+ }
+
+ ret = sigemptyset(&mask);
+ ASSERT_EQ(ret, 0);
+
+ ret = sigaddset(&mask, SIGURG);
+ ASSERT_EQ(ret, 0);
+
+ ret = sigprocmask(SIG_BLOCK, &mask, NULL);
+ ASSERT_EQ(ret, 0);
+
+ self->signal_fd = signalfd(-1, &mask, SFD_NONBLOCK);
+ ASSERT_GE(self->signal_fd, 0);
+
+ ret = read(self->signal_fd, &siginfo, sizeof(siginfo));
+ ASSERT_EQ(ret, -1);
+}
+
+static void setup_epollpri(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self)
+{
+ struct epoll_event event = {
+ .events = EPOLLPRI,
+ };
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ int ret;
+
+ self->epoll_fd[i] = epoll_create1(0);
+ ASSERT_GE(self->epoll_fd[i], 0);
+
+ ret = epoll_ctl(self->epoll_fd[i], EPOLL_CTL_ADD, self->fd[i * 2 + 1], &event);
+ ASSERT_EQ(ret, 0);
+ }
+}
+
+static void close_sockets(FIXTURE_DATA(msg_oob) *self)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ close(self->fd[i]);
+}
+
+FIXTURE_SETUP(msg_oob)
+{
+ create_unix_socketpair(_metadata, self);
+ create_tcp_socketpair(_metadata, self);
+
+ setup_sigurg(_metadata, self);
+ setup_epollpri(_metadata, self);
+
+ self->tcp_compliant = true;
+}
+
+FIXTURE_TEARDOWN(msg_oob)
+{
+ close_sockets(self);
+}
+
+static void __epollpair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self,
+ bool oob_remaining)
+{
+ struct epoll_event event[2] = {};
+ int i, ret[2];
+
+ for (i = 0; i < 2; i++)
+ ret[i] = epoll_wait(self->epoll_fd[i], &event[i], 1, 0);
+
+ ASSERT_EQ(ret[0], oob_remaining);
+
+ if (self->tcp_compliant)
+ ASSERT_EQ(ret[0], ret[1]);
+
+ if (oob_remaining) {
+ ASSERT_EQ(event[0].events, EPOLLPRI);
+
+ if (self->tcp_compliant)
+ ASSERT_EQ(event[0].events, event[1].events);
+ }
+}
+
+static void __sendpair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self,
+ const void *buf, size_t len, int flags)
+{
+ int i, ret[2];
+
+ for (i = 0; i < 2; i++) {
+ struct signalfd_siginfo siginfo = {};
+ int bytes;
+
+ ret[i] = send(self->fd[i * 2], buf, len, flags);
+
+ bytes = read(self->signal_fd, &siginfo, sizeof(siginfo));
+
+ if (flags & MSG_OOB) {
+ ASSERT_EQ(bytes, sizeof(siginfo));
+ ASSERT_EQ(siginfo.ssi_signo, SIGURG);
+
+ bytes = read(self->signal_fd, &siginfo, sizeof(siginfo));
+ }
+
+ ASSERT_EQ(bytes, -1);
+ }
+
+ ASSERT_EQ(ret[0], len);
+ ASSERT_EQ(ret[0], ret[1]);
+}
+
+static void __recvpair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self,
+ const void *expected_buf, int expected_len,
+ int buf_len, int flags)
+{
+ int i, ret[2], recv_errno[2], expected_errno = 0;
+ char recv_buf[2][BUF_SZ] = {};
+ bool printed = false;
+
+ ASSERT_GE(BUF_SZ, buf_len);
+
+ errno = 0;
+
+ for (i = 0; i < 2; i++) {
+ ret[i] = recv(self->fd[i * 2 + 1], recv_buf[i], buf_len, flags);
+ recv_errno[i] = errno;
+ }
+
+ if (expected_len < 0) {
+ expected_errno = -expected_len;
+ expected_len = -1;
+ }
+
+ if (ret[0] != expected_len || recv_errno[0] != expected_errno) {
+ TH_LOG("AF_UNIX :%s", ret[0] < 0 ? strerror(recv_errno[0]) : recv_buf[0]);
+ TH_LOG("Expected:%s", expected_errno ? strerror(expected_errno) : expected_buf);
+
+ ASSERT_EQ(ret[0], expected_len);
+ ASSERT_EQ(recv_errno[0], expected_errno);
+ }
+
+ if (ret[0] != ret[1] || recv_errno[0] != recv_errno[1]) {
+ TH_LOG("AF_UNIX :%s", ret[0] < 0 ? strerror(recv_errno[0]) : recv_buf[0]);
+ TH_LOG("TCP :%s", ret[1] < 0 ? strerror(recv_errno[1]) : recv_buf[1]);
+
+ printed = true;
+
+ if (self->tcp_compliant) {
+ ASSERT_EQ(ret[0], ret[1]);
+ ASSERT_EQ(recv_errno[0], recv_errno[1]);
+ }
+ }
+
+ if (expected_len >= 0) {
+ int cmp;
+
+ cmp = strncmp(expected_buf, recv_buf[0], expected_len);
+ if (cmp) {
+ TH_LOG("AF_UNIX :%s", ret[0] < 0 ? strerror(recv_errno[0]) : recv_buf[0]);
+ TH_LOG("Expected:%s", expected_errno ? strerror(expected_errno) : expected_buf);
+
+ ASSERT_EQ(cmp, 0);
+ }
+
+ cmp = strncmp(recv_buf[0], recv_buf[1], expected_len);
+ if (cmp) {
+ if (!printed) {
+ TH_LOG("AF_UNIX :%s", ret[0] < 0 ? strerror(recv_errno[0]) : recv_buf[0]);
+ TH_LOG("TCP :%s", ret[1] < 0 ? strerror(recv_errno[1]) : recv_buf[1]);
+ }
+
+ if (self->tcp_compliant)
+ ASSERT_EQ(cmp, 0);
+ }
+ }
+}
+
+static void __setinlinepair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self)
+{
+ int i, oob_inline = 1;
+
+ for (i = 0; i < 2; i++) {
+ int ret;
+
+ ret = setsockopt(self->fd[i * 2 + 1], SOL_SOCKET, SO_OOBINLINE,
+ &oob_inline, sizeof(oob_inline));
+ ASSERT_EQ(ret, 0);
+ }
+}
+
+static void __siocatmarkpair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self,
+ bool oob_head)
+{
+ int answ[2] = {};
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ int ret;
+
+ ret = ioctl(self->fd[i * 2 + 1], SIOCATMARK, &answ[i]);
+ ASSERT_EQ(ret, 0);
+ }
+
+ ASSERT_EQ(answ[0], oob_head);
+
+ if (self->tcp_compliant)
+ ASSERT_EQ(answ[0], answ[1]);
+}
+
+#define sendpair(buf, len, flags) \
+ __sendpair(_metadata, self, buf, len, flags)
+
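+/*
+ * When the variant enables peeking, the recvpair() macro below checks the
+ * same expectation twice: first with MSG_PEEK, which must not consume the
+ * data, then with a normal recv().
+ */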
+#define recvpair(expected_buf, expected_len, buf_len, flags) \
+ do { \
+ if (variant->peek) \
+ __recvpair(_metadata, self, \
+ expected_buf, expected_len, \
+ buf_len, (flags) | MSG_PEEK); \
+ __recvpair(_metadata, self, \
+ expected_buf, expected_len, buf_len, flags); \
+ } while (0)
+
+#define epollpair(oob_remaining) \
+ __epollpair(_metadata, self, oob_remaining)
+
+#define siocatmarkpair(oob_head) \
+ __siocatmarkpair(_metadata, self, oob_head)
+
+#define setinlinepair() \
+ __setinlinepair(_metadata, self)
+
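+/*
+ * Wrap assertions that are expected to diverge from TCP: the block below runs
+ * with self->tcp_compliant temporarily cleared, so the helpers above skip
+ * their AF_UNIX-vs-TCP equality checks.
+ */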
+#define tcp_incompliant \
+ for (self->tcp_compliant = false; \
+ self->tcp_compliant == false; \
+ self->tcp_compliant = true)
+
+TEST_F(msg_oob, non_oob)
+{
+ sendpair("x", 1, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ recvpair("", -EINVAL, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, oob)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("x", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(true);
+}
+
+TEST_F(msg_oob, oob_drop)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("", -EAGAIN, 1, 0); /* Drop OOB. */
+ epollpair(false);
+ siocatmarkpair(false);
+
+ recvpair("", -EINVAL, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, oob_ahead)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("o", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ recvpair("hell", 4, 4, 0);
+ epollpair(false);
+ siocatmarkpair(true);
+}
+
+TEST_F(msg_oob, oob_break)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("hell", 4, 5, 0); /* Break at OOB even with enough buffer. */
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("o", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(true);
+
+ recvpair("", -EAGAIN, 1, 0);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, oob_ahead_break)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("world", 5, 0);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("o", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ recvpair("hell", 4, 9, 0); /* Break at OOB even after it's recv()ed. */
+ epollpair(false);
+ siocatmarkpair(true);
+
+ recvpair("world", 5, 5, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, oob_break_drop)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("world", 5, 0);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("hell", 4, 10, 0); /* Break at OOB even with enough buffer. */
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("world", 5, 10, 0); /* Drop OOB and recv() the next skb. */
+ epollpair(false);
+ siocatmarkpair(false);
+
+ recvpair("", -EINVAL, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, ex_oob_break)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("wor", 3, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("ld", 2, 0);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("hellowo", 7, 10, 0); /* Break at OOB but not at ex-OOB. */
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("r", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(true);
+
+ recvpair("ld", 2, 2, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, ex_oob_drop)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ sendpair("y", 1, MSG_OOB); /* TCP drops "x" at this moment. */
+ epollpair(true);
+
+ tcp_incompliant {
+ siocatmarkpair(false);
+
+ recvpair("x", 1, 1, 0); /* TCP drops "y" by passing through it. */
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("y", 1, 1, MSG_OOB); /* TCP returns -EINVAL. */
+ epollpair(false);
+ siocatmarkpair(true);
+ }
+}
+
+TEST_F(msg_oob, ex_oob_drop_2)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ sendpair("y", 1, MSG_OOB); /* TCP drops "x" at this moment. */
+ epollpair(true);
+
+ tcp_incompliant {
+ siocatmarkpair(false);
+ }
+
+ recvpair("y", 1, 1, MSG_OOB);
+ epollpair(false);
+
+ tcp_incompliant {
+ siocatmarkpair(false);
+
+ recvpair("x", 1, 1, 0); /* TCP returns -EAGAIN. */
+ epollpair(false);
+ siocatmarkpair(true);
+ }
+}
+
+TEST_F(msg_oob, ex_oob_ahead_break)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("wor", 3, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("r", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ sendpair("ld", 2, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ tcp_incompliant {
+ recvpair("hellowol", 8, 10, 0); /* TCP recv()s "helloworl", why "r" ?? */
+ }
+
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("d", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(true);
+}
+
+TEST_F(msg_oob, ex_oob_siocatmark)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("o", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ sendpair("world", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("hell", 4, 4, 0); /* Intentionally stop at ex-OOB. */
+ epollpair(true);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, inline_oob)
+{
+ setinlinepair();
+
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("", -EINVAL, 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("x", 1, 1, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, inline_oob_break)
+{
+ setinlinepair();
+
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("", -EINVAL, 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("hell", 4, 5, 0); /* Break at OOB but not at ex-OOB. */
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("o", 1, 1, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, inline_oob_ahead_break)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("world", 5, 0);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("o", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ setinlinepair();
+
+ recvpair("hell", 4, 9, 0); /* Break at OOB even with enough buffer. */
+ epollpair(false);
+ siocatmarkpair(true);
+
+ tcp_incompliant {
+ recvpair("world", 5, 6, 0); /* TCP recv()s "oworld", ... "o" ??? */
+ }
+
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, inline_ex_oob_break)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("wor", 3, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("ld", 2, 0);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ setinlinepair();
+
+ recvpair("hellowo", 7, 10, 0); /* Break at OOB but not at ex-OOB. */
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("rld", 3, 3, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, inline_ex_oob_no_drop)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ setinlinepair();
+
+ sendpair("y", 1, MSG_OOB); /* TCP does NOT drop "x" at this moment. */
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("x", 1, 1, 0);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("y", 1, 1, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, inline_ex_oob_drop)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ sendpair("y", 1, MSG_OOB); /* TCP drops "x" at this moment. */
+ epollpair(true);
+
+ setinlinepair();
+
+ tcp_incompliant {
+ siocatmarkpair(false);
+
+ recvpair("x", 1, 1, 0); /* TCP recv()s "y". */
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("y", 1, 1, 0); /* TCP returns -EAGAIN. */
+ epollpair(false);
+ siocatmarkpair(false);
+ }
+}
+
+TEST_F(msg_oob, inline_ex_oob_siocatmark)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("o", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ setinlinepair();
+
+ sendpair("world", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("hell", 4, 4, 0); /* Intentionally stop at ex-OOB. */
+ epollpair(true);
+ siocatmarkpair(false);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/af_unix/test_unix_oob.c b/tools/testing/selftests/net/af_unix/test_unix_oob.c
deleted file mode 100644
index a7c51889acd5..000000000000
--- a/tools/testing/selftests/net/af_unix/test_unix_oob.c
+++ /dev/null
@@ -1,436 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/socket.h>
-#include <arpa/inet.h>
-#include <unistd.h>
-#include <string.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <errno.h>
-#include <netinet/tcp.h>
-#include <sys/un.h>
-#include <sys/signal.h>
-#include <sys/poll.h>
-
-static int pipefd[2];
-static int signal_recvd;
-static pid_t producer_id;
-static char sock_name[32];
-
-static void sig_hand(int sn, siginfo_t *si, void *p)
-{
- signal_recvd = sn;
-}
-
-static int set_sig_handler(int signal)
-{
- struct sigaction sa;
-
- sa.sa_sigaction = sig_hand;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO | SA_RESTART;
-
- return sigaction(signal, &sa, NULL);
-}
-
-static void set_filemode(int fd, int set)
-{
- int flags = fcntl(fd, F_GETFL, 0);
-
- if (set)
- flags &= ~O_NONBLOCK;
- else
- flags |= O_NONBLOCK;
- fcntl(fd, F_SETFL, flags);
-}
-
-static void signal_producer(int fd)
-{
- char cmd;
-
- cmd = 'S';
- write(fd, &cmd, sizeof(cmd));
-}
-
-static void wait_for_signal(int fd)
-{
- char buf[5];
-
- read(fd, buf, 5);
-}
-
-static void die(int status)
-{
- fflush(NULL);
- unlink(sock_name);
- kill(producer_id, SIGTERM);
- exit(status);
-}
-
-int is_sioctatmark(int fd)
-{
- int ans = -1;
-
- if (ioctl(fd, SIOCATMARK, &ans, sizeof(ans)) < 0) {
-#ifdef DEBUG
- perror("SIOCATMARK Failed");
-#endif
- }
- return ans;
-}
-
-void read_oob(int fd, char *c)
-{
-
- *c = ' ';
- if (recv(fd, c, sizeof(*c), MSG_OOB) < 0) {
-#ifdef DEBUG
- perror("Reading MSG_OOB Failed");
-#endif
- }
-}
-
-int read_data(int pfd, char *buf, int size)
-{
- int len = 0;
-
- memset(buf, size, '0');
- len = read(pfd, buf, size);
-#ifdef DEBUG
- if (len < 0)
- perror("read failed");
-#endif
- return len;
-}
-
-static void wait_for_data(int pfd, int event)
-{
- struct pollfd pfds[1];
-
- pfds[0].fd = pfd;
- pfds[0].events = event;
- poll(pfds, 1, -1);
-}
-
-void producer(struct sockaddr_un *consumer_addr)
-{
- int cfd;
- char buf[64];
- int i;
-
- memset(buf, 'x', sizeof(buf));
- cfd = socket(AF_UNIX, SOCK_STREAM, 0);
-
- wait_for_signal(pipefd[0]);
- if (connect(cfd, (struct sockaddr *)consumer_addr,
- sizeof(*consumer_addr)) != 0) {
- perror("Connect failed");
- kill(0, SIGTERM);
- exit(1);
- }
-
- for (i = 0; i < 2; i++) {
- /* Test 1: Test for SIGURG and OOB */
- wait_for_signal(pipefd[0]);
- memset(buf, 'x', sizeof(buf));
- buf[63] = '@';
- send(cfd, buf, sizeof(buf), MSG_OOB);
-
- wait_for_signal(pipefd[0]);
-
- /* Test 2: Test for OOB being overwitten */
- memset(buf, 'x', sizeof(buf));
- buf[63] = '%';
- send(cfd, buf, sizeof(buf), MSG_OOB);
-
- memset(buf, 'x', sizeof(buf));
- buf[63] = '#';
- send(cfd, buf, sizeof(buf), MSG_OOB);
-
- wait_for_signal(pipefd[0]);
-
- /* Test 3: Test for SIOCATMARK */
- memset(buf, 'x', sizeof(buf));
- buf[63] = '@';
- send(cfd, buf, sizeof(buf), MSG_OOB);
-
- memset(buf, 'x', sizeof(buf));
- buf[63] = '%';
- send(cfd, buf, sizeof(buf), MSG_OOB);
-
- memset(buf, 'x', sizeof(buf));
- send(cfd, buf, sizeof(buf), 0);
-
- wait_for_signal(pipefd[0]);
-
- /* Test 4: Test for 1byte OOB msg */
- memset(buf, 'x', sizeof(buf));
- buf[0] = '@';
- send(cfd, buf, 1, MSG_OOB);
- }
-}
-
-int
-main(int argc, char **argv)
-{
- int lfd, pfd;
- struct sockaddr_un consumer_addr, paddr;
- socklen_t len = sizeof(consumer_addr);
- char buf[1024];
- int on = 0;
- char oob;
- int atmark;
-
- lfd = socket(AF_UNIX, SOCK_STREAM, 0);
- memset(&consumer_addr, 0, sizeof(consumer_addr));
- consumer_addr.sun_family = AF_UNIX;
- sprintf(sock_name, "unix_oob_%d", getpid());
- unlink(sock_name);
- strcpy(consumer_addr.sun_path, sock_name);
-
- if ((bind(lfd, (struct sockaddr *)&consumer_addr,
- sizeof(consumer_addr))) != 0) {
- perror("socket bind failed");
- exit(1);
- }
-
- pipe(pipefd);
-
- listen(lfd, 1);
-
- producer_id = fork();
- if (producer_id == 0) {
- producer(&consumer_addr);
- exit(0);
- }
-
- set_sig_handler(SIGURG);
- signal_producer(pipefd[1]);
-
- pfd = accept(lfd, (struct sockaddr *) &paddr, &len);
- fcntl(pfd, F_SETOWN, getpid());
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 1:
- * veriyf that SIGURG is
- * delivered, 63 bytes are
- * read, oob is '@', and POLLPRI works.
- */
- wait_for_data(pfd, POLLPRI);
- read_oob(pfd, &oob);
- len = read_data(pfd, buf, 1024);
- if (!signal_recvd || len != 63 || oob != '@') {
- fprintf(stderr, "Test 1 failed sigurg %d len %d %c\n",
- signal_recvd, len, oob);
- die(1);
- }
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 2:
- * Verify that the first OOB is over written by
- * the 2nd one and the first OOB is returned as
- * part of the read, and sigurg is received.
- */
- wait_for_data(pfd, POLLIN | POLLPRI);
- len = 0;
- while (len < 70)
- len = recv(pfd, buf, 1024, MSG_PEEK);
- len = read_data(pfd, buf, 1024);
- read_oob(pfd, &oob);
- if (!signal_recvd || len != 127 || oob != '#') {
- fprintf(stderr, "Test 2 failed, sigurg %d len %d OOB %c\n",
- signal_recvd, len, oob);
- die(1);
- }
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 3:
- * verify that 2nd oob over writes
- * the first one and read breaks at
- * oob boundary returning 127 bytes
- * and sigurg is received and atmark
- * is set.
- * oob is '%' and second read returns
- * 64 bytes.
- */
- len = 0;
- wait_for_data(pfd, POLLIN | POLLPRI);
- while (len < 150)
- len = recv(pfd, buf, 1024, MSG_PEEK);
- len = read_data(pfd, buf, 1024);
- atmark = is_sioctatmark(pfd);
- read_oob(pfd, &oob);
-
- if (!signal_recvd || len != 127 || oob != '%' || atmark != 1) {
- fprintf(stderr,
- "Test 3 failed, sigurg %d len %d OOB %c atmark %d\n",
- signal_recvd, len, oob, atmark);
- die(1);
- }
-
- signal_recvd = 0;
-
- len = read_data(pfd, buf, 1024);
- if (len != 64) {
- fprintf(stderr, "Test 3.1 failed, sigurg %d len %d OOB %c\n",
- signal_recvd, len, oob);
- die(1);
- }
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 4:
- * verify that a single byte
- * oob message is delivered.
- * set non blocking mode and
- * check proper error is
- * returned and sigurg is
- * received and correct
- * oob is read.
- */
-
- set_filemode(pfd, 0);
-
- wait_for_data(pfd, POLLIN | POLLPRI);
- len = read_data(pfd, buf, 1024);
- if ((len == -1) && (errno == 11))
- len = 0;
-
- read_oob(pfd, &oob);
-
- if (!signal_recvd || len != 0 || oob != '@') {
- fprintf(stderr, "Test 4 failed, sigurg %d len %d OOB %c\n",
- signal_recvd, len, oob);
- die(1);
- }
-
- set_filemode(pfd, 1);
-
- /* Inline Testing */
-
- on = 1;
- if (setsockopt(pfd, SOL_SOCKET, SO_OOBINLINE, &on, sizeof(on))) {
- perror("SO_OOBINLINE");
- die(1);
- }
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 1 -- Inline:
- * Check that SIGURG is
- * delivered and 63 bytes are
- * read and oob is '@'
- */
-
- wait_for_data(pfd, POLLIN | POLLPRI);
- len = read_data(pfd, buf, 1024);
-
- if (!signal_recvd || len != 63) {
- fprintf(stderr, "Test 1 Inline failed, sigurg %d len %d\n",
- signal_recvd, len);
- die(1);
- }
-
- len = read_data(pfd, buf, 1024);
-
- if (len != 1) {
- fprintf(stderr,
- "Test 1.1 Inline failed, sigurg %d len %d oob %c\n",
- signal_recvd, len, oob);
- die(1);
- }
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 2 -- Inline:
- * Verify that the first OOB is over written by
- * the 2nd one and read breaks correctly on
- * 2nd OOB boundary with the first OOB returned as
- * part of the read, and sigurg is delivered and
- * siocatmark returns true.
- * next read returns one byte, the oob byte
- * and siocatmark returns false.
- */
- len = 0;
- wait_for_data(pfd, POLLIN | POLLPRI);
- while (len < 70)
- len = recv(pfd, buf, 1024, MSG_PEEK);
- len = read_data(pfd, buf, 1024);
- atmark = is_sioctatmark(pfd);
- if (len != 127 || atmark != 1 || !signal_recvd) {
- fprintf(stderr, "Test 2 Inline failed, len %d atmark %d\n",
- len, atmark);
- die(1);
- }
-
- len = read_data(pfd, buf, 1024);
- atmark = is_sioctatmark(pfd);
- if (len != 1 || buf[0] != '#' || atmark == 1) {
- fprintf(stderr, "Test 2.1 Inline failed, len %d data %c atmark %d\n",
- len, buf[0], atmark);
- die(1);
- }
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 3 -- Inline:
- * verify that 2nd oob over writes
- * the first one and read breaks at
- * oob boundary returning 127 bytes
- * and sigurg is received and siocatmark
- * is true after the read.
- * subsequent read returns 65 bytes
- * because of oob which should be '%'.
- */
- len = 0;
- wait_for_data(pfd, POLLIN | POLLPRI);
- while (len < 126)
- len = recv(pfd, buf, 1024, MSG_PEEK);
- len = read_data(pfd, buf, 1024);
- atmark = is_sioctatmark(pfd);
- if (!signal_recvd || len != 127 || !atmark) {
- fprintf(stderr,
- "Test 3 Inline failed, sigurg %d len %d data %c\n",
- signal_recvd, len, buf[0]);
- die(1);
- }
-
- len = read_data(pfd, buf, 1024);
- atmark = is_sioctatmark(pfd);
- if (len != 65 || buf[0] != '%' || atmark != 0) {
- fprintf(stderr,
- "Test 3.1 Inline failed, len %d oob %c atmark %d\n",
- len, buf[0], atmark);
- die(1);
- }
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 4 -- Inline:
- * verify that a single
- * byte oob message is delivered
- * and read returns one byte, the oob
- * byte and sigurg is received
- */
- wait_for_data(pfd, POLLIN | POLLPRI);
- len = read_data(pfd, buf, 1024);
- if (!signal_recvd || len != 1 || buf[0] != '@') {
- fprintf(stderr,
- "Test 4 Inline failed, signal %d len %d data %c\n",
- signal_recvd, len, buf[0]);
- die(1);
- }
- die(0);
-}
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 04de7a6ba6f3..d4891f7a2bfa 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -101,3 +101,5 @@ CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_CRYPTO_ARIA=y
CONFIG_XFRM_INTERFACE=m
CONFIG_XFRM_USER=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
diff --git a/tools/testing/selftests/net/hsr/config b/tools/testing/selftests/net/hsr/config
index 22061204fb69..241542441c51 100644
--- a/tools/testing/selftests/net/hsr/config
+++ b/tools/testing/selftests/net/hsr/config
@@ -2,3 +2,4 @@ CONFIG_IPV6=y
CONFIG_NET_SCH_NETEM=m
CONFIG_HSR=y
CONFIG_VETH=y
+CONFIG_BRIDGE=y
diff --git a/tools/testing/selftests/net/hsr/hsr_ping.sh b/tools/testing/selftests/net/hsr/hsr_ping.sh
index 790294c8af83..3684b813b0f6 100755
--- a/tools/testing/selftests/net/hsr/hsr_ping.sh
+++ b/tools/testing/selftests/net/hsr/hsr_ping.sh
@@ -174,6 +174,8 @@ trap cleanup_all_ns EXIT
setup_hsr_interfaces 0
do_complete_ping_test
+setup_ns ns1 ns2 ns3
+
setup_hsr_interfaces 1
do_complete_ping_test
diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
index edc030e81a46..9155c914c064 100644
--- a/tools/testing/selftests/net/lib.sh
+++ b/tools/testing/selftests/net/lib.sh
@@ -15,7 +15,7 @@ ksft_xfail=2
ksft_skip=4
# namespace list created by setup_ns
-NS_LIST=""
+NS_LIST=()
##############################################################################
# Helpers
@@ -27,6 +27,7 @@ __ksft_status_merge()
local -A weights
local weight=0
+ local i
for i in "$@"; do
weights[$i]=$((weight++))
done
@@ -67,9 +68,7 @@ loopy_wait()
while true
do
local out
- out=$("$@")
- local ret=$?
- if ((!ret)); then
+ if out=$("$@"); then
echo -n "$out"
return 0
fi
@@ -139,6 +138,7 @@ cleanup_ns()
fi
for ns in "$@"; do
+ [ -z "${ns}" ] && continue
ip netns delete "${ns}" &> /dev/null
if ! busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then
echo "Warn: Failed to remove namespace $ns"
@@ -152,7 +152,7 @@ cleanup_ns()
cleanup_all_ns()
{
- cleanup_ns $NS_LIST
+ cleanup_ns "${NS_LIST[@]}"
}
# setup netns with given names as prefix. e.g
@@ -161,7 +161,7 @@ setup_ns()
{
local ns=""
local ns_name=""
- local ns_list=""
+ local ns_list=()
local ns_exist=
for ns_name in "$@"; do
# Some test may setup/remove same netns multi times
@@ -177,13 +177,13 @@ setup_ns()
if ! ip netns add "$ns"; then
echo "Failed to create namespace $ns_name"
- cleanup_ns "$ns_list"
+ cleanup_ns "${ns_list[@]}"
return $ksft_skip
fi
ip -n "$ns" link set lo up
- ! $ns_exist && ns_list="$ns_list $ns"
+ ! $ns_exist && ns_list+=("$ns")
done
- NS_LIST="$NS_LIST $ns_list"
+ NS_LIST+=("${ns_list[@]}")
}
tc_rule_stats_get()
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index fefa9173bdaa..108aeeb84ef1 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -261,6 +261,8 @@ reset()
TEST_NAME="${1}"
+ MPTCP_LIB_SUBTEST_FLAKY=0 # reset if modified
+
if skip_test; then
MPTCP_LIB_TEST_COUNTER=$((MPTCP_LIB_TEST_COUNTER+1))
last_test_ignored=1
@@ -448,7 +450,9 @@ reset_with_tcp_filter()
# $1: err msg
fail_test()
{
- ret=${KSFT_FAIL}
+ if ! mptcp_lib_subtest_is_flaky; then
+ ret=${KSFT_FAIL}
+ fi
if [ ${#} -gt 0 ]; then
print_fail "${@}"
@@ -2245,9 +2249,10 @@ remove_tests()
if reset "remove invalid addresses"; then
pm_nl_set_limits $ns1 3 3
pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
+ # multicast IP: no packet for this address will be received on ns1
+ pm_nl_add_endpoint $ns1 224.0.0.1 flags signal
pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
- pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
- pm_nl_set_limits $ns2 3 3
+ pm_nl_set_limits $ns2 2 2
addr_nr_ns1=-3 speed=10 \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 1
@@ -3069,6 +3074,7 @@ fullmesh_tests()
fastclose_tests()
{
if reset_check_counter "fastclose test" "MPTcpExtMPFastcloseTx"; then
+ MPTCP_LIB_SUBTEST_FLAKY=1
test_linkfail=1024 fastclose=client \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0
@@ -3077,6 +3083,7 @@ fastclose_tests()
fi
if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
+ MPTCP_LIB_SUBTEST_FLAKY=1
test_linkfail=1024 fastclose=server \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0 0 0 0 1
@@ -3095,6 +3102,7 @@ fail_tests()
{
# single subflow
if reset_with_fail "Infinite map" 1; then
+ MPTCP_LIB_SUBTEST_FLAKY=1
test_linkfail=128 \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0 +1 +0 1 0 1 "$(pedit_action_pkts)"
@@ -3103,6 +3111,7 @@ fail_tests()
# multiple subflows
if reset_with_fail "MP_FAIL MP_RST" 2; then
+ MPTCP_LIB_SUBTEST_FLAKY=1
tc -n $ns2 qdisc add dev ns2eth1 root netem rate 1mbit delay 5ms
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 0 1
diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
index ad2ebda5cb64..6ffa9b7a3260 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
@@ -21,6 +21,7 @@ declare -rx MPTCP_LIB_AF_INET6=10
MPTCP_LIB_SUBTESTS=()
MPTCP_LIB_SUBTESTS_DUPLICATED=0
+MPTCP_LIB_SUBTEST_FLAKY=0
MPTCP_LIB_TEST_COUNTER=0
MPTCP_LIB_TEST_FORMAT="%02u %-50s"
MPTCP_LIB_IP_MPTCP=0
@@ -41,6 +42,16 @@ else
readonly MPTCP_LIB_COLOR_RESET=
fi
+# The SELFTESTS_MPTCP_LIB_OVERRIDE_FLAKY env var can be set to 1 so that
+# errors from subtests marked as flaky are no longer ignored
+mptcp_lib_override_flaky() {
+ [ "${SELFTESTS_MPTCP_LIB_OVERRIDE_FLAKY:-}" = 1 ]
+}
+
+mptcp_lib_subtest_is_flaky() {
+ [ "${MPTCP_LIB_SUBTEST_FLAKY}" = 1 ] && ! mptcp_lib_override_flaky
+}
+
# $1: color, $2: text
mptcp_lib_print_color() {
echo -e "${MPTCP_LIB_START_PRINT:-}${*}${MPTCP_LIB_COLOR_RESET}"
@@ -72,7 +83,16 @@ mptcp_lib_pr_skip() {
}
mptcp_lib_pr_fail() {
- mptcp_lib_print_err "[FAIL]${1:+ ${*}}"
+ local title cmt
+
+ if mptcp_lib_subtest_is_flaky; then
+ title="IGNO"
+ cmt=" (flaky)"
+ else
+ title="FAIL"
+ fi
+
+ mptcp_lib_print_err "[${title}]${cmt}${1:+ ${*}}"
}
mptcp_lib_pr_info() {
@@ -208,7 +228,13 @@ mptcp_lib_result_pass() {
# $1: test name
mptcp_lib_result_fail() {
- __mptcp_lib_result_add "not ok" "${1}"
+ if mptcp_lib_subtest_is_flaky; then
+ # It might sound better to use 'not ok # TODO' or 'ok # SKIP',
+ # but some CIs don't understand 'TODO' and treat SKIP as errors.
+ __mptcp_lib_result_add "ok" "${1} # IGNORE Flaky"
+ else
+ __mptcp_lib_result_add "not ok" "${1}"
+ fi
}
# $1: test name
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index 4b14b4412166..f74e1c3c126d 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -244,7 +244,7 @@ run_test()
do_transfer $small $large $time
lret=$?
mptcp_lib_result_code "${lret}" "${msg}"
- if [ $lret -ne 0 ]; then
+ if [ $lret -ne 0 ] && ! mptcp_lib_subtest_is_flaky; then
ret=$lret
[ $bail -eq 0 ] || exit $ret
fi
@@ -254,7 +254,7 @@ run_test()
do_transfer $large $small $time
lret=$?
mptcp_lib_result_code "${lret}" "${msg}"
- if [ $lret -ne 0 ]; then
+ if [ $lret -ne 0 ] && ! mptcp_lib_subtest_is_flaky; then
ret=$lret
[ $bail -eq 0 ] || exit $ret
fi
@@ -290,7 +290,7 @@ run_test 10 10 0 0 "balanced bwidth"
run_test 10 10 1 25 "balanced bwidth with unbalanced delay"
# we still need some additional infrastructure to pass the following test-cases
-run_test 10 3 0 0 "unbalanced bwidth"
+MPTCP_LIB_SUBTEST_FLAKY=1 run_test 10 3 0 0 "unbalanced bwidth"
run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay"
run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay"
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index 9e2981f2d7f5..9cb05978269d 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -160,10 +160,12 @@ make_connection()
local is_v6=$1
local app_port=$app4_port
local connect_addr="10.0.1.1"
+ local client_addr="10.0.1.2"
local listen_addr="0.0.0.0"
if [ "$is_v6" = "v6" ]
then
connect_addr="dead:beef:1::1"
+ client_addr="dead:beef:1::2"
listen_addr="::"
app_port=$app6_port
else
@@ -206,6 +208,7 @@ make_connection()
[ "$server_serverside" = 1 ]
then
test_pass
+ print_title "Connection info: ${client_addr}:${client_port} -> ${connect_addr}:${app_port}"
else
test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})"
mptcp_lib_result_print_all_tap
@@ -297,7 +300,7 @@ test_announce()
ip netns exec "$ns2"\
./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id $client_addr_id dev\
ns2eth1
- print_test "ADD_ADDR id:${client_addr_id} 10.0.2.2 (ns2) => ns1, reuse port"
+ print_test "ADD_ADDR id:client 10.0.2.2 (ns2) => ns1, reuse port"
sleep 0.5
verify_announce_event $server_evts $ANNOUNCED $server4_token "10.0.2.2" $client_addr_id \
"$client4_port"
@@ -306,7 +309,7 @@ test_announce()
:>"$server_evts"
ip netns exec "$ns2" ./pm_nl_ctl ann\
dead:beef:2::2 token "$client6_token" id $client_addr_id dev ns2eth1
- print_test "ADD_ADDR6 id:${client_addr_id} dead:beef:2::2 (ns2) => ns1, reuse port"
+ print_test "ADD_ADDR6 id:client dead:beef:2::2 (ns2) => ns1, reuse port"
sleep 0.5
verify_announce_event "$server_evts" "$ANNOUNCED" "$server6_token" "dead:beef:2::2"\
"$client_addr_id" "$client6_port" "v6"
@@ -316,7 +319,7 @@ test_announce()
client_addr_id=$((client_addr_id+1))
ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id\
$client_addr_id dev ns2eth1 port $new4_port
- print_test "ADD_ADDR id:${client_addr_id} 10.0.2.2 (ns2) => ns1, new port"
+ print_test "ADD_ADDR id:client+1 10.0.2.2 (ns2) => ns1, new port"
sleep 0.5
verify_announce_event "$server_evts" "$ANNOUNCED" "$server4_token" "10.0.2.2"\
"$client_addr_id" "$new4_port"
@@ -327,7 +330,7 @@ test_announce()
# ADD_ADDR from the server to client machine reusing the subflow port
ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
$server_addr_id dev ns1eth2
- print_test "ADD_ADDR id:${server_addr_id} 10.0.2.1 (ns1) => ns2, reuse port"
+ print_test "ADD_ADDR id:server 10.0.2.1 (ns1) => ns2, reuse port"
sleep 0.5
verify_announce_event "$client_evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
"$server_addr_id" "$app4_port"
@@ -336,7 +339,7 @@ test_announce()
:>"$client_evts"
ip netns exec "$ns1" ./pm_nl_ctl ann dead:beef:2::1 token "$server6_token" id\
$server_addr_id dev ns1eth2
- print_test "ADD_ADDR6 id:${server_addr_id} dead:beef:2::1 (ns1) => ns2, reuse port"
+ print_test "ADD_ADDR6 id:server dead:beef:2::1 (ns1) => ns2, reuse port"
sleep 0.5
verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "dead:beef:2::1"\
"$server_addr_id" "$app6_port" "v6"
@@ -346,7 +349,7 @@ test_announce()
server_addr_id=$((server_addr_id+1))
ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
$server_addr_id dev ns1eth2 port $new4_port
- print_test "ADD_ADDR id:${server_addr_id} 10.0.2.1 (ns1) => ns2, new port"
+ print_test "ADD_ADDR id:server+1 10.0.2.1 (ns1) => ns2, new port"
sleep 0.5
verify_announce_event "$client_evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
"$server_addr_id" "$new4_port"
@@ -380,7 +383,7 @@ test_remove()
local invalid_token=$(( client4_token - 1 ))
ip netns exec "$ns2" ./pm_nl_ctl rem token $invalid_token id\
$client_addr_id > /dev/null 2>&1
- print_test "RM_ADDR id:${client_addr_id} ns2 => ns1, invalid token"
+ print_test "RM_ADDR id:client ns2 => ns1, invalid token"
local type
type=$(mptcp_lib_evts_get_info type "$server_evts")
if [ "$type" = "" ]
@@ -394,7 +397,7 @@ test_remove()
local invalid_id=$(( client_addr_id + 1 ))
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
$invalid_id > /dev/null 2>&1
- print_test "RM_ADDR id:${invalid_id} ns2 => ns1, invalid id"
+ print_test "RM_ADDR id:client+1 ns2 => ns1, invalid id"
type=$(mptcp_lib_evts_get_info type "$server_evts")
if [ "$type" = "" ]
then
@@ -407,7 +410,7 @@ test_remove()
:>"$server_evts"
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
$client_addr_id
- print_test "RM_ADDR id:${client_addr_id} ns2 => ns1"
+ print_test "RM_ADDR id:client ns2 => ns1"
sleep 0.5
verify_remove_event "$server_evts" "$REMOVED" "$server4_token" "$client_addr_id"
@@ -416,7 +419,7 @@ test_remove()
client_addr_id=$(( client_addr_id - 1 ))
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
$client_addr_id
- print_test "RM_ADDR id:${client_addr_id} ns2 => ns1"
+ print_test "RM_ADDR id:client-1 ns2 => ns1"
sleep 0.5
verify_remove_event "$server_evts" "$REMOVED" "$server4_token" "$client_addr_id"
@@ -424,7 +427,7 @@ test_remove()
:>"$server_evts"
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client6_token" id\
$client_addr_id
- print_test "RM_ADDR6 id:${client_addr_id} ns2 => ns1"
+ print_test "RM_ADDR6 id:client-1 ns2 => ns1"
sleep 0.5
verify_remove_event "$server_evts" "$REMOVED" "$server6_token" "$client_addr_id"
@@ -434,7 +437,7 @@ test_remove()
# RM_ADDR from the server to client machine
ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
$server_addr_id
- print_test "RM_ADDR id:${server_addr_id} ns1 => ns2"
+ print_test "RM_ADDR id:server ns1 => ns2"
sleep 0.5
verify_remove_event "$client_evts" "$REMOVED" "$client4_token" "$server_addr_id"
@@ -443,7 +446,7 @@ test_remove()
server_addr_id=$(( server_addr_id - 1 ))
ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
$server_addr_id
- print_test "RM_ADDR id:${server_addr_id} ns1 => ns2"
+ print_test "RM_ADDR id:server-1 ns1 => ns2"
sleep 0.5
verify_remove_event "$client_evts" "$REMOVED" "$client4_token" "$server_addr_id"
@@ -451,7 +454,7 @@ test_remove()
:>"$client_evts"
ip netns exec "$ns1" ./pm_nl_ctl rem token "$server6_token" id\
$server_addr_id
- print_test "RM_ADDR6 id:${server_addr_id} ns1 => ns2"
+ print_test "RM_ADDR6 id:server-1 ns1 => ns2"
sleep 0.5
verify_remove_event "$client_evts" "$REMOVED" "$client6_token" "$server_addr_id"
}
@@ -479,8 +482,14 @@ verify_subflow_events()
local locid
local remid
local info
+ local e_dport_txt
- info="${e_saddr} (${e_from}) => ${e_daddr}:${e_dport} (${e_to})"
+ # only display the fixed ports
+ if [ "${e_dport}" -ge "${app4_port}" ] && [ "${e_dport}" -le "${app6_port}" ]; then
+ e_dport_txt=":${e_dport}"
+ fi
+
+ info="${e_saddr} (${e_from}) => ${e_daddr}${e_dport_txt} (${e_to})"
if [ "$e_type" = "$SUB_ESTABLISHED" ]
then
@@ -766,7 +775,7 @@ test_subflows_v4_v6_mix()
:>"$client_evts"
ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server6_token" id\
$server_addr_id dev ns1eth2
- print_test "ADD_ADDR4 id:${server_addr_id} 10.0.2.1 (ns1) => ns2, reuse port"
+ print_test "ADD_ADDR4 id:server 10.0.2.1 (ns1) => ns2, reuse port"
sleep 0.5
verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "10.0.2.1"\
"$server_addr_id" "$app6_port"
@@ -861,7 +870,7 @@ test_listener()
local listener_pid=$!
sleep 0.5
- print_test "CREATE_LISTENER 10.0.2.2:$client4_port"
+ print_test "CREATE_LISTENER 10.0.2.2 (client port)"
verify_listener_events $client_evts $LISTENER_CREATED $AF_INET 10.0.2.2 $client4_port
# ADD_ADDR from client to server machine reusing the subflow port
@@ -878,13 +887,14 @@ test_listener()
mptcp_lib_kill_wait $listener_pid
sleep 0.5
- print_test "CLOSE_LISTENER 10.0.2.2:$client4_port"
+ print_test "CLOSE_LISTENER 10.0.2.2 (client port)"
verify_listener_events $client_evts $LISTENER_CLOSED $AF_INET 10.0.2.2 $client4_port
}
print_title "Make connections"
make_connection
make_connection "v6"
+print_title "Will be using address IDs ${client_addr_id} (client) and ${server_addr_id} (server)"
test_announce
test_remove
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
index 5cae53543849..15bca0708717 100755
--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
+++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# OVS kernel module self tests
diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
index 1dd057afd3fb..9f8dec2f6539 100644
--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
@@ -531,7 +531,7 @@ class ovsactions(nla):
for flat_act in parse_flat_map:
if parse_starts_block(actstr, flat_act[0], False):
actstr = actstr[len(flat_act[0]):]
- self["attrs"].append([flat_act[1]])
+ self["attrs"].append([flat_act[1], True])
actstr = actstr[strspn(actstr, ", ") :]
parsed = True
diff --git a/tools/testing/selftests/net/srv6_end_dx4_netfilter_test.sh b/tools/testing/selftests/net/srv6_end_dx4_netfilter_test.sh
new file mode 100755
index 000000000000..e23210aa547f
--- /dev/null
+++ b/tools/testing/selftests/net/srv6_end_dx4_netfilter_test.sh
@@ -0,0 +1,335 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Jianguo Wu <wujianguo@chinatelecom.cn>
+#
+# Mostly copied from tools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh.
+#
+# This script is designed for testing the support of netfilter hooks for
+# SRv6 End.DX4 behavior.
+#
+# Hereafter a network diagram is shown, where one tenant (named 100) offers
+# IPv4 L3 VPN services allowing hosts to communicate with each other across
+# an IPv6 network.
+#
+# Routers rt-1 and rt-2 implement IPv4 L3 VPN services leveraging the SRv6
+# architecture. The key components for such VPNs are: a) SRv6 Encap behavior,
+# b) SRv6 End.DX4 behavior.
+#
+# To explain how an IPv4 L3 VPN based on SRv6 works, let us briefly consider an
+# example where, within the same domain of tenant 100, the host hs-1 pings
+# the host hs-2.
+#
+# First of all, L2 reachability of the host hs-2 is taken into account by
+# the router rt-1, which acts as an ARP proxy.
+#
+# When the host hs-1 sends an IPv4 packet destined to hs-2, the router rt-1
+# receives the packet on the internal veth-t100 interface. rt-1 holds the
+# SRv6 Encap route for encapsulating the IPv4 packet in an IPv6 packet that
+# carries a Segment Routing Header (SRH). The resulting packet is sent through
+# the (IPv6) core network up to the router rt-2, which receives it on its
+# veth0 interface.
+#
+# The rt-2 router uses the 'localsid' routing table to process incoming
+# IPv6+SRH packets which belong to the VPN of the tenant 100. For each of these
+# packets, the SRv6 End.DX4 behavior removes the outer IPv6+SRH headers and
+# routes the packet to the specified nexthop. Afterwards, the packet is sent to
+# the host hs-2 through the veth-t100 interface.
+#
+# The ping response follows the same processing, but this time the roles of
+# rt-1 and rt-2 are swapped.
+#
+# When net.netfilter.nf_hooks_lwtunnel is set to 1 in rt-1 or rt-2, and an
+# rpfilter iptables rule is added, SRv6 packets go through the netfilter
+# PREROUTING hook.
+#
+#
+# +-------------------+ +-------------------+
+# | | | |
+# | hs-1 netns | | hs-2 netns |
+# | | | |
+# | +-------------+ | | +-------------+ |
+# | | veth0 | | | | veth0 | |
+# | | 10.0.0.1/24 | | | | 10.0.0.2/24 | |
+# | +-------------+ | | +-------------+ |
+# | . | | . |
+# +-------------------+ +-------------------+
+# . .
+# . .
+# . .
+# +-----------------------------------+ +-----------------------------------+
+# | . | | . |
+# | +---------------+ | | +---------------- |
+# | | veth-t100 | | | | veth-t100 | |
+# | | 10.0.0.11/24 | +----------+ | | +----------+ | 10.0.0.22/24 | |
+# | +-------+-------+ | route | | | | route | +-------+-------- |
+# | | table | | | | table | |
+# | +----------+ | | +----------+ |
+# | +--------------+ | | +--------------+ |
+# | | veth0 | | | | veth0 | |
+# | | 2001:11::1/64 |.|...|.| 2001:11::2/64 | |
+# | +--------------+ | | +--------------+ |
+# | | | |
+# | rt-1 netns | | rt-2 netns |
+# | | | |
+# +-----------------------------------+ +-----------------------------------+
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+# | Network configuration |
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# rt-1: localsid table
+# +----------------------------------------------------------------+
+# |SID |Action |
+# +----------------------------------------------------------------+
+# |fc00:21:100::6004|apply SRv6 End.DX4 nh4 10.0.0.1 dev veth-t100 |
+# +----------------------------------------------------------------+
+#
+# rt-1: route table
+# +---------------------------------------------------+
+# |host |Action |
+# +---------------------------------------------------+
+# |10.0.0.2 |apply seg6 encap segs fc00:12:100::6004|
+# +---------------------------------------------------+
+# |10.0.0.0/24|forward to dev veth_t100 |
+# +---------------------------------------------------+
+#
+#
+# rt-2: localsid table
+# +---------------------------------------------------------------+
+# |SID |Action |
+# +---------------------------------------------------------------+
+# |fc00:12:100::6004|apply SRv6 End.DX4 nh4 10.0.0.2 dev veth-t100|
+# +---------------------------------------------------------------+
+#
+# rt-2: route table
+# +---------------------------------------------------+
+# |host |Action |
+# +---------------------------------------------------+
+# |10.0.0.1 |apply seg6 encap segs fc00:21:100::6004|
+# +---------------------------------------------------+
+# |10.0.0.0/24|forward to dev veth_t100 |
+# +---------------------------------------------------+
+#
+
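+# As a concrete example, for the hs-1 -> hs-2 direction setup_vpn_config()
+# below installs the encap route on rt-1 and the matching End.DX4 decap route
+# on rt-2 roughly as follows:
+#
+#   ip -netns rt-1 -4 route add 10.0.0.2/32 \
+#           encap seg6 mode encap segs fc00:12:100::6004 dev veth0
+#   ip -netns rt-2 -6 route add fc00:12:100::6004/128 \
+#           encap seg6local action End.DX4 nh4 10.0.0.2 dev veth-t100
+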
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+readonly IPv6_RT_NETWORK=2001:11
+readonly IPv4_HS_NETWORK=10.0.0
+readonly SID_LOCATOR=fc00
+
+PING_TIMEOUT_SEC=4
+
+ret=0
+
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+log_test()
+{
+ local rc=$1
+ local expected=$2
+ local msg="$3"
+
+ if [ ${rc} -eq ${expected} ]; then
+ nsuccess=$((nsuccess+1))
+ printf "\n TEST: %-60s [ OK ]\n" "${msg}"
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "\n TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+}
+
+print_log_test_results()
+{
+ if [ "$TESTS" != "none" ]; then
+ printf "\nTests passed: %3d\n" ${nsuccess}
+ printf "Tests failed: %3d\n" ${nfail}
+ fi
+}
+
+log_section()
+{
+ echo
+ echo "################################################################################"
+ echo "TEST SECTION: $*"
+ echo "################################################################################"
+}
+
+cleanup()
+{
+ ip link del veth-rt-1 2>/dev/null || true
+ ip link del veth-rt-2 2>/dev/null || true
+
+ # destroy routers rt-* and hosts hs-*
+ for ns in $(ip netns show | grep -E 'rt-*|hs-*'); do
+ ip netns del ${ns} || true
+ done
+}
+
+# Setup the basic networking for the routers
+setup_rt_networking()
+{
+ local rt=$1
+ local nsname=rt-${rt}
+
+ ip netns add ${nsname}
+
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ ip link set veth-rt-${rt} netns ${nsname}
+ ip -netns ${nsname} link set veth-rt-${rt} name veth0
+
+ ip -netns ${nsname} addr add ${IPv6_RT_NETWORK}::${rt}/64 dev veth0 nodad
+ ip -netns ${nsname} link set veth0 up
+ ip -netns ${nsname} link set lo up
+
+ ip netns exec ${nsname} sysctl -wq net.ipv4.ip_forward=1
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.forwarding=1
+}
+
+setup_rt_netfilter()
+{
+ local rt=$1
+ local nsname=rt-${rt}
+
+ ip netns exec ${nsname} sysctl -wq net.netfilter.nf_hooks_lwtunnel=1
+ ip netns exec ${nsname} iptables -t raw -A PREROUTING -m rpfilter --invert -j DROP
+}
+
+setup_hs()
+{
+ local hs=$1
+ local rt=$2
+ local tid=$3
+ local hsname=hs-${hs}
+ local rtname=rt-${rt}
+ local rtveth=veth-t${tid}
+
+ # set the networking for the host
+ ip netns add ${hsname}
+
+ ip -netns ${hsname} link add veth0 type veth peer name ${rtveth}
+ ip -netns ${hsname} link set ${rtveth} netns ${rtname}
+ ip -netns ${hsname} addr add ${IPv4_HS_NETWORK}.${hs}/24 dev veth0
+ ip -netns ${hsname} link set veth0 up
+ ip -netns ${hsname} link set lo up
+
+ ip -netns ${rtname} addr add ${IPv4_HS_NETWORK}.${rt}${hs}/24 dev ${rtveth}
+ ip -netns ${rtname} link set ${rtveth} up
+
+ ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.proxy_arp=1
+}
+
+setup_vpn_config()
+{
+ local hssrc=$1
+ local rtsrc=$2
+ local hsdst=$3
+ local rtdst=$4
+ local tid=$5
+
+ local hssrc_name=hs-t${tid}-${hssrc}
+ local hsdst_name=hs-t${tid}-${hsdst}
+ local rtsrc_name=rt-${rtsrc}
+ local rtdst_name=rt-${rtdst}
+ local vpn_sid=${SID_LOCATOR}:${hssrc}${hsdst}:${tid}::6004
+
+ # set the encap route for encapsulating packets which arrive from the
+ # host hssrc and destined to the access router rtsrc.
+ ip -netns ${rtsrc_name} -4 route add ${IPv4_HS_NETWORK}.${hsdst}/32 \
+ encap seg6 mode encap segs ${vpn_sid} dev veth0
+ ip -netns ${rtsrc_name} -6 route add ${vpn_sid}/128 \
+ via 2001:11::${rtdst} dev veth0
+
+ # set the decap route for decapsulating packets which arrive from
+ # the rtdst router and destined to the hsdst host.
+ ip -netns ${rtdst_name} -6 route add ${vpn_sid}/128 \
+ encap seg6local action End.DX4 nh4 ${IPv4_HS_NETWORK}.${hsdst} dev veth-t${tid}
+}
+
+setup()
+{
+ ip link add veth-rt-1 type veth peer name veth-rt-2
+ # setup the networking for router rt-1 and router rt-2
+ setup_rt_networking 1
+ setup_rt_networking 2
+
+ # setup two hosts for the tenant 100.
+ # - host hs-1 is directly connected to the router rt-1;
+ # - host hs-2 is directly connected to the router rt-2.
+ setup_hs 1 1 100
+ setup_hs 2 2 100
+
+ # setup the IPv4 L3 VPN which connects the host hs-1 and host hs-2.
+ setup_vpn_config 1 1 2 2 100 #args: src_host src_router dst_host dst_router tenant
+ setup_vpn_config 2 2 1 1 100
+}
+
+check_hs_connectivity()
+{
+ local hssrc=$1
+ local hsdst=$2
+ local tid=$3
+
+ ip netns exec hs-${hssrc} ping -c 1 -W ${PING_TIMEOUT_SEC} \
+ ${IPv4_HS_NETWORK}.${hsdst} >/dev/null 2>&1
+}
+
+check_and_log_hs_connectivity()
+{
+ local hssrc=$1
+ local hsdst=$2
+ local tid=$3
+
+ check_hs_connectivity ${hssrc} ${hsdst} ${tid}
+ log_test $? 0 "Hosts connectivity: hs-${hssrc} -> hs-${hsdst} (tenant ${tid})"
+}
+
+host_tests()
+{
+ log_section "SRv6 VPN connectivity test among hosts in the same tenant"
+
+ check_and_log_hs_connectivity 1 2 100
+ check_and_log_hs_connectivity 2 1 100
+}
+
+router_netfilter_tests()
+{
+ log_section "SRv6 VPN connectivity test with netfilter enabled in routers"
+ setup_rt_netfilter 1
+ setup_rt_netfilter 2
+
+ check_and_log_hs_connectivity 1 2 100
+ check_and_log_hs_connectivity 2 1 100
+}
+
+if [ "$(id -u)" -ne 0 ];then
+ echo "SKIP: Need root privileges"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+cleanup &>/dev/null
+
+setup
+
+host_tests
+router_netfilter_tests
+
+print_log_test_results
+
+cleanup &>/dev/null
+
+exit ${ret}
diff --git a/tools/testing/selftests/net/srv6_end_dx6_netfilter_test.sh b/tools/testing/selftests/net/srv6_end_dx6_netfilter_test.sh
new file mode 100755
index 000000000000..9e69a2ed5bc3
--- /dev/null
+++ b/tools/testing/selftests/net/srv6_end_dx6_netfilter_test.sh
@@ -0,0 +1,340 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Jianguo Wu <wujianguo@chinatelecom.cn>
+#
+# Mostly copied from tools/testing/selftests/net/srv6_end_dt6_l3vpn_test.sh.
+#
+# This script is designed for testing the support of netfilter hooks for
+# SRv6 End.DX6 behavior.
+#
+# Hereafter a network diagram is shown, where one tenant (named 100) offers
+# IPv6 L3 VPN services allowing hosts to communicate with each other across
+# an IPv6 network.
+#
+# Routers rt-1 and rt-2 implement IPv6 L3 VPN services leveraging the SRv6
+# architecture. The key components for such VPNs are: a) SRv6 Encap behavior,
+# b) SRv6 End.DX6 behavior.
+#
+# To explain how an IPv6 L3 VPN based on SRv6 works, let us briefly consider an
+# example where, within the same domain of tenant 100, the host hs-1 pings
+# the host hs-2.
+#
+# First of all, L2 reachability of the host hs-2 is taken care of by the
+# router rt-1, which acts as an NDP proxy.
+#
+# When the host hs-1 sends an IPv6 packet destined to hs-2, the router rt-1
+# receives the packet on the internal veth-t100 interface. rt-1 holds the
+# SRv6 Encap route for encapsulating the IPv6 packet in an outer IPv6 packet
+# carrying the Segment Routing Header (SRH). This packet is sent through the
+# (IPv6) core network up to the router rt-2, which receives it on its veth0
+# interface.
+#
+# The rt-2 router uses the 'localsid' routing table to process incoming
+# IPv6+SRH packets which belong to the VPN of the tenant 100. For each of these
+# packets, the SRv6 End.DX6 behavior removes the outer IPv6+SRH headers and
+# routes the packet to the specified nexthop. Afterwards, the packet is sent to
+# the host hs-2 through the veth-t100 interface.
+#
+# The ping response follows the same processing, but this time the roles of
+# rt-1 and rt-2 are swapped.
+#
+# When net.netfilter.nf_hooks_lwtunnel is set to 1 in rt-1 or rt-2 and an
+# rpfilter ip6tables rule is added, SRv6 packets go through the netfilter
+# PREROUTING hook.
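+#
+# As a quick reference, setup_rt_netfilter() further below enables this with
+# roughly the following per-router commands (a sketch, shown for rt-1 only):
+#
+#   ip netns exec rt-1 sysctl -wq net.netfilter.nf_hooks_lwtunnel=1
+#   ip netns exec rt-1 ip6tables -t raw -A PREROUTING -m rpfilter --invert -j DROP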
+#
+#
+# +-------------------+ +-------------------+
+# | | | |
+# | hs-1 netns | | hs-2 netns |
+# | | | |
+# | +-------------+ | | +-------------+ |
+# | | veth0 | | | | veth0 | |
+# | | cafe::1/64 | | | | cafe::2/64 | |
+# | +-------------+ | | +-------------+ |
+# | . | | . |
+# +-------------------+ +-------------------+
+# . .
+# . .
+# . .
+# +-----------------------------------+ +-----------------------------------+
+# | . | | . |
+# | +---------------+ | | +---------------+ |
+# | | veth-t100 | | | | veth-t100 | |
+# | | cafe::11/64 | +----------+ | | +----------+ | cafe::22/64 | |
+# | +-------+-------+ | route | | | | route | +-------+-------+ |
+# | | table | | | | table | |
+# | +----------+ | | +----------+ |
+# | +--------------+ | | +--------------+ |
+# | | veth0 | | | | veth0 | |
+# | | 2001:11::1/64 |.|...|.| 2001:11::2/64 | |
+# | +--------------+ | | +--------------+ |
+# | | | |
+# | rt-1 netns | | rt-2 netns |
+# | | | |
+# +-----------------------------------+ +-----------------------------------+
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+# | Network configuration |
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# rt-1: localsid table
+# +----------------------------------------------------------------+
+# |SID |Action |
+# +----------------------------------------------------------------+
+# |fc00:21:100::6004|apply SRv6 End.DX6 nh6 cafe::1 dev veth-t100 |
+# +----------------------------------------------------------------+
+#
+# rt-1: route table
+# +---------------------------------------------------+
+# |host |Action |
+# +---------------------------------------------------+
+# |cafe::2 |apply seg6 encap segs fc00:12:100::6004|
+# +---------------------------------------------------+
+# |cafe::/64 |forward to dev veth-t100 |
+# +---------------------------------------------------+
+#
+#
+# rt-2: localsid table
+# +---------------------------------------------------------------+
+# |SID |Action |
+# +---------------------------------------------------------------+
+# |fc00:12:100::6004|apply SRv6 End.DX6 nh6 cafe::2 dev veth-t100 |
+# +---------------------------------------------------------------+
+#
+# rt-2: route table
+# +---------------------------------------------------+
+# |host |Action |
+# +---------------------------------------------------+
+# |cafe::1 |apply seg6 encap segs fc00:21:100::6004|
+# +---------------------------------------------------+
+# |cafe::/64 |forward to dev veth-t100 |
+# +---------------------------------------------------+
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+readonly IPv6_RT_NETWORK=2001:11
+readonly IPv6_HS_NETWORK=cafe
+readonly SID_LOCATOR=fc00
+
+PING_TIMEOUT_SEC=4
+
+ret=0
+
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+log_test()
+{
+ local rc=$1
+ local expected=$2
+ local msg="$3"
+
+ if [ ${rc} -eq ${expected} ]; then
+ nsuccess=$((nsuccess+1))
+ printf "\n TEST: %-60s [ OK ]\n" "${msg}"
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "\n TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+}
+
+print_log_test_results()
+{
+ if [ "$TESTS" != "none" ]; then
+ printf "\nTests passed: %3d\n" ${nsuccess}
+ printf "Tests failed: %3d\n" ${nfail}
+ fi
+}
+
+log_section()
+{
+ echo
+ echo "################################################################################"
+ echo "TEST SECTION: $*"
+ echo "################################################################################"
+}
+
+cleanup()
+{
+ ip link del veth-rt-1 2>/dev/null || true
+ ip link del veth-rt-2 2>/dev/null || true
+
+ # destroy routers rt-* and hosts hs-*
+ for ns in $(ip netns show | grep -E 'rt-*|hs-*'); do
+ ip netns del ${ns} || true
+ done
+}
+
+# Setup the basic networking for the routers
+setup_rt_networking()
+{
+ local rt=$1
+ local nsname=rt-${rt}
+
+ ip netns add ${nsname}
+
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ ip link set veth-rt-${rt} netns ${nsname}
+ ip -netns ${nsname} link set veth-rt-${rt} name veth0
+
+ ip -netns ${nsname} addr add ${IPv6_RT_NETWORK}::${rt}/64 dev veth0 nodad
+ ip -netns ${nsname} link set veth0 up
+ ip -netns ${nsname} link set lo up
+
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.forwarding=1
+}
+
+setup_rt_netfilter()
+{
+ local rt=$1
+ local nsname=rt-${rt}
+
+ ip netns exec ${nsname} sysctl -wq net.netfilter.nf_hooks_lwtunnel=1
+ ip netns exec ${nsname} ip6tables -t raw -A PREROUTING -m rpfilter --invert -j DROP
+}
+
+setup_hs()
+{
+ local hs=$1
+ local rt=$2
+ local tid=$3
+ local hsname=hs-${hs}
+ local rtname=rt-${rt}
+ local rtveth=veth-t${tid}
+
+ # set the networking for the host
+ ip netns add ${hsname}
+
+ ip -netns ${hsname} link add veth0 type veth peer name ${rtveth}
+ ip -netns ${hsname} link set ${rtveth} netns ${rtname}
+ ip -netns ${hsname} addr add ${IPv6_HS_NETWORK}::${hs}/64 dev veth0 nodad
+ ip -netns ${hsname} link set veth0 up
+ ip -netns ${hsname} link set lo up
+
+ ip -netns ${rtname} addr add ${IPv6_HS_NETWORK}::${rt}${hs}/64 dev ${rtveth}
+ ip -netns ${rtname} link set ${rtveth} up
+
+ ip netns exec ${rtname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec ${rtname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ ip netns exec ${rtname} sysctl -wq net.ipv6.conf.${rtveth}.proxy_ndp=1
+}
+
+setup_vpn_config()
+{
+ local hssrc=$1
+ local rtsrc=$2
+ local hsdst=$3
+ local rtdst=$4
+ local tid=$5
+
+ local hssrc_name=hs-t${tid}-${hssrc}
+ local hsdst_name=hs-t${tid}-${hsdst}
+ local rtsrc_name=rt-${rtsrc}
+ local rtdst_name=rt-${rtdst}
+ local rtveth=veth-t${tid}
+ local vpn_sid=${SID_LOCATOR}:${hssrc}${hsdst}:${tid}::6004
+
+ ip -netns ${rtsrc_name} -6 neigh add proxy ${IPv6_HS_NETWORK}::${hsdst} dev ${rtveth}
+
+ # set the encap route on the access router rtsrc for encapsulating packets
+ # which arrive from the host hssrc and are destined to the host hsdst.
+ ip -netns ${rtsrc_name} -6 route add ${IPv6_HS_NETWORK}::${hsdst}/128 \
+ encap seg6 mode encap segs ${vpn_sid} dev veth0
+ ip -netns ${rtsrc_name} -6 route add ${vpn_sid}/128 \
+ via 2001:11::${rtdst} dev veth0
+
+ # set the decap route on the router rtdst for decapsulating packets which
+ # arrive from the rtsrc router and are destined to the hsdst host.
+ ip -netns ${rtdst_name} -6 route add ${vpn_sid}/128 \
+ encap seg6local action End.DX6 nh6 ${IPv6_HS_NETWORK}::${hsdst} dev veth-t${tid}
+}
+
+setup()
+{
+ ip link add veth-rt-1 type veth peer name veth-rt-2
+ # setup the networking for router rt-1 and router rt-2
+ setup_rt_networking 1
+ setup_rt_networking 2
+
+ # setup two hosts for the tenant 100.
+ # - host hs-1 is directly connected to the router rt-1;
+ # - host hs-2 is directly connected to the router rt-2.
+ setup_hs 1 1 100
+ setup_hs 2 2 100
+
+ # setup the IPv6 L3 VPN which connects the host hs-1 and host hs-2.
+ setup_vpn_config 1 1 2 2 100 #args: src_host src_router dst_host dst_router tenant
+ setup_vpn_config 2 2 1 1 100
+}
+
+check_hs_connectivity()
+{
+ local hssrc=$1
+ local hsdst=$2
+ local tid=$3
+
+ ip netns exec hs-${hssrc} ping -6 -c 1 -W ${PING_TIMEOUT_SEC} \
+ ${IPv6_HS_NETWORK}::${hsdst} >/dev/null 2>&1
+}
+
+check_and_log_hs_connectivity()
+{
+ local hssrc=$1
+ local hsdst=$2
+ local tid=$3
+
+ check_hs_connectivity ${hssrc} ${hsdst} ${tid}
+ log_test $? 0 "Hosts connectivity: hs-${hssrc} -> hs-${hsdst} (tenant ${tid})"
+}
+
+host_tests()
+{
+ log_section "SRv6 VPN connectivity test among hosts in the same tenant"
+
+ check_and_log_hs_connectivity 1 2 100
+ check_and_log_hs_connectivity 2 1 100
+}
+
+router_netfilter_tests()
+{
+ log_section "SRv6 VPN connectivity test with netfilter enabled in routers"
+ setup_rt_netfilter 1
+ setup_rt_netfilter 2
+
+ check_and_log_hs_connectivity 1 2 100
+ check_and_log_hs_connectivity 2 1 100
+}
+
+if [ "$(id -u)" -ne 0 ]; then
+ echo "SKIP: Need root privileges"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+cleanup &>/dev/null
+
+setup
+
+host_tests
+router_netfilter_tests
+
+print_log_test_results
+
+cleanup &>/dev/null
+
+exit ${ret}
diff --git a/tools/testing/selftests/openat2/Makefile b/tools/testing/selftests/openat2/Makefile
index 254d676a2689..185dc76ebb5f 100644
--- a/tools/testing/selftests/openat2/Makefile
+++ b/tools/testing/selftests/openat2/Makefile
@@ -1,8 +1,18 @@
# SPDX-License-Identifier: GPL-2.0-or-later
-CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined -static-libasan
+CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined
TEST_GEN_PROGS := openat2_test resolve_test rename_attack_test
+# gcc requires -static-libasan in order to ensure that Address Sanitizer's
+# library is the first one loaded. However, clang already statically links the
+# Address Sanitizer if -fsanitize is specified. Therefore, simply omit
+# -static-libasan for clang builds.
+ifeq ($(LLVM),)
+ CFLAGS += -static-libasan
+endif
+
+LOCAL_HDRS += helpers.h
+
include ../lib.mk
-$(TEST_GEN_PROGS): helpers.c helpers.h
+$(TEST_GEN_PROGS): helpers.c
diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c
index 9024754530b2..5790ab446527 100644
--- a/tools/testing/selftests/openat2/openat2_test.c
+++ b/tools/testing/selftests/openat2/openat2_test.c
@@ -5,6 +5,7 @@
*/
#define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__ // Use ll64
#include <fcntl.h>
#include <sched.h>
#include <sys/stat.h>
diff --git a/tools/testing/selftests/seccomp/seccomp_benchmark.c b/tools/testing/selftests/seccomp/seccomp_benchmark.c
index b83099160fbc..94886c82ae60 100644
--- a/tools/testing/selftests/seccomp/seccomp_benchmark.c
+++ b/tools/testing/selftests/seccomp/seccomp_benchmark.c
@@ -194,14 +194,14 @@ int main(int argc, char *argv[])
ksft_set_plan(7);
ksft_print_msg("Running on:\n");
- ksft_print_msg("");
+ ksft_print_msg("%s", "");
system("uname -a");
ksft_print_msg("Current BPF sysctl settings:\n");
/* Avoid using "sysctl" which may not be installed. */
- ksft_print_msg("");
+ ksft_print_msg("%s", "");
system("grep -H . /proc/sys/net/core/bpf_jit_enable");
- ksft_print_msg("");
+ ksft_print_msg("%s", "");
system("grep -H . /proc/sys/net/core/bpf_jit_harden");
affinity();
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
index 12da0a939e3e..557fb074acf0 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
@@ -133,6 +133,50 @@
]
},
{
+ "id": "6f62",
+ "name": "Add taprio Qdisc with too short interval",
+ "category": [
+ "qdisc",
+ "taprio"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "echo \"1 1 8\" > /sys/bus/netdevsim/new_device"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 300 sched-entry S 02 1700 clockid CLOCK_TAI",
+ "expExitCode": "2",
+ "verifyCmd": "$TC qdisc show dev $ETH",
+ "matchPattern": "qdisc taprio 1: root refcnt",
+ "matchCount": "0",
+ "teardown": [
+ "echo \"1\" > /sys/bus/netdevsim/del_device"
+ ]
+ },
+ {
+ "id": "831f",
+ "name": "Add taprio Qdisc with too short cycle-time",
+ "category": [
+ "qdisc",
+ "taprio"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "echo \"1 1 8\" > /sys/bus/netdevsim/new_device"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 200000 sched-entry S 02 200000 cycle-time 100 clockid CLOCK_TAI",
+ "expExitCode": "2",
+ "verifyCmd": "$TC qdisc show dev $ETH",
+ "matchPattern": "qdisc taprio 1: root refcnt",
+ "matchCount": "0",
+ "teardown": [
+ "echo \"1\" > /sys/bus/netdevsim/del_device"
+ ]
+ },
+ {
"id": "3e1e",
"name": "Add taprio Qdisc with an invalid cycle-time",
"category": [
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index 86d267db87bb..7bc74969a819 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -55,6 +55,9 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
struct kvm_memory_slot *memslot;
int as_id, id;
+ if (!mask)
+ return;
+
as_id = slot >> 16;
id = (u16)slot;
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 0f4e0cf4f158..747fe251e445 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -510,8 +510,10 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
}
if (folio_test_hwpoison(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
r = -EHWPOISON;
- goto out_unlock;
+ goto out_fput;
}
page = folio_file_page(folio, index);
@@ -522,7 +524,6 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
r = 0;
-out_unlock:
folio_unlock(folio);
out_fput:
fput(file);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 14841acb8b95..1192942aef91 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -651,7 +651,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
range->on_lock(kvm);
if (IS_KVM_NULL_FN(range->handler))
- break;
+ goto mmu_unlock;
}
r.ret |= range->handler(kvm, &gfn_range);
}
@@ -660,6 +660,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
if (range->flush_on_ret && r.ret)
kvm_flush_remote_tlbs(kvm);
+mmu_unlock:
if (r.found_memslot)
KVM_MMU_UNLOCK(kvm);
@@ -4025,12 +4026,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
struct kvm *kvm = me->kvm;
struct kvm_vcpu *vcpu;
- int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
+ int last_boosted_vcpu;
unsigned long i;
int yielded = 0;
int try = 3;
int pass;
+ last_boosted_vcpu = READ_ONCE(kvm->last_boosted_vcpu);
kvm_vcpu_set_in_spin_loop(me, true);
/*
* We boost the priority of a VCPU that is runnable but not
@@ -4068,7 +4070,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
yielded = kvm_vcpu_yield_to(vcpu);
if (yielded > 0) {
- kvm->last_boosted_vcpu = i;
+ WRITE_ONCE(kvm->last_boosted_vcpu, i);
break;
} else if (yielded < 0) {
try--;
@@ -4427,7 +4429,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
struct kvm_regs *kvm_regs;
r = -ENOMEM;
- kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
+ kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
if (!kvm_regs)
goto out;
r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
@@ -4454,8 +4456,7 @@ out_free1:
break;
}
case KVM_GET_SREGS: {
- kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
- GFP_KERNEL_ACCOUNT);
+ kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
r = -ENOMEM;
if (!kvm_sregs)
goto out;
@@ -4547,7 +4548,7 @@ out_free1:
break;
}
case KVM_GET_FPU: {
- fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
+ fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
r = -ENOMEM;
if (!fpu)
goto out;
@@ -6210,7 +6211,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
active = kvm_active_vms;
mutex_unlock(&kvm_lock);
- env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
+ env = kzalloc(sizeof(*env), GFP_KERNEL);
if (!env)
return;
@@ -6226,7 +6227,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
add_uevent_var(env, "PID=%d", kvm->userspace_pid);
if (!IS_ERR(kvm->debugfs_dentry)) {
- char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
+ char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
if (p) {
tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);